// AddressSpacePartition.cs
using Ryujinx.Common;
using Ryujinx.Common.Collections;
using Ryujinx.Memory;
using System;
using System.Diagnostics;
using System.Threading;

namespace Ryujinx.Cpu.Jit.HostTracked
{
    /// <summary>
    /// Slice of a private memory allocation: a backing block, an offset into it, and a size.
    /// </summary>
    readonly struct PrivateRange
    {
        public readonly MemoryBlock Memory;
        public readonly ulong Offset;
        public readonly ulong Size;

        /// <summary>Sentinel value for "no private range" (null block, zero offset/size).</summary>
        public static PrivateRange Empty => new(null, 0, 0);

        public PrivateRange(MemoryBlock memory, ulong offset, ulong size)
        {
            Memory = memory;
            Offset = offset;
            Size = size;
        }
    }

    /// <summary>
    /// One partition of a host-tracked guest address space. Tracks which sub-ranges are mapped
    /// (<see cref="_mappingTree"/>) and which host private allocations back them
    /// (<see cref="_privateTree"/>), and manages the "bridge" page that stitches the last host
    /// page of this partition to the first guest page of the next one.
    /// </summary>
    class AddressSpacePartition : IDisposable
    {
        public const ulong GuestPageSize = 0x1000;

        private const int DefaultBlockAlignment = 1 << 20;

        private enum MappingType : byte
        {
            None,
            Private,
        }

        /// <summary>
        /// Node of the mapping tree. Ranges are split/coalesced so that the tree always
        /// partitions [partition address, partition end) without gaps or overlaps.
        /// </summary>
        private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>, IComparable<ulong>
        {
            public ulong Address { get; private set; }
            public ulong Size { get; private set; }
            public ulong EndAddress => Address + Size;
            public MappingType Type { get; private set; }

            public Mapping(ulong address, ulong size, MappingType type)
            {
                Address = address;
                Size = size;
                Type = type;
            }

            /// <summary>
            /// Splits this node at <paramref name="splitAddress"/>; this node keeps the right
            /// half, and the newly created left half is returned (caller inserts it).
            /// </summary>
            public Mapping Split(ulong splitAddress)
            {
                ulong leftSize = splitAddress - Address;
                ulong rightSize = EndAddress - splitAddress;

                Mapping left = new(Address, leftSize, Type);

                Address = splitAddress;
                Size = rightSize;

                return left;
            }

            public void UpdateState(MappingType newType)
            {
                Type = newType;
            }

            public void Extend(ulong sizeDelta)
            {
                Size += sizeDelta;
            }

            // Ranges compare equal on any overlap; "- 1UL" makes the end bound inclusive
            // without overflowing when EndAddress wraps to 0.
            public int CompareTo(Mapping other)
            {
                if (Address < other.Address)
                {
                    return -1;
                }
                else if (Address <= other.EndAddress - 1UL)
                {
                    return 0;
                }
                else
                {
                    return 1;
                }
            }

            public int CompareTo(ulong address)
            {
                if (address < Address)
                {
                    return -1;
                }
                else if (address <= EndAddress - 1UL)
                {
                    return 0;
                }
                else
                {
                    return 1;
                }
            }
        }

        /// <summary>
        /// Node of the private allocation tree. Like <see cref="Mapping"/>, the tree always
        /// covers the whole partition range; nodes with an invalid allocation mark unbacked holes.
        /// </summary>
        private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>, IComparable<ulong>
        {
            public ulong Address { get; private set; }
            public ulong Size { get; private set; }
            public ulong EndAddress => Address + Size;
            public PrivateMemoryAllocation PrivateAllocation { get; private set; }

            public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
            {
                Address = address;
                Size = size;
                PrivateAllocation = privateAllocation;
            }

            /// <summary>
            /// Splits this node (and its backing allocation, if any) at
            /// <paramref name="splitAddress"/>; returns the new left half.
            /// </summary>
            public PrivateMapping Split(ulong splitAddress)
            {
                ulong leftSize = splitAddress - Address;
                ulong rightSize = EndAddress - splitAddress;

                Debug.Assert(leftSize > 0);
                Debug.Assert(rightSize > 0);

                (var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);

                PrivateMapping left = new(Address, leftSize, leftAllocation);

                Address = splitAddress;
                Size = rightSize;

                return left;
            }

            /// <summary>Maps <paramref name="newAllocation"/> into the base block at this node's range.</summary>
            public void Map(AddressSpacePartitionMultiAllocation baseBlock, ulong baseAddress, PrivateMemoryAllocation newAllocation)
            {
                baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address - baseAddress, Size);
                PrivateAllocation = newAllocation;
            }

            /// <summary>Unmaps and frees this node's allocation, if it has one.</summary>
            public void Unmap(AddressSpacePartitionMultiAllocation baseBlock, ulong baseAddress)
            {
                if (PrivateAllocation.IsValid)
                {
                    baseBlock.UnmapView(PrivateAllocation.Memory, Address - baseAddress, Size);
                    PrivateAllocation.Dispose();
                }

                PrivateAllocation = default;
            }

            public void Extend(ulong sizeDelta)
            {
                Size += sizeDelta;
            }

            // Same overlap-based ordering as Mapping.CompareTo; see the note there.
            public int CompareTo(PrivateMapping other)
            {
                if (Address < other.Address)
                {
                    return -1;
                }
                else if (Address <= other.EndAddress - 1UL)
                {
                    return 0;
                }
                else
                {
                    return 1;
                }
            }

            public int CompareTo(ulong address)
            {
                if (address < Address)
                {
                    return -1;
                }
                else if (address <= EndAddress - 1UL)
                {
                    return 0;
                }
                else
                {
                    return 1;
                }
            }
        }

        private readonly MemoryBlock _backingMemory;
        private readonly AddressSpacePartitionMultiAllocation _baseMemory;
        private readonly PrivateMemoryAllocator _privateMemoryAllocator;

        private readonly AddressIntrusiveRedBlackTree<Mapping> _mappingTree;
        private readonly AddressIntrusiveRedBlackTree<PrivateMapping> _privateTree;

        // Guards both trees; reads take the read lock, Update/Map/Unmap take the write lock.
        private readonly ReaderWriterLockSlim _treeLock;

        private readonly ulong _hostPageSize;

        // Physical address of the partition's first/last guest page, or null while unmapped.
        private ulong? _firstPagePa;
        private ulong? _lastPagePa;

        // Bridge state cache; ulong.MaxValue is the "not yet set" sentinel matching the
        // "?? ulong.MaxValue" fallback used in InsertBridgeAtEnd.
        private ulong _cachedFirstPagePa;
        private ulong _cachedLastPagePa;
        private MemoryBlock _firstPageMemoryForUnmap;
        private ulong _firstPageOffsetForLateMap;
        private MemoryPermission _firstPageMemoryProtection;

        public ulong Address { get; }
        public ulong Size { get; }
        public ulong EndAddress => Address + Size;

        public AddressSpacePartition(AddressSpacePartitionAllocation baseMemory, MemoryBlock backingMemory, ulong address, ulong size)
        {
            _privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable);
            _mappingTree = new AddressIntrusiveRedBlackTree<Mapping>();
            _privateTree = new AddressIntrusiveRedBlackTree<PrivateMapping>();
            _treeLock = new ReaderWriterLockSlim();

            // Both trees start with a single node spanning the whole partition.
            _mappingTree.Add(new Mapping(address, size, MappingType.None));
            _privateTree.Add(new PrivateMapping(address, size, default));

            _hostPageSize = MemoryBlock.GetPageSize();

            _backingMemory = backingMemory;
            _baseMemory = new(baseMemory);

            _cachedFirstPagePa = ulong.MaxValue;
            _cachedLastPagePa = ulong.MaxValue;

            Address = address;
            Size = size;
        }

        /// <summary>
        /// True when nothing is mapped: the mapping tree has collapsed back to a single
        /// unmapped node covering the whole partition.
        /// </summary>
        public bool IsEmpty()
        {
            _treeLock.EnterReadLock();

            try
            {
                Mapping map = _mappingTree.GetNode(Address);

                return map != null && map.Address == Address && map.Size == Size && map.Type == MappingType.None;
            }
            finally
            {
                _treeLock.ExitReadLock();
            }
        }

        /// <summary>
        /// Maps guest range [va, va + size) to backing memory at <paramref name="pa"/>,
        /// tracking first/last page physical addresses for bridge maintenance.
        /// </summary>
        public void Map(ulong va, ulong pa, ulong size)
        {
            Debug.Assert(va >= Address);
            Debug.Assert(va + size <= EndAddress);

            if (va == Address)
            {
                _firstPagePa = pa;
            }

            // Does the range cover the partition's last guest page?
            if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
            {
                _lastPagePa = pa + ((EndAddress - GuestPageSize) - va);
            }

            Update(va, pa, size, MappingType.Private);
        }

        /// <summary>Unmaps guest range [va, va + size), clearing first/last page tracking as needed.</summary>
        public void Unmap(ulong va, ulong size)
        {
            Debug.Assert(va >= Address);
            Debug.Assert(va + size <= EndAddress);

            if (va == Address)
            {
                _firstPagePa = null;
            }

            if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
            {
                _lastPagePa = null;
            }

            Update(va, 0UL, size, MappingType.None);
        }

        /// <summary>Reprotects a host-page-aligned range; remembers the first page's protection for bridges.</summary>
        public void ReprotectAligned(ulong va, ulong size, MemoryPermission protection)
        {
            Debug.Assert(va >= Address);
            Debug.Assert(va + size <= EndAddress);

            _baseMemory.Reprotect(va - Address, size, protection, false);

            if (va == Address)
            {
                _firstPageMemoryProtection = protection;
            }
        }

        /// <summary>
        /// Reprotects using a per-protection mirror block, lazily creating (and late-mapping)
        /// the mirror on first use, then updates the page table via the callback.
        /// </summary>
        public void Reprotect(
            ulong va,
            ulong size,
            MemoryPermission protection,
            AddressSpacePartitioned addressSpace,
            Action<ulong, IntPtr, ulong> updatePtCallback)
        {
            if (_baseMemory.LazyInitMirrorForProtection(addressSpace, Address, Size, protection))
            {
                LateMap();
            }

            updatePtCallback(va, _baseMemory.GetPointerForProtection(va - Address, size, protection), size);
        }

        /// <summary>Gets a host pointer for guest range [va, va + size) within this partition.</summary>
        public IntPtr GetPointer(ulong va, ulong size)
        {
            Debug.Assert(va >= Address);
            Debug.Assert(va + size <= EndAddress);

            return _baseMemory.GetPointer(va - Address, size);
        }

        /// <summary>
        /// Maps the first guest page of <paramref name="partitionAfter"/> just past the end of
        /// this partition (the "bridge"), so host pages straddling the partition boundary stay
        /// contiguous. Unmaps the bridge when the next partition's first page goes away.
        /// </summary>
        public void InsertBridgeAtEnd(AddressSpacePartition partitionAfter, bool useProtectionMirrors)
        {
            ulong firstPagePa = partitionAfter?._firstPagePa ?? ulong.MaxValue;
            ulong lastPagePa = _lastPagePa ?? ulong.MaxValue;

            // Only touch host mappings when something actually changed since last time.
            if (firstPagePa != _cachedFirstPagePa || lastPagePa != _cachedLastPagePa)
            {
                if (partitionAfter != null && partitionAfter._firstPagePa.HasValue)
                {
                    (MemoryBlock firstPageMemory, ulong firstPageOffset) = partitionAfter.GetFirstPageMemoryAndOffset();

                    _baseMemory.MapView(firstPageMemory, firstPageOffset, Size, _hostPageSize);

                    if (!useProtectionMirrors)
                    {
                        _baseMemory.Reprotect(Size, _hostPageSize, partitionAfter._firstPageMemoryProtection, throwOnFail: false);
                    }

                    _firstPageMemoryForUnmap = firstPageMemory;
                    _firstPageOffsetForLateMap = firstPageOffset;
                }
                else
                {
                    MemoryBlock firstPageMemoryForUnmap = _firstPageMemoryForUnmap;

                    if (firstPageMemoryForUnmap != null)
                    {
                        _baseMemory.UnmapView(firstPageMemoryForUnmap, Size, _hostPageSize);
                        _firstPageMemoryForUnmap = null;
                    }
                }

                _cachedFirstPagePa = firstPagePa;
                _cachedLastPagePa = lastPagePa;
            }
        }

        /// <summary>Reprotects the bridge page at the end of this partition, if one is mapped.</summary>
        public void ReprotectBridge(MemoryPermission protection)
        {
            if (_firstPageMemoryForUnmap != null)
            {
                _baseMemory.Reprotect(Size, _hostPageSize, protection, throwOnFail: false);
            }
        }

        /// <summary>
        /// Gets the block and offset backing this partition's first page: the private allocation
        /// if one exists, otherwise the shared backing memory. Callers must ensure
        /// <see cref="_firstPagePa"/> is set before taking the backing-memory path.
        /// </summary>
        private (MemoryBlock, ulong) GetFirstPageMemoryAndOffset()
        {
            _treeLock.EnterReadLock();

            try
            {
                PrivateMapping map = _privateTree.GetNode(Address);

                if (map != null && map.PrivateAllocation.IsValid)
                {
                    return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (Address - map.Address));
                }
            }
            finally
            {
                _treeLock.ExitReadLock();
            }

            return (_backingMemory, _firstPagePa.Value);
        }

        /// <summary>
        /// Gets the private range covering <paramref name="va"/>, or
        /// <see cref="PrivateRange.Empty"/> if that address has no private backing.
        /// </summary>
        public PrivateRange GetPrivateAllocation(ulong va)
        {
            _treeLock.EnterReadLock();

            try
            {
                PrivateMapping map = _privateTree.GetNode(va);

                if (map != null && map.PrivateAllocation.IsValid)
                {
                    return new(map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (va - map.Address), map.Size - (va - map.Address));
                }
            }
            finally
            {
                _treeLock.ExitReadLock();
            }

            return PrivateRange.Empty;
        }

        /// <summary>Updates mapping state of [va, va + size) under the write lock.</summary>
        private void Update(ulong va, ulong pa, ulong size, MappingType type)
        {
            _treeLock.EnterWriteLock();

            try
            {
                Mapping map = _mappingTree.GetNode(va);

                Update(map, va, pa, size, type);
            }
            finally
            {
                _treeLock.ExitWriteLock();
            }
        }

        /// <summary>
        /// Walks the mapping tree over [va, va + size), splitting nodes at the range edges,
        /// applying the new mapping type, and (un)mapping private memory accordingly.
        /// Caller must hold the write lock.
        /// </summary>
        private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
        {
            ulong endAddress = va + size;

            for (; map != null; map = map.Successor)
            {
                // Split so that the current node lies entirely within [va, endAddress).
                if (map.Address < va)
                {
                    _mappingTree.Add(map.Split(va));
                }

                if (map.EndAddress > endAddress)
                {
                    Mapping newMap = map.Split(endAddress);
                    _mappingTree.Add(newMap);
                    map = newMap;
                }

                switch (type)
                {
                    case MappingType.None:
                        ulong alignment = _hostPageSize;

                        // Only release host pages at the range edges if the neighbouring
                        // mappings are also unmapped out to the page boundary.
                        bool unmappedBefore = map.Predecessor == null ||
                            (map.Predecessor.Type == MappingType.None && map.Predecessor.Address <= BitUtils.AlignDown(va, alignment));

                        bool unmappedAfter = map.Successor == null ||
                            (map.Successor.Type == MappingType.None && map.Successor.EndAddress >= BitUtils.AlignUp(endAddress, alignment));

                        UnmapPrivate(va, size, unmappedBefore, unmappedAfter);
                        break;
                    case MappingType.Private:
                        MapPrivate(va, size);
                        break;
                }

                map.UpdateState(type);
                map = TryCoalesce(map);

                if (map.EndAddress >= endAddress)
                {
                    break;
                }
            }

            return map;
        }

        /// <summary>Merges <paramref name="map"/> with same-type neighbours; returns the surviving node.</summary>
        private Mapping TryCoalesce(Mapping map)
        {
            Mapping previousMap = map.Predecessor;
            Mapping nextMap = map.Successor;

            if (previousMap != null && CanCoalesce(previousMap, map))
            {
                previousMap.Extend(map.Size);
                _mappingTree.Remove(map);
                map = previousMap;
            }

            if (nextMap != null && CanCoalesce(map, nextMap))
            {
                map.Extend(nextMap.Size);
                _mappingTree.Remove(nextMap);
            }

            return map;
        }

        private static bool CanCoalesce(Mapping left, Mapping right)
        {
            return left.Type == right.Type;
        }

        /// <summary>Allocates and maps private memory for every unbacked hole intersecting [va, va + size).</summary>
        private void MapPrivate(ulong va, ulong size)
        {
            ulong endAddress = va + size;

            ulong alignment = _hostPageSize;

            // Expand the range outwards based on page size to ensure that at least the requested region is mapped.
            ulong vaAligned = BitUtils.AlignDown(va, alignment);
            ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);

            PrivateMapping map = _privateTree.GetNode(va);

            for (; map != null; map = map.Successor)
            {
                if (!map.PrivateAllocation.IsValid)
                {
                    if (map.Address < vaAligned)
                    {
                        _privateTree.Add(map.Split(vaAligned));
                    }

                    if (map.EndAddress > endAddressAligned)
                    {
                        PrivateMapping newMap = map.Split(endAddressAligned);
                        _privateTree.Add(newMap);
                        map = newMap;
                    }

                    map.Map(_baseMemory, Address, _privateMemoryAllocator.Allocate(map.Size, _hostPageSize));
                }

                if (map.EndAddress >= endAddressAligned)
                {
                    break;
                }
            }
        }

        /// <summary>Unmaps and frees private memory over [va, va + size), respecting host page alignment.</summary>
        private void UnmapPrivate(ulong va, ulong size, bool unmappedBefore, bool unmappedAfter)
        {
            ulong endAddress = va + size;

            ulong alignment = _hostPageSize;

            // If the adjacent mappings are unmapped, expand the range outwards,
            // otherwise shrink it inwards. We must ensure we won't unmap pages that might still be in use.
            ulong vaAligned = unmappedBefore ? BitUtils.AlignDown(va, alignment) : BitUtils.AlignUp(va, alignment);
            ulong endAddressAligned = unmappedAfter ? BitUtils.AlignUp(endAddress, alignment) : BitUtils.AlignDown(endAddress, alignment);

            if (endAddressAligned <= vaAligned)
            {
                return;
            }

            PrivateMapping map = _privateTree.GetNode(vaAligned);

            for (; map != null; map = map.Successor)
            {
                if (map.PrivateAllocation.IsValid)
                {
                    if (map.Address < vaAligned)
                    {
                        _privateTree.Add(map.Split(vaAligned));
                    }

                    if (map.EndAddress > endAddressAligned)
                    {
                        PrivateMapping newMap = map.Split(endAddressAligned);
                        _privateTree.Add(newMap);
                        map = newMap;
                    }

                    map.Unmap(_baseMemory, Address);
                    map = TryCoalesce(map);
                }

                if (map.EndAddress >= endAddressAligned)
                {
                    break;
                }
            }
        }

        /// <summary>Merges <paramref name="map"/> with unbacked neighbours; returns the surviving node.</summary>
        private PrivateMapping TryCoalesce(PrivateMapping map)
        {
            PrivateMapping previousMap = map.Predecessor;
            PrivateMapping nextMap = map.Successor;

            if (previousMap != null && CanCoalesce(previousMap, map))
            {
                previousMap.Extend(map.Size);
                _privateTree.Remove(map);
                map = previousMap;
            }

            if (nextMap != null && CanCoalesce(map, nextMap))
            {
                map.Extend(nextMap.Size);
                _privateTree.Remove(nextMap);
            }

            return map;
        }

        private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
        {
            // Only unbacked (hole) nodes may merge; merging backed nodes would
            // require merging their separate private allocations.
            return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
        }

        private void LateMap()
        {
            // Map all existing private allocations.
            // This is necessary to ensure mirrors that are lazily created have the same mappings as the main one.

            PrivateMapping map = _privateTree.GetNode(Address);

            for (; map != null; map = map.Successor)
            {
                if (map.PrivateAllocation.IsValid)
                {
                    _baseMemory.LateMapView(map.PrivateAllocation.Memory, map.PrivateAllocation.Offset, map.Address - Address, map.Size);
                }
            }

            MemoryBlock firstPageMemory = _firstPageMemoryForUnmap;
            ulong firstPageOffset = _firstPageOffsetForLateMap;

            // Re-map the bridge page on the new mirror too, if one is present.
            if (firstPageMemory != null)
            {
                _baseMemory.LateMapView(firstPageMemory, firstPageOffset, Size, _hostPageSize);
            }
        }

        /// <summary>
        /// Gets the private range at <paramref name="va"/> (clamped to <paramref name="size"/>),
        /// or <see cref="PrivateRange.Empty"/> if the address is not privately backed.
        /// <paramref name="nextVa"/> receives the end of the node covering <paramref name="va"/>.
        /// </summary>
        public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
        {
            _treeLock.EnterReadLock();

            try
            {
                PrivateMapping map = _privateTree.GetNode(va);

                // The private tree always covers the full partition range, so a lookup inside
                // [Address, EndAddress) cannot miss. The previous "map != null" check below was
                // dead code: map had already been dereferenced to produce nextVa.
                Debug.Assert(map != null);

                nextVa = map.EndAddress;

                if (map.PrivateAllocation.IsValid)
                {
                    ulong startOffset = va - map.Address;

                    return new(
                        map.PrivateAllocation.Memory,
                        map.PrivateAllocation.Offset + startOffset,
                        Math.Min(map.PrivateAllocation.Size - startOffset, size));
                }
            }
            finally
            {
                _treeLock.ExitReadLock();
            }

            return PrivateRange.Empty;
        }

        /// <summary>
        /// True if any private allocation intersects [va, va + size). When the FIRST such
        /// allocation fully covers [startVa, startVa + startSize), <paramref name="range"/>
        /// is also filled in with that sub-range; otherwise it is left untouched.
        /// </summary>
        public bool HasPrivateAllocation(ulong va, ulong size, ulong startVa, ulong startSize, ref PrivateRange range)
        {
            ulong endVa = va + size;

            _treeLock.EnterReadLock();

            try
            {
                for (PrivateMapping map = _privateTree.GetNode(va); map != null && map.Address < endVa; map = map.Successor)
                {
                    if (map.PrivateAllocation.IsValid)
                    {
                        if (map.Address <= startVa && map.EndAddress >= startVa + startSize)
                        {
                            ulong startOffset = startVa - map.Address;

                            range = new(
                                map.PrivateAllocation.Memory,
                                map.PrivateAllocation.Offset + startOffset,
                                Math.Min(map.PrivateAllocation.Size - startOffset, startSize));
                        }

                        return true;
                    }
                }
            }
            finally
            {
                _treeLock.ExitReadLock();
            }

            return false;
        }

        public void Dispose()
        {
            GC.SuppressFinalize(this);

            _privateMemoryAllocator.Dispose();
            _baseMemory.Dispose();

            // ReaderWriterLockSlim owns OS event handles and must be disposed explicitly;
            // previously it was leaked here.
            _treeLock.Dispose();
        }
    }
}