// Buffer.cs
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Gpu.Synchronization;
using Ryujinx.Memory.Range;
using Ryujinx.Memory.Tracking;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;

namespace Ryujinx.Graphics.Gpu.Memory
{
    delegate void BufferFlushAction(ulong address, ulong size, ulong syncNumber);

    /// <summary>
    /// Buffer, used to store vertex and index data, uniform and storage buffers, and others.
    /// </summary>
    class Buffer : IRange, ISyncActionHandler, IDisposable
    {
        private const ulong GranularBufferThreshold = 4096;

        private readonly GpuContext _context;
        private readonly PhysicalMemory _physicalMemory;

        /// <summary>
        /// Host buffer handle.
        /// </summary>
        public BufferHandle Handle { get; private set; }

        /// <summary>
        /// Start address of the buffer in guest memory.
        /// </summary>
        public ulong Address { get; }

        /// <summary>
        /// Size of the buffer in bytes.
        /// </summary>
        public ulong Size { get; }

        /// <summary>
        /// End address of the buffer in guest memory.
        /// </summary>
        public ulong EndAddress => Address + Size;

        /// <summary>
        /// Increments when the buffer is (partially) unmapped or disposed.
        /// </summary>
        public int UnmappedSequence { get; private set; }

        /// <summary>
        /// Indicates if the buffer can be used in a sparse buffer mapping.
        /// </summary>
        public bool SparseCompatible { get; }

        /// <summary>
        /// Ranges of the buffer that have been modified on the GPU.
        /// Ranges defined here cannot be updated from CPU until a CPU waiting sync point is reached.
        /// Then, write tracking will signal, wait for GPU sync (generated at the syncpoint) and flush these regions.
        /// </summary>
        /// <remarks>
        /// This is null until at least one modification occurs.
        /// </remarks>
        private BufferModifiedRangeList _modifiedRanges = null;

        /// <summary>
        /// A structure that is used to flush buffer data back to a host mapped buffer for cached readback.
        /// Only used if the buffer data is explicitly owned by device local memory.
        /// </summary>
        private BufferPreFlush _preFlush = null;

        /// <summary>
        /// Usage tracking state that determines what type of backing the buffer should use.
        /// </summary>
        public BufferBackingState BackingState;

        private readonly MultiRegionHandle _memoryTrackingGranular;
        private readonly RegionHandle _memoryTracking;

        private readonly RegionSignal _externalFlushDelegate;
        private readonly Action<ulong, ulong> _loadDelegate;
        private readonly Action<ulong, ulong> _modifiedDelegate;

        private HashSet<MultiRangeBuffer> _virtualDependencies;
        private readonly ReaderWriterLockSlim _virtualDependenciesLock;

        private int _sequenceNumber;

        private readonly bool _useGranular;
        private bool _syncActionRegistered;

        private int _referenceCount = 1;

        // Coalesced pending-dirty window; ulong.MaxValue means "no pending dirty range".
        private ulong _dirtyStart = ulong.MaxValue;
        private ulong _dirtyEnd = ulong.MaxValue;

        /// <summary>
        /// Creates a new instance of the buffer.
        /// </summary>
        /// <param name="context">GPU context that the buffer belongs to</param>
        /// <param name="physicalMemory">Physical memory where the buffer is mapped</param>
        /// <param name="address">Start address of the buffer</param>
        /// <param name="size">Size of the buffer in bytes</param>
        /// <param name="stage">The type of usage that created the buffer</param>
        /// <param name="sparseCompatible">Indicates if the buffer can be used in a sparse buffer mapping</param>
        /// <param name="baseBuffers">Buffers which this buffer contains, and will inherit tracking handles from</param>
        public Buffer(
            GpuContext context,
            PhysicalMemory physicalMemory,
            ulong address,
            ulong size,
            BufferStage stage,
            bool sparseCompatible,
            IEnumerable<Buffer> baseBuffers = null)
        {
            _context = context;
            _physicalMemory = physicalMemory;
            Address = address;
            Size = size;
            SparseCompatible = sparseCompatible;

            BackingState = new BufferBackingState(_context, this, stage, baseBuffers);

            BufferAccess access = BackingState.SwitchAccess(this);

            Handle = context.Renderer.CreateBuffer((int)size, access);

            _useGranular = size > GranularBufferThreshold;

            IEnumerable<IRegionHandle> baseHandles = null;

            if (baseBuffers != null)
            {
                baseHandles = baseBuffers.SelectMany(buffer =>
                {
                    if (buffer._useGranular)
                    {
                        return buffer._memoryTrackingGranular.GetHandles();
                    }
                    else
                    {
                        return Enumerable.Repeat(buffer._memoryTracking, 1);
                    }
                });
            }

            if (_useGranular)
            {
                _memoryTrackingGranular = physicalMemory.BeginGranularTracking(address, size, ResourceKind.Buffer, RegionFlags.UnalignedAccess, baseHandles);

                _memoryTrackingGranular.RegisterPreciseAction(address, size, PreciseAction);
            }
            else
            {
                _memoryTracking = physicalMemory.BeginTracking(address, size, ResourceKind.Buffer, RegionFlags.UnalignedAccess);

                if (baseHandles != null)
                {
                    _memoryTracking.Reprotect(false);

                    // Inherit dirty state from the base handles, then dispose them.
                    foreach (IRegionHandle handle in baseHandles)
                    {
                        if (handle.Dirty)
                        {
                            _memoryTracking.Reprotect(true);
                        }

                        handle.Dispose();
                    }
                }

                _memoryTracking.RegisterPreciseAction(PreciseAction);
            }

            _externalFlushDelegate = new RegionSignal(ExternalFlush);
            _loadDelegate = new Action<ulong, ulong>(LoadRegion);
            _modifiedDelegate = new Action<ulong, ulong>(RegionModified);

            _virtualDependenciesLock = new ReaderWriterLockSlim();
        }

        /// <summary>
        /// Recreates the backing buffer based on the desired access type
        /// reported by the backing state struct.
        /// </summary>
        private void ChangeBacking()
        {
            BufferAccess access = BackingState.SwitchAccess(this);

            BufferHandle newHandle = _context.Renderer.CreateBuffer((int)Size, access);

            _context.Renderer.Pipeline.CopyBuffer(Handle, newHandle, 0, 0, (int)Size);

            _modifiedRanges?.SelfMigration();

            // If switching from device local to host mapped, pre-flushing data no longer makes sense.
            // This is set to null and disposed when the migration fully completes.
            _preFlush = null;

            Handle = newHandle;

            _physicalMemory.BufferCache.BufferBackingChanged(this);
        }

        /// <summary>
        /// Gets a sub-range from the buffer, from a start address til a page boundary after the given size.
        /// </summary>
        /// <remarks>
        /// This can be used to bind and use sub-ranges of the buffer on the host API.
        /// </remarks>
        /// <param name="address">Start address of the sub-range, must be greater than or equal to the buffer address</param>
        /// <param name="size">Size in bytes of the sub-range, must be less than or equal to the buffer size</param>
        /// <param name="write">Whether the buffer will be written to by this use</param>
        /// <returns>The buffer sub-range</returns>
        public BufferRange GetRangeAligned(ulong address, ulong size, bool write)
        {
            ulong end = ((address + size + MemoryManager.PageMask) & ~MemoryManager.PageMask) - Address;
            ulong offset = address - Address;

            return new BufferRange(Handle, (int)offset, (int)(end - offset), write);
        }

        /// <summary>
        /// Gets a sub-range from the buffer.
        /// </summary>
        /// <remarks>
        /// This can be used to bind and use sub-ranges of the buffer on the host API.
        /// </remarks>
        /// <param name="address">Start address of the sub-range, must be greater than or equal to the buffer address</param>
        /// <param name="size">Size in bytes of the sub-range, must be less than or equal to the buffer size</param>
        /// <param name="write">Whether the buffer will be written to by this use</param>
        /// <returns>The buffer sub-range</returns>
        public BufferRange GetRange(ulong address, ulong size, bool write)
        {
            int offset = (int)(address - Address);

            return new BufferRange(Handle, offset, (int)size, write);
        }

        /// <summary>
        /// Checks if a given range overlaps with the buffer.
        /// </summary>
        /// <param name="address">Start address of the range</param>
        /// <param name="size">Size in bytes of the range</param>
        /// <returns>True if the range overlaps, false otherwise</returns>
        public bool OverlapsWith(ulong address, ulong size)
        {
            return Address < address + size && address < EndAddress;
        }

        /// <summary>
        /// Checks if a given range is fully contained in the buffer.
        /// </summary>
        /// <param name="address">Start address of the range</param>
        /// <param name="size">Size in bytes of the range</param>
        /// <returns>True if the range is contained, false otherwise</returns>
        public bool FullyContains(ulong address, ulong size)
        {
            return address >= Address && address + size <= EndAddress;
        }

        /// <summary>
        /// Performs guest to host memory synchronization of the buffer data.
        /// </summary>
        /// <remarks>
        /// This causes the buffer data to be overwritten if a write was detected from the CPU,
        /// since the last call to this method.
        /// </remarks>
        /// <param name="address">Start address of the range to synchronize</param>
        /// <param name="size">Size in bytes of the range to synchronize</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void SynchronizeMemory(ulong address, ulong size)
        {
            if (_useGranular)
            {
                _memoryTrackingGranular.QueryModified(address, size, _modifiedDelegate, _context.SequenceNumber);
            }
            else
            {
                if (_context.SequenceNumber != _sequenceNumber && _memoryTracking.DirtyOrVolatile())
                {
                    _memoryTracking.Reprotect();

                    if (_modifiedRanges != null)
                    {
                        _modifiedRanges.ExcludeModifiedRegions(Address, Size, _loadDelegate);
                    }
                    else
                    {
                        BackingState.RecordSet();
                        _context.Renderer.SetBufferData(Handle, 0, _physicalMemory.GetSpan(Address, (int)Size));
                        CopyToDependantVirtualBuffers();
                    }

                    _sequenceNumber = _context.SequenceNumber;
                    _dirtyStart = ulong.MaxValue;
                }
            }

            if (_dirtyStart != ulong.MaxValue)
            {
                ulong end = address + size;

                if (end > _dirtyStart && address < _dirtyEnd)
                {
                    if (_modifiedRanges != null)
                    {
                        _modifiedRanges.ExcludeModifiedRegions(_dirtyStart, _dirtyEnd - _dirtyStart, _loadDelegate);
                    }
                    else
                    {
                        LoadRegion(_dirtyStart, _dirtyEnd - _dirtyStart);
                    }

                    _dirtyStart = ulong.MaxValue;
                }
            }
        }

        /// <summary>
        /// Ensure that the modified range list exists.
        /// </summary>
        private void EnsureRangeList()
        {
            _modifiedRanges ??= new BufferModifiedRangeList(_context, this, Flush);
        }

        /// <summary>
        /// Checks if a backing change is deemed necessary from the given usage.
        /// If it is, queues a backing change to happen on the next sync action.
        /// </summary>
        /// <param name="stage">Buffer stage that can change backing type</param>
        private void TryQueueBackingChange(BufferStage stage)
        {
            if (BackingState.ShouldChangeBacking(stage))
            {
                if (!_syncActionRegistered)
                {
                    _context.RegisterSyncAction(this);
                    _syncActionRegistered = true;
                }
            }
        }

        /// <summary>
        /// Signal that the given region of the buffer has been modified.
        /// </summary>
        /// <param name="address">The start address of the modified region</param>
        /// <param name="size">The size of the modified region</param>
        /// <param name="stage">Buffer stage that triggered the modification</param>
        public void SignalModified(ulong address, ulong size, BufferStage stage)
        {
            EnsureRangeList();

            TryQueueBackingChange(stage);

            _modifiedRanges.SignalModified(address, size);

            if (!_syncActionRegistered)
            {
                _context.RegisterSyncAction(this);
                _syncActionRegistered = true;
            }
        }

        /// <summary>
        /// Indicate that modifications in a given region of this buffer have been overwritten.
        /// </summary>
        /// <param name="address">The start address of the region</param>
        /// <param name="size">The size of the region</param>
        public void ClearModified(ulong address, ulong size)
        {
            _modifiedRanges?.Clear(address, size);
        }

        /// <summary>
        /// Action to be performed immediately before sync is created.
        /// This will copy any buffer ranges designated for pre-flushing.
        /// </summary>
        /// <param name="syncpoint">True if the action is a guest syncpoint</param>
        public void SyncPreAction(bool syncpoint)
        {
            if (_referenceCount == 0)
            {
                return;
            }

            if (BackingState.ShouldChangeBacking())
            {
                ChangeBacking();
            }

            if (BackingState.IsDeviceLocal)
            {
                _preFlush ??= new BufferPreFlush(_context, this, FlushImpl);

                if (_preFlush.ShouldCopy)
                {
                    _modifiedRanges?.GetRangesAtSync(Address, Size, _context.SyncNumber, (address, size) =>
                    {
                        _preFlush.CopyModified(address, size);
                    });
                }
            }
        }

        /// <summary>
        /// Action to be performed when a syncpoint is reached after modification.
        /// This will register read/write tracking to flush the buffer from GPU when its memory is used.
        /// </summary>
        /// <inheritdoc/>
        public bool SyncAction(bool syncpoint)
        {
            _syncActionRegistered = false;

            if (_useGranular)
            {
                _modifiedRanges?.GetRanges(Address, Size, (address, size) =>
                {
                    _memoryTrackingGranular.RegisterAction(address, size, _externalFlushDelegate);
                    SynchronizeMemory(address, size);
                });
            }
            else
            {
                _memoryTracking.RegisterAction(_externalFlushDelegate);
                SynchronizeMemory(Address, Size);
            }

            return true;
        }

        /// <summary>
        /// Inherit modified and dirty ranges from another buffer.
        /// </summary>
        /// <param name="from">The buffer to inherit from</param>
        public void InheritModifiedRanges(Buffer from)
        {
            if (from._modifiedRanges != null && from._modifiedRanges.HasRanges)
            {
                if (from._syncActionRegistered && !_syncActionRegistered)
                {
                    _context.RegisterSyncAction(this);
                    _syncActionRegistered = true;
                }

                void registerRangeAction(ulong address, ulong size)
                {
                    if (_useGranular)
                    {
                        _memoryTrackingGranular.RegisterAction(address, size, _externalFlushDelegate);
                    }
                    else
                    {
                        _memoryTracking.RegisterAction(_externalFlushDelegate);
                    }
                }

                EnsureRangeList();

                _modifiedRanges.InheritRanges(from._modifiedRanges, registerRangeAction);
            }

            if (from._dirtyStart != ulong.MaxValue)
            {
                ForceDirty(from._dirtyStart, from._dirtyEnd - from._dirtyStart);
            }
        }

        /// <summary>
        /// Determine if a given region of the buffer has been modified, and must be flushed.
        /// </summary>
        /// <param name="address">The start address of the region</param>
        /// <param name="size">The size of the region</param>
        /// <returns>True if the region has pending GPU modifications, false otherwise</returns>
        public bool IsModified(ulong address, ulong size)
        {
            if (_modifiedRanges != null)
            {
                return _modifiedRanges.HasRange(address, size);
            }

            return false;
        }

        /// <summary>
        /// Clear the dirty range that overlaps with the given region.
        /// </summary>
        /// <param name="address">Start address of the modified region</param>
        /// <param name="size">Size of the modified region</param>
        private void ClearDirty(ulong address, ulong size)
        {
            if (_dirtyStart != ulong.MaxValue)
            {
                ulong end = address + size;

                if (end > _dirtyStart && address < _dirtyEnd)
                {
                    if (address <= _dirtyStart)
                    {
                        // Cut off the start.

                        if (end < _dirtyEnd)
                        {
                            _dirtyStart = end;
                        }
                        else
                        {
                            _dirtyStart = ulong.MaxValue;
                        }
                    }
                    else if (end >= _dirtyEnd)
                    {
                        // Cut off the end.

                        _dirtyEnd = address;
                    }

                    // If fully contained, do nothing.
                }
            }
        }

        /// <summary>
        /// Indicate that a region of the buffer was modified, and must be loaded from memory.
        /// </summary>
        /// <param name="mAddress">Start address of the modified region</param>
        /// <param name="mSize">Size of the modified region</param>
        private void RegionModified(ulong mAddress, ulong mSize)
        {
            // Clamp the incoming region to the buffer bounds.
            if (mAddress < Address)
            {
                mAddress = Address;
            }

            ulong maxSize = Address + Size - mAddress;

            if (mSize > maxSize)
            {
                mSize = maxSize;
            }

            ClearDirty(mAddress, mSize);

            if (_modifiedRanges != null)
            {
                _modifiedRanges.ExcludeModifiedRegions(mAddress, mSize, _loadDelegate);
            }
            else
            {
                LoadRegion(mAddress, mSize);
            }
        }

        /// <summary>
        /// Load a region of the buffer from memory.
        /// </summary>
        /// <param name="mAddress">Start address of the modified region</param>
        /// <param name="mSize">Size of the modified region</param>
        private void LoadRegion(ulong mAddress, ulong mSize)
        {
            BackingState.RecordSet();

            int offset = (int)(mAddress - Address);

            _context.Renderer.SetBufferData(Handle, offset, _physicalMemory.GetSpan(mAddress, (int)mSize));

            CopyToDependantVirtualBuffers(mAddress, mSize);
        }

        /// <summary>
        /// Force a region of the buffer to be dirty within the memory tracking. Avoids reprotection and nullifies sequence number check.
        /// </summary>
        /// <param name="mAddress">Start address of the modified region</param>
        /// <param name="mSize">Size of the region to force dirty</param>
        private void ForceTrackingDirty(ulong mAddress, ulong mSize)
        {
            if (_useGranular)
            {
                _memoryTrackingGranular.ForceDirty(mAddress, mSize);
            }
            else
            {
                _memoryTracking.ForceDirty();
                _sequenceNumber--;
            }
        }

        /// <summary>
        /// Force a region of the buffer to be dirty. Avoids reprotection and nullifies sequence number check.
        /// </summary>
        /// <param name="mAddress">Start address of the modified region</param>
        /// <param name="mSize">Size of the region to force dirty</param>
        public void ForceDirty(ulong mAddress, ulong mSize)
        {
            _modifiedRanges?.Clear(mAddress, mSize);

            ulong end = mAddress + mSize;

            if (_dirtyStart == ulong.MaxValue)
            {
                _dirtyStart = mAddress;
                _dirtyEnd = end;
            }
            else
            {
                // Is the new range more than a page away from the existing one?

                if ((long)(mAddress - _dirtyEnd) >= (long)MemoryManager.PageSize ||
                    (long)(_dirtyStart - end) >= (long)MemoryManager.PageSize)
                {
                    ForceTrackingDirty(mAddress, mSize);
                }
                else
                {
                    _dirtyStart = Math.Min(_dirtyStart, mAddress);
                    _dirtyEnd = Math.Max(_dirtyEnd, end);
                }
            }
        }

        /// <summary>
        /// Performs copy of all the buffer data from one buffer to another.
        /// </summary>
        /// <param name="destination">The destination buffer to copy the data into</param>
        /// <param name="dstOffset">The offset of the destination buffer to copy into</param>
        public void CopyTo(Buffer destination, int dstOffset)
        {
            CopyFromDependantVirtualBuffers();
            _context.Renderer.Pipeline.CopyBuffer(Handle, destination.Handle, 0, dstOffset, (int)Size);
        }

        /// <summary>
        /// Flushes a range of the buffer.
        /// This writes the range data back into guest memory.
        /// </summary>
        /// <param name="handle">Buffer handle to flush data from</param>
        /// <param name="address">Start address of the range</param>
        /// <param name="size">Size in bytes of the range</param>
        private void FlushImpl(BufferHandle handle, ulong address, ulong size)
        {
            int offset = (int)(address - Address);

            using PinnedSpan<byte> data = _context.Renderer.GetBufferData(handle, offset, (int)size);

            // TODO: When write tracking shaders, they will need to be aware of changes in overlapping buffers.
            _physicalMemory.WriteUntracked(address, CopyFromDependantVirtualBuffers(data.Get(), address, size));
        }

        /// <summary>
        /// Flushes a range of the buffer.
        /// This writes the range data back into guest memory.
        /// </summary>
        /// <param name="address">Start address of the range</param>
        /// <param name="size">Size in bytes of the range</param>
        private void FlushImpl(ulong address, ulong size)
        {
            FlushImpl(Handle, address, size);
        }

        /// <summary>
        /// Flushes a range of the buffer from the most optimal source.
        /// This writes the range data back into guest memory.
        /// </summary>
        /// <param name="address">Start address of the range</param>
        /// <param name="size">Size in bytes of the range</param>
        /// <param name="syncNumber">Sync number waited for before flushing the data</param>
        public void Flush(ulong address, ulong size, ulong syncNumber)
        {
            BackingState.RecordFlush();

            BufferPreFlush preFlush = _preFlush;

            if (preFlush != null)
            {
                preFlush.FlushWithAction(address, size, syncNumber);
            }
            else
            {
                FlushImpl(address, size);
            }
        }

        /// <summary>
        /// Gets an action that disposes the backing buffer using its current handle.
        /// Useful for deleting an old copy of the buffer after the handle changes.
        /// </summary>
        /// <returns>An action that disposes the buffer, using the buffer handle at the time the method is generated</returns>
        public Action GetSnapshotDisposeAction()
        {
            BufferHandle handle = Handle;
            BufferPreFlush preFlush = _preFlush;

            return () =>
            {
                _context.Renderer.DeleteBuffer(handle);
                preFlush?.Dispose();
            };
        }

        /// <summary>
        /// Gets an action that flushes a range of the buffer using its current handle.
        /// Useful for flushing data from old copies of the buffer after the handle changes.
        /// </summary>
        /// <returns>An action that flushes data from the specified range, using the buffer handle at the time the method is generated</returns>
        public BufferFlushAction GetSnapshotFlushAction()
        {
            BufferHandle handle = Handle;

            return (ulong address, ulong size, ulong _) =>
            {
                FlushImpl(handle, address, size);
            };
        }

        /// <summary>
        /// Align a given address and size region to page boundaries.
        /// </summary>
        /// <param name="address">The start address of the region</param>
        /// <param name="size">The size of the region</param>
        /// <returns>The page aligned address and size</returns>
        private static (ulong address, ulong size) PageAlign(ulong address, ulong size)
        {
            ulong pageMask = MemoryManager.PageMask;
            ulong rA = address & ~pageMask;
            ulong rS = ((address + size + pageMask) & ~pageMask) - rA;
            return (rA, rS);
        }

        /// <summary>
        /// Flush modified ranges of the buffer from another thread.
        /// This will flush all modifications made before the active SyncNumber was set, and may block to wait for GPU sync.
        /// </summary>
        /// <param name="address">Address of the memory action</param>
        /// <param name="size">Size in bytes</param>
        public void ExternalFlush(ulong address, ulong size)
        {
            _context.Renderer.BackgroundContextAction(() =>
            {
                var ranges = _modifiedRanges;

                if (ranges != null)
                {
                    (address, size) = PageAlign(address, size);
                    ranges.WaitForAndFlushRanges(address, size);
                }
            }, true);
        }

        /// <summary>
        /// An action to be performed when a precise memory access occurs to this resource.
        /// For buffers, this skips flush-on-write by punching holes directly into the modified range list.
        /// </summary>
        /// <param name="address">Address of the memory action</param>
        /// <param name="size">Size in bytes</param>
        /// <param name="write">True if the access was a write, false otherwise</param>
        /// <returns>True if the write was handled by forcing the region dirty, false otherwise</returns>
        private bool PreciseAction(ulong address, ulong size, bool write)
        {
            if (!write)
            {
                // We only want to skip flush-on-write.
                return false;
            }

            ulong maxAddress = Math.Max(address, Address);
            ulong minEndAddress = Math.Min(address + size, Address + Size);

            if (maxAddress >= minEndAddress)
            {
                // Access doesn't overlap.
                return false;
            }

            ForceDirty(maxAddress, minEndAddress - maxAddress);

            return true;
        }

        /// <summary>
        /// Called when part of the memory for this buffer has been unmapped.
        /// Calls are from non-GPU threads.
        /// </summary>
        /// <param name="address">Start address of the unmapped region</param>
        /// <param name="size">Size of the unmapped region</param>
        public void Unmapped(ulong address, ulong size)
        {
            BufferModifiedRangeList modifiedRanges = _modifiedRanges;

            modifiedRanges?.Clear(address, size);

            UnmappedSequence++;
        }

        /// <summary>
        /// Adds a virtual buffer dependency, indicating that a virtual buffer depends on data from this buffer.
        /// </summary>
        /// <param name="virtualBuffer">Dependant virtual buffer</param>
        public void AddVirtualDependency(MultiRangeBuffer virtualBuffer)
        {
            _virtualDependenciesLock.EnterWriteLock();

            try
            {
                (_virtualDependencies ??= new()).Add(virtualBuffer);
            }
            finally
            {
                _virtualDependenciesLock.ExitWriteLock();
            }
        }

        /// <summary>
        /// Removes a virtual buffer dependency, indicating that a virtual buffer no longer depends on data from this buffer.
        /// </summary>
        /// <param name="virtualBuffer">Dependant virtual buffer</param>
        public void RemoveVirtualDependency(MultiRangeBuffer virtualBuffer)
        {
            _virtualDependenciesLock.EnterWriteLock();

            try
            {
                if (_virtualDependencies != null)
                {
                    _virtualDependencies.Remove(virtualBuffer);

                    if (_virtualDependencies.Count == 0)
                    {
                        _virtualDependencies = null;
                    }
                }
            }
            finally
            {
                _virtualDependenciesLock.ExitWriteLock();
            }
        }

        /// <summary>
        /// Copies the buffer data to all virtual buffers that depend on it.
        /// </summary>
        public void CopyToDependantVirtualBuffers()
        {
            CopyToDependantVirtualBuffers(Address, Size);
        }

        /// <summary>
        /// Copies the buffer data inside the specified range to all virtual buffers that depend on it.
        /// </summary>
        /// <param name="address">Address of the range</param>
        /// <param name="size">Size of the range in bytes</param>
        public void CopyToDependantVirtualBuffers(ulong address, ulong size)
        {
            // NOTE(review): this iterates _virtualDependencies without taking _virtualDependenciesLock,
            // unlike the span-based read path — presumably only called from the GPU thread; confirm.
            if (_virtualDependencies != null)
            {
                foreach (var virtualBuffer in _virtualDependencies)
                {
                    CopyToDependantVirtualBuffer(virtualBuffer, address, size);
                }
            }
        }

        /// <summary>
        /// Copies all modified ranges from all virtual buffers back into this buffer.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void CopyFromDependantVirtualBuffers()
        {
            if (_virtualDependencies != null)
            {
                CopyFromDependantVirtualBuffersImpl();
            }
        }

        /// <summary>
        /// Copies all modified ranges from all virtual buffers back into this buffer.
        /// </summary>
        [MethodImpl(MethodImplOptions.NoInlining)]
        private void CopyFromDependantVirtualBuffersImpl()
        {
            foreach (var virtualBuffer in _virtualDependencies.OrderBy(x => x.ModificationSequenceNumber))
            {
                virtualBuffer.ConsumeModifiedRegion(this, (mAddress, mSize) =>
                {
                    // Get offset inside both this and the virtual buffer.
                    // Note that sometimes there is no right answer for the virtual offset,
                    // as the same physical range might be mapped multiple times inside a virtual buffer.
                    // We just assume it does not happen in practice as it can only be implemented correctly
                    // when the host has support for proper sparse mapping.

                    ulong mEndAddress = mAddress + mSize;
                    mAddress = Math.Max(mAddress, Address);
                    mSize = Math.Min(mEndAddress, EndAddress) - mAddress;

                    int physicalOffset = (int)(mAddress - Address);
                    int virtualOffset = virtualBuffer.Range.FindOffset(new(mAddress, mSize));

                    _context.Renderer.Pipeline.CopyBuffer(virtualBuffer.Handle, Handle, virtualOffset, physicalOffset, (int)mSize);
                });
            }
        }

        /// <summary>
        /// Copies all overlapping modified ranges from all virtual buffers back into this buffer, and returns an updated span with the data.
        /// </summary>
        /// <param name="dataSpan">Span where the unmodified data will be taken from for the output</param>
        /// <param name="address">Address of the region to copy</param>
        /// <param name="size">Size of the region to copy in bytes</param>
        /// <returns>A span with <paramref name="dataSpan"/>, and the data for all modified ranges if any</returns>
        private ReadOnlySpan<byte> CopyFromDependantVirtualBuffers(ReadOnlySpan<byte> dataSpan, ulong address, ulong size)
        {
            _virtualDependenciesLock.EnterReadLock();

            try
            {
                if (_virtualDependencies != null)
                {
                    byte[] storage = dataSpan.ToArray();

                    foreach (var virtualBuffer in _virtualDependencies.OrderBy(x => x.ModificationSequenceNumber))
                    {
                        virtualBuffer.ConsumeModifiedRegion(address, size, (mAddress, mSize) =>
                        {
                            // Get offset inside both this and the virtual buffer.
                            // Note that sometimes there is no right answer for the virtual offset,
                            // as the same physical range might be mapped multiple times inside a virtual buffer.
                            // We just assume it does not happen in practice as it can only be implemented correctly
                            // when the host has support for proper sparse mapping.

                            ulong mEndAddress = mAddress + mSize;
                            mAddress = Math.Max(mAddress, address);
                            mSize = Math.Min(mEndAddress, address + size) - mAddress;

                            int physicalOffset = (int)(mAddress - Address);
                            int virtualOffset = virtualBuffer.Range.FindOffset(new(mAddress, mSize));

                            // Copy only the clamped modified range (mSize), consistent with
                            // CopyFromDependantVirtualBuffersImpl and with the GetData read below.
                            // The previous code copied the full queried size, past the validated range.
                            _context.Renderer.Pipeline.CopyBuffer(virtualBuffer.Handle, Handle, virtualOffset, physicalOffset, (int)mSize);
                            virtualBuffer.GetData(storage.AsSpan().Slice((int)(mAddress - address), (int)mSize), virtualOffset, (int)mSize);
                        });
                    }

                    dataSpan = storage;
                }
            }
            finally
            {
                _virtualDependenciesLock.ExitReadLock();
            }

            return dataSpan;
        }

        /// <summary>
        /// Copies the buffer data to the specified virtual buffer.
        /// </summary>
        /// <param name="virtualBuffer">Virtual buffer to copy the data into</param>
        public void CopyToDependantVirtualBuffer(MultiRangeBuffer virtualBuffer)
        {
            CopyToDependantVirtualBuffer(virtualBuffer, Address, Size);
        }

        /// <summary>
        /// Copies the buffer data inside the given range to the specified virtual buffer.
        /// </summary>
        /// <param name="virtualBuffer">Virtual buffer to copy the data into</param>
        /// <param name="address">Address of the range</param>
        /// <param name="size">Size of the range in bytes</param>
        public void CopyToDependantVirtualBuffer(MultiRangeBuffer virtualBuffer, ulong address, ulong size)
        {
            // Broadcast data to all ranges of the virtual buffer that are contained inside this buffer.

            ulong lastOffset = 0;

            while (virtualBuffer.TryGetPhysicalOffset(this, lastOffset, out ulong srcOffset, out ulong dstOffset, out ulong copySize))
            {
                ulong innerOffset = address - Address;
                ulong innerEndOffset = (address + size) - Address;

                lastOffset = dstOffset + copySize;

                // Clamp range to the specified range.
                ulong copySrcOffset = Math.Max(srcOffset, innerOffset);
                ulong copySrcEndOffset = Math.Min(innerEndOffset, srcOffset + copySize);

                if (copySrcEndOffset > copySrcOffset)
                {
                    copySize = copySrcEndOffset - copySrcOffset;
                    dstOffset += copySrcOffset - srcOffset;
                    srcOffset = copySrcOffset;

                    _context.Renderer.Pipeline.CopyBuffer(Handle, virtualBuffer.Handle, (int)srcOffset, (int)dstOffset, (int)copySize);
                }
            }
        }

        /// <summary>
        /// Increments the buffer reference count.
        /// </summary>
        public void IncrementReferenceCount()
        {
            _referenceCount++;
        }

        /// <summary>
        /// Decrements the buffer reference count.
        /// </summary>
        public void DecrementReferenceCount()
        {
            if (--_referenceCount == 0)
            {
                DisposeData();
            }
        }

        /// <summary>
        /// Disposes the host buffer's data, not its tracking handles.
        /// </summary>
        public void DisposeData()
        {
            _modifiedRanges?.Clear();

            _context.Renderer.DeleteBuffer(Handle);
            _preFlush?.Dispose();
            _preFlush = null;

            UnmappedSequence++;
        }

        /// <summary>
        /// Disposes the host buffer.
        /// </summary>
        public void Dispose()
        {
            _memoryTrackingGranular?.Dispose();
            _memoryTracking?.Dispose();

            DecrementReferenceCount();
        }
    }
}