KThread.cs
using Ryujinx.Common.Logging;
using Ryujinx.Cpu;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.HLE.HOS.Kernel.SupervisorCall;
using Ryujinx.Horizon.Common;
using System;
using System.Collections.Generic;
using System.Numerics;
using System.Threading;

namespace Ryujinx.HLE.HOS.Kernel.Threading
{
    // HLE counterpart of the horizon kernel thread object. Each KThread is backed by a
    // host System.Threading.Thread and an IExecutionContext; scheduling state is kept in
    // SchedFlags and manipulated under KernelContext.CriticalSection.
    class KThread : KSynchronizationObject, IKFutureSchedulerObject
    {
        // Offsets into the thread local storage block used for user-mode
        // interrupt/disable bookkeeping (read/written by the accessors at the bottom).
        private const int TlsUserDisableCountOffset = 0x100;
        private const int TlsUserInterruptFlagOffset = 0x102;

        public const int MaxWaitSyncObjects = 64;

        // Event used to park/unpark the host thread when the guest thread is not schedulable.
        private ManualResetEvent _schedulerWaitEvent;

        public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;

        public Thread HostThread { get; private set; }

        public IExecutionContext Context { get; private set; }

        public KThreadContext ThreadContext { get; private set; }

        public int DynamicPriority { get; set; }
        public ulong AffinityMask { get; set; }

        public ulong ThreadUid { get; private set; }

        // Accumulated via Interlocked.Add in AddCpuTime.
        private long _totalTimeRunning;

        public long TotalTimeRunning => _totalTimeRunning;

        public KSynchronizationObject SignaledObj { get; set; }

        public ulong CondVarAddress { get; set; }

        private ulong _entrypoint;
        private ThreadStart _customThreadStart;
        private bool _forcedUnschedulable;

        // Threads with a custom (HLE) start routine, or explicitly made unschedulable,
        // bypass the guest priority queues (see AdjustScheduling).
        public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;

        public ulong MutexAddress { get; set; }
        public int KernelWaitersCount { get; private set; }

        public KProcess Owner { get; private set; }

        private ulong _tlsAddress;

        public ulong TlsAddress => _tlsAddress;

        public KSynchronizationObject[] WaitSyncObjects { get; }
        public int[] WaitSyncHandles { get; }

        public long LastScheduledTime { get; set; }

        public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

        // List (and our node within it) holding this thread while it is withheld from
        // scheduling during synchronization; cleared on resume/cancel.
        public LinkedList<KThread> Withholder { get; set; }
        public LinkedListNode<KThread> WithholderNode { get; set; }

        public LinkedListNode<KThread> ProcessListNode { get; set; }

        // Threads waiting on a mutex owned by this thread, kept sorted by dynamic priority.
        private readonly LinkedList<KThread> _mutexWaiters;
        private LinkedListNode<KThread> _mutexWaiterNode;

        // Threads paused while waiting for this pinned thread (released in Unpin).
        private readonly LinkedList<KThread> _pinnedWaiters;

        public KThread MutexOwner { get; private set; }

        public int ThreadHandleForUserMutex { get; set; }

        // Pause request bits, masked by the permission bits before being applied to SchedFlags.
        private ThreadSchedState _forcePauseFlags;
        private ThreadSchedState _forcePausePermissionFlags;

        public Result ObjSyncResult { get; set; }

        public int BasePriority { get; set; }
        public int PreferredCore { get; set; }

        public int CurrentCore { get; set; }
        public int ActiveCore { get; set; }

        public bool IsPinned { get; private set; }

        // Values saved while pinned / core migration is disabled, restored later.
        private ulong _originalAffinityMask;
        private int _originalPreferredCore;
        private int _originalBasePriority;
        private int _coreMigrationDisableCount;

        public ThreadSchedState SchedFlags { get; private set; }

        // 0/1 flag toggled with Interlocked so termination is requested exactly once.
        private int _shallBeTerminated;

        private bool ShallBeTerminated => _shallBeTerminated != 0;

        public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;

        public bool SyncCancelled { get; set; }
        public bool WaitingSync { get; set; }

        private int _hasExited;
        private bool _hasBeenInitialized;
        private bool _hasBeenReleased;

        public bool WaitingInArbitration { get; set; }

        // Serializes SetActivity / GetThreadContext3 / SetCoreAndAffinityMask against each other.
        private readonly object _activityOperationLock = new();

        public KThread(KernelContext context) : base(context)
        {
            WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
            WaitSyncHandles = new int[MaxWaitSyncObjects];

            SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];

            _mutexWaiters = new LinkedList<KThread>();
            _pinnedWaiters = new LinkedList<KThread>();
        }

        // Sets up scheduling state, TLS (for user threads), the host thread and the CPU
        // execution context. Does not start execution; call Start for that.
        // Returns OutOfMemory if TLS allocation fails, Success otherwise.
        public Result Initialize(
            ulong entrypoint,
            ulong argsPtr,
            ulong stackTop,
            int priority,
            int cpuCore,
            KProcess owner,
            ThreadType type,
            ThreadStart customThreadStart = null)
        {
            if ((uint)type > 3)
            {
                throw new ArgumentException($"Invalid thread type \"{type}\".");
            }

            PreferredCore = cpuCore;
            AffinityMask |= 1UL << cpuCore;

            SchedFlags = ThreadSchedState.None;

            ActiveCore = cpuCore;
            ObjSyncResult = KernelResult.ThreadNotStarted;
            DynamicPriority = priority;
            BasePriority = priority;
            CurrentCore = cpuCore;
            IsPinned = false;

            _entrypoint = entrypoint;
            _customThreadStart = customThreadStart;

            if (type == ThreadType.User)
            {
                if (owner.AllocateThreadLocalStorage(out _tlsAddress) != Result.Success)
                {
                    return KernelResult.OutOfMemory;
                }

                MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize);
            }

            bool is64Bits;

            if (owner != null)
            {
                Owner = owner;

                owner.IncrementReferenceCount();
                owner.IncrementThreadCount();

                is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
            }
            else
            {
                // Kernel-owned threads have no process; assume 64-bit.
                is64Bits = true;
            }

            HostThread = new Thread(ThreadStart);

            Context = owner?.CreateExecutionContext() ?? new ProcessExecutionContext();

            ThreadContext = new KThreadContext(Context);

            Context.IsAarch32 = !is64Bits;

            Context.SetX(0, argsPtr);

            if (is64Bits)
            {
                // X18 is the platform register; low bit set per HOS convention.
                Context.SetX(18, KSystemControl.GenerateRandom() | 1);
                Context.SetX(31, stackTop);
            }
            else
            {
                // AArch32: R13 is the stack pointer.
                Context.SetX(13, (uint)stackTop);
            }

            Context.TpidrroEl0 = (long)_tlsAddress;

            ThreadUid = KernelContext.NewThreadUid();

            HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";

            _hasBeenInitialized = true;

            _forcePausePermissionFlags = ThreadSchedState.ForcePauseMask;

            if (owner != null)
            {
                owner.AddThread(this);

                if (owner.IsPaused)
                {
                    KernelContext.CriticalSection.Enter();

                    if (TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();

                        return Result.Success;
                    }

                    // Inherit the process pause so the new thread starts suspended.
                    _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;

                    CombineForcePauseFlags();

                    KernelContext.CriticalSection.Leave();
                }
            }

            return Result.Success;
        }

        // Transitions the thread to Running and starts the host thread. May spin while the
        // calling thread itself has pending force-pause flags. Returns InvalidState if the
        // thread already left the "not started" state, ThreadTerminating if termination
        // was requested before it could start.
        public Result Start()
        {
            if (!KernelContext.KernelInitialized)
            {
                KernelContext.CriticalSection.Enter();

                if (!TerminationRequested)
                {
                    _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;

                    CombineForcePauseFlags();
                }

                KernelContext.CriticalSection.Leave();
            }

            Result result = KernelResult.ThreadTerminating;

            KernelContext.CriticalSection.Enter();

            if (!ShallBeTerminated)
            {
                KThread currentThread = KernelStatic.GetCurrentThread();

                while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
                {
                    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
                    {
                        result = KernelResult.InvalidState;
                        break;
                    }

                    if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
                    {
                        if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                        {
                            CombineForcePauseFlags();
                        }

                        SetNewSchedFlags(ThreadSchedState.Running);

                        StartHostThread();

                        result = Result.Success;
                        break;
                    }
                    else
                    {
                        // The caller is being force-paused; apply its pause and yield the
                        // critical section so it can actually pause before retrying.
                        currentThread.CombineForcePauseFlags();

                        KernelContext.CriticalSection.Leave();
                        KernelContext.CriticalSection.Enter();

                        if (currentThread.ShallBeTerminated)
                        {
                            break;
                        }
                    }
                }
            }

            KernelContext.CriticalSection.Leave();

            return result;
        }

        // Marks the thread for termination (first caller only, via Interlocked), boosting
        // its priority and interrupting/waking it as needed. Returns the low scheduling
        // state observed after the request.
        public ThreadSchedState PrepareForTermination()
        {
            KernelContext.CriticalSection.Enter();

            if (Owner != null && Owner.PinnedThreads[KernelStatic.GetCurrentThread().CurrentCore] == this)
            {
                Owner.UnpinThread(this);
            }

            ThreadSchedState result;

            if (Interlocked.Exchange(ref _shallBeTerminated, 1) == 0)
            {
                if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
                {
                    // Never started; mark as pending termination directly.
                    SchedFlags = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                        ThreadSchedState oldSchedFlags = SchedFlags;

                        SchedFlags &= ThreadSchedState.LowMask;

                        AdjustScheduling(oldSchedFlags);
                    }

                    // Boost priority so the terminating thread gets CPU time quickly.
                    if (BasePriority >= 0x10)
                    {
                        SetPriority(0xF);
                    }

                    if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
                    {
                        // TODO: GIC distributor stuffs (sgir changes ect)
                        Context.RequestInterrupt();
                    }

                    SignaledObj = null;
                    ObjSyncResult = KernelResult.ThreadTerminating;

                    ReleaseAndResume();
                }
            }

            result = SchedFlags;

            KernelContext.CriticalSection.Leave();

            return result & ThreadSchedState.LowMask;
        }

        // Requests termination and blocks until the thread has signaled its exit.
        public void Terminate()
        {
            ThreadSchedState state = PrepareForTermination();

            if (state != ThreadSchedState.TerminationPending)
            {
                KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
            }
        }

        // Called after a syscall returns: exits if termination was requested, otherwise
        // re-applies any pending force-pause flags.
        public void HandlePostSyscall()
        {
            ThreadSchedState state;

            do
            {
                if (TerminationRequested)
                {
                    Exit();

                    // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
                    break;
                }

                KernelContext.CriticalSection.Enter();

                if (TerminationRequested)
                {
                    state = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        CombineForcePauseFlags();
                    }

                    state = ThreadSchedState.Running;
                }

                KernelContext.CriticalSection.Leave();
            } while (state == ThreadSchedState.TerminationPending);
        }

        // Releases the thread resource, clears pause state, signals waiters and stops
        // the execution context.
        public void Exit()
        {
            // TODO: Debug event.

            if (Owner != null)
            {
                Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);

                _hasBeenReleased = true;
            }

            KernelContext.CriticalSection.Enter();

            _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
            _forcePausePermissionFlags = 0;

            bool decRef = ExitImpl();

            Context.StopRunning();

            KernelContext.CriticalSection.Leave();

            if (decRef)
            {
                DecrementReferenceCount();
            }
        }

        // Marks the thread terminated and signals it; returns true only for the first
        // caller (who must then drop the reference).
        private bool ExitImpl()
        {
            KernelContext.CriticalSection.Enter();

            SetNewSchedFlags(ThreadSchedState.TerminationPending);

            bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;

            Signal();

            KernelContext.CriticalSection.Leave();

            return decRef;
        }

        // Returns the core this thread is currently the running thread on, or -1.
        private int GetEffectiveRunningCore()
        {
            for (int coreNumber = 0; coreNumber < KScheduler.CpuCoresCount; coreNumber++)
            {
                if (KernelContext.Schedulers[coreNumber].CurrentThread == this)
                {
                    return coreNumber;
                }
            }

            return -1;
        }

        // Pauses the thread, optionally scheduling a timed wakeup. timeout is in the
        // time manager's units; non-positive means wait indefinitely (until resumed).
        public Result Sleep(long timeout)
        {
            KernelContext.CriticalSection.Enter();

            if (TerminationRequested)
            {
                KernelContext.CriticalSection.Leave();

                return KernelResult.ThreadTerminating;
            }

            SetNewSchedFlags(ThreadSchedState.Paused);

            if (timeout > 0)
            {
                KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
            }

            KernelContext.CriticalSection.Leave();

            if (timeout > 0)
            {
                KernelContext.TimeManager.UnscheduleFutureInvocation(this);
            }

            return Result.Success;
        }

        // Changes the base priority (stashed when pinned) and recomputes inheritance.
        public void SetPriority(int priority)
        {
            KernelContext.CriticalSection.Enter();

            if (IsPinned)
            {
                _originalBasePriority = priority;
            }
            else
            {
                BasePriority = priority;
            }

            UpdatePriorityInheritance();

            KernelContext.CriticalSection.Leave();
        }

        // Adds a force-pause flag and applies it to the scheduling state.
        public void Suspend(ThreadSchedState type)
        {
            _forcePauseFlags |= type;

            CombineForcePauseFlags();
        }

        // Clears a force-pause flag; resumes scheduling if no pause flags remain.
        public void Resume(ThreadSchedState type)
        {
            ThreadSchedState oldForcePauseFlags = _forcePauseFlags;

            _forcePauseFlags &= ~type;

            if ((oldForcePauseFlags & ~type) == ThreadSchedState.None)
            {
                ThreadSchedState oldSchedFlags = SchedFlags;

                SchedFlags &= ThreadSchedState.LowMask;

                AdjustScheduling(oldSchedFlags);
            }
        }

        // Pauses or unpauses the thread via the ThreadPauseFlag. On pause, waits until
        // the target thread is no longer running on any core (or, if pinned, parks the
        // caller on the pinned-waiters list).
        public Result SetActivity(bool pause)
        {
            lock (_activityOperationLock)
            {
                Result result = Result.Success;

                KernelContext.CriticalSection.Enter();

                ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

                if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
                {
                    KernelContext.CriticalSection.Leave();

                    return KernelResult.InvalidState;
                }

                if (!TerminationRequested)
                {
                    if (pause)
                    {
                        // Pause, the force pause flag should be clear (thread is NOT paused).
                        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                        {
                            Suspend(ThreadSchedState.ThreadPauseFlag);
                        }
                        else
                        {
                            result = KernelResult.InvalidState;
                        }
                    }
                    else
                    {
                        // Unpause, the force pause flag should be set (thread is paused).
                        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                        {
                            Resume(ThreadSchedState.ThreadPauseFlag);
                        }
                        else
                        {
                            result = KernelResult.InvalidState;
                        }
                    }
                }

                KernelContext.CriticalSection.Leave();

                if (result == Result.Success && pause)
                {
                    bool isThreadRunning = true;

                    while (isThreadRunning)
                    {
                        KernelContext.CriticalSection.Enter();

                        if (TerminationRequested)
                        {
                            KernelContext.CriticalSection.Leave();

                            break;
                        }

                        isThreadRunning = false;

                        if (IsPinned)
                        {
                            KThread currentThread = KernelStatic.GetCurrentThread();

                            if (currentThread.TerminationRequested)
                            {
                                KernelContext.CriticalSection.Leave();

                                result = KernelResult.ThreadTerminating;

                                break;
                            }

                            _pinnedWaiters.AddLast(currentThread);

                            currentThread.Reschedule(ThreadSchedState.Paused);
                        }
                        else
                        {
                            isThreadRunning = GetEffectiveRunningCore() >= 0;
                        }

                        KernelContext.CriticalSection.Leave();
                    }
                }

                return result;
            }
        }

        // Captures the thread's CPU context. The thread must be paused via the pause
        // flag, otherwise InvalidState is returned and context is left default.
        public Result GetThreadContext3(out ThreadContext context)
        {
            context = default;

            lock (_activityOperationLock)
            {
                KernelContext.CriticalSection.Enter();

                if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                {
                    KernelContext.CriticalSection.Leave();

                    return KernelResult.InvalidState;
                }

                if (!TerminationRequested)
                {
                    context = GetCurrentContext();
                }

                KernelContext.CriticalSection.Leave();
            }

            return Result.Success;
        }

        // Masks PSTATE down to the user-visible flag/condition bits.
        private static uint GetPsr(IExecutionContext context)
        {
            return context.Pstate & 0xFF0FFE20;
        }

        // Builds a ThreadContext snapshot from the execution context, with separate
        // register layouts for 64-bit and 32-bit processes.
        private ThreadContext GetCurrentContext()
        {
            const int MaxRegistersAArch32 = 15;
            const int MaxFpuRegistersAArch32 = 16;

            ThreadContext context = new();

            if (Owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit))
            {
                for (int i = 0; i < context.Registers.Length; i++)
                {
                    context.Registers[i] = Context.GetX(i);
                }

                for (int i = 0; i < context.FpuRegisters.Length; i++)
                {
                    context.FpuRegisters[i] = Context.GetV(i);
                }

                context.Fp = Context.GetX(29);
                context.Lr = Context.GetX(30);
                context.Sp = Context.GetX(31);
                context.Pc = Context.Pc;
                context.Pstate = GetPsr(Context);
                context.Tpidr = (ulong)Context.TpidrroEl0;
            }
            else
            {
                for (int i = 0; i < MaxRegistersAArch32; i++)
                {
                    context.Registers[i] = (uint)Context.GetX(i);
                }

                for (int i = 0; i < MaxFpuRegistersAArch32; i++)
                {
                    context.FpuRegisters[i] = Context.GetV(i);
                }

                context.Pc = (uint)Context.Pc;
                context.Pstate = GetPsr(Context);
                context.Tpidr = (uint)Context.TpidrroEl0;
            }

            context.Fpcr = (uint)Context.Fpcr;
            context.Fpsr = (uint)Context.Fpsr;

            return context;
        }

        // Cancels an in-progress synchronization wait, waking the thread with a
        // Cancelled result, or flags the cancellation for the next wait.
        public void CancelSynchronization()
        {
            KernelContext.CriticalSection.Enter();

            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
            {
                SyncCancelled = true;
            }
            else if (Withholder != null)
            {
                Withholder.Remove(WithholderNode);

                SetNewSchedFlags(ThreadSchedState.Running);

                Withholder = null;

                SyncCancelled = true;
            }
            else
            {
                SignaledObj = null;
                ObjSyncResult = KernelResult.Cancelled;

                SetNewSchedFlags(ThreadSchedState.Running);

                SyncCancelled = false;
            }

            KernelContext.CriticalSection.Leave();
        }

        // Updates the preferred core and affinity mask (deferred while core migration is
        // disabled), then waits until the thread is off any core excluded by the new mask.
        public Result SetCoreAndAffinityMask(int newCore, ulong newAffinityMask)
        {
            lock (_activityOperationLock)
            {
                KernelContext.CriticalSection.Enter();

                bool isCoreMigrationDisabled = _coreMigrationDisableCount != 0;

                // The value -3 is "do not change the preferred core".
                if (newCore == -3)
                {
                    newCore = isCoreMigrationDisabled ? _originalPreferredCore : PreferredCore;

                    if ((newAffinityMask & (1UL << newCore)) == 0)
                    {
                        KernelContext.CriticalSection.Leave();

                        return KernelResult.InvalidCombination;
                    }
                }

                if (isCoreMigrationDisabled)
                {
                    // Defer the change; it is applied when migration is re-enabled (Unpin).
                    _originalPreferredCore = newCore;
                    _originalAffinityMask = newAffinityMask;
                }
                else
                {
                    ulong oldAffinityMask = AffinityMask;

                    PreferredCore = newCore;
                    AffinityMask = newAffinityMask;

                    if (oldAffinityMask != newAffinityMask)
                    {
                        int oldCore = ActiveCore;

                        if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
                        {
                            if (PreferredCore < 0)
                            {
                                // No preferred core; pick the highest core set in the mask.
                                ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount(AffinityMask);
                            }
                            else
                            {
                                ActiveCore = PreferredCore;
                            }
                        }

                        AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
                    }
                }

                KernelContext.CriticalSection.Leave();

                bool targetThreadPinned = true;

                while (targetThreadPinned)
                {
                    KernelContext.CriticalSection.Enter();

                    if (TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();

                        break;
                    }

                    targetThreadPinned = false;

                    int coreNumber = GetEffectiveRunningCore();
                    bool isPinnedThreadCurrentlyRunning = coreNumber >= 0;

                    if (isPinnedThreadCurrentlyRunning && ((1UL << coreNumber) & AffinityMask) == 0)
                    {
                        if (IsPinned)
                        {
                            KThread currentThread = KernelStatic.GetCurrentThread();

                            if (currentThread.TerminationRequested)
                            {
                                KernelContext.CriticalSection.Leave();

                                return KernelResult.ThreadTerminating;
                            }

                            _pinnedWaiters.AddLast(currentThread);

                            currentThread.Reschedule(ThreadSchedState.Paused);
                        }
                        else
                        {
                            targetThreadPinned = true;
                        }
                    }

                    KernelContext.CriticalSection.Leave();
                }

                return Result.Success;
            }
        }

        // Re-applies the permitted force-pause bits on top of the low scheduling nibble.
        private void CombineForcePauseFlags()
        {
            ThreadSchedState oldFlags = SchedFlags;
            ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

            SchedFlags = lowNibble | (_forcePauseFlags & _forcePausePermissionFlags);

            AdjustScheduling(oldFlags);
        }

        // Replaces the low scheduling nibble, preserving the high (pause) bits.
        private void SetNewSchedFlags(ThreadSchedState newFlags)
        {
            KernelContext.CriticalSection.Enter();

            ThreadSchedState oldFlags = SchedFlags;

            SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;

            if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
            {
                AdjustScheduling(oldFlags);
            }

            KernelContext.CriticalSection.Leave();
        }

        // Wakes a paused thread, removing it from any withholder list first.
        public void ReleaseAndResume()
        {
            KernelContext.CriticalSection.Enter();

            if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
            {
                if (Withholder != null)
                {
                    Withholder.Remove(WithholderNode);

                    SetNewSchedFlags(ThreadSchedState.Running);

                    Withholder = null;
                }
                else
                {
                    SetNewSchedFlags(ThreadSchedState.Running);
                }
            }

            KernelContext.CriticalSection.Leave();
        }

        // Unconditionally rewrites the low scheduling nibble and adjusts the queues.
        public void Reschedule(ThreadSchedState newFlags)
        {
            KernelContext.CriticalSection.Enter();

            ThreadSchedState oldFlags = SchedFlags;

            SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
                (newFlags & ThreadSchedState.LowMask);

            AdjustScheduling(oldFlags);

            KernelContext.CriticalSection.Leave();
        }

        // Registers a thread waiting on a mutex held by this thread and propagates
        // priority inheritance.
        public void AddMutexWaiter(KThread requester)
        {
            AddToMutexWaitersList(requester);

            requester.MutexOwner = this;

            UpdatePriorityInheritance();
        }

        // Removes a mutex waiter and recomputes inherited priority.
        public void RemoveMutexWaiter(KThread thread)
        {
            if (thread._mutexWaiterNode?.List != null)
            {
                _mutexWaiters.Remove(thread._mutexWaiterNode);
            }

            thread.MutexOwner = null;

            UpdatePriorityInheritance();
        }

        // Transfers ownership of the mutex at mutexAddress to the highest priority
        // waiter; remaining waiters for that address move to the new owner's list.
        // Returns the new owner (or null) and the number of threads moved in count.
        public KThread RelinquishMutex(ulong mutexAddress, out int count)
        {
            count = 0;

            if (_mutexWaiters.First == null)
            {
                return null;
            }

            KThread newMutexOwner = null;

            LinkedListNode<KThread> currentNode = _mutexWaiters.First;

            do
            {
                // Skip all threads that are not waiting for this mutex.
                while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
                {
                    currentNode = currentNode.Next;
                }

                if (currentNode == null)
                {
                    break;
                }

                LinkedListNode<KThread> nextNode = currentNode.Next;

                _mutexWaiters.Remove(currentNode);

                currentNode.Value.MutexOwner = newMutexOwner;

                if (newMutexOwner != null)
                {
                    // New owner was already selected, re-insert on new owner list.
                    newMutexOwner.AddToMutexWaitersList(currentNode.Value);
                }
                else
                {
                    // New owner not selected yet, use current thread.
                    newMutexOwner = currentNode.Value;
                }

                count++;

                currentNode = nextNode;
            }
            while (currentNode != null);

            if (newMutexOwner != null)
            {
                UpdatePriorityInheritance();

                newMutexOwner.UpdatePriorityInheritance();
            }

            return newMutexOwner;
        }

        private void UpdatePriorityInheritance()
        {
            // If any of the threads waiting for the mutex has
            // higher priority than the current thread, then
            // the current thread inherits that priority.
            int highestPriority = BasePriority;

            if (_mutexWaiters.First != null)
            {
                // The waiters list is sorted, so the first entry has the highest
                // (numerically lowest) dynamic priority.
                int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;

                if (waitingDynamicPriority < highestPriority)
                {
                    highestPriority = waitingDynamicPriority;
                }
            }

            if (highestPriority != DynamicPriority)
            {
                int oldPriority = DynamicPriority;

                DynamicPriority = highestPriority;

                AdjustSchedulingForNewPriority(oldPriority);

                if (MutexOwner != null)
                {
                    // Remove and re-insert to ensure proper sorting based on new priority.
                    MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);

                    MutexOwner.AddToMutexWaitersList(this);

                    MutexOwner.UpdatePriorityInheritance();
                }
            }
        }

        // Inserts a thread into the waiters list, keeping it sorted by dynamic priority.
        private void AddToMutexWaitersList(KThread thread)
        {
            LinkedListNode<KThread> nextPrio = _mutexWaiters.First;

            int currentPriority = thread.DynamicPriority;

            while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
            {
                nextPrio = nextPrio.Next;
            }

            if (nextPrio != null)
            {
                thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
            }
            else
            {
                thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
            }
        }

        // Moves the thread in/out of the priority queues after a scheduling state change.
        // Unschedulable threads are simply parked/released via the scheduler wait event.
        private void AdjustScheduling(ThreadSchedState oldFlags)
        {
            if (oldFlags == SchedFlags)
            {
                return;
            }

            if (!IsSchedulable)
            {
                if (!_forcedUnschedulable)
                {
                    // Ensure our thread is running and we have an event.
                    StartHostThread();

                    // If the thread is not schedulable, we want to just run or pause
                    // it directly as we don't care about priority or the core it is
                    // running on in this case.

                    if (SchedFlags == ThreadSchedState.Running)
                    {
                        _schedulerWaitEvent.Set();
                    }
                    else
                    {
                        _schedulerWaitEvent.Reset();
                    }
                }

                return;
            }

            if (oldFlags == ThreadSchedState.Running)
            {
                // Was running, now it's stopped.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
                }

                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }
            else if (SchedFlags == ThreadSchedState.Running)
            {
                // Was stopped, now it's running.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }

                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }

        // Moves a running thread between priority queues when DynamicPriority changed.
        private void AdjustSchedulingForNewPriority(int oldPriority)
        {
            if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            if (ActiveCore >= 0)
            {
                KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
                }
            }

            // Add thread to the new priority queues.
            KThread currentThread = KernelStatic.GetCurrentThread();

            if (ActiveCore >= 0)
            {
                if (currentThread == this)
                {
                    // The running thread is re-inserted at the front of its queue.
                    KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
                }
                else
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }

        // Moves a running thread between per-core queues when the affinity mask changed.
        private void AdjustSchedulingForNewAffinity(ulong oldAffinityMask, int oldCore)
        {
            if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((oldAffinityMask >> core) & 1) != 0)
                {
                    if (core == oldCore)
                    {
                        KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }

            // Add thread to the new priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((AffinityMask >> core) & 1) != 0)
                {
                    if (core == ActiveCore)
                    {
                        KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }

        // Writes the guest entry arguments into X0/X1 (args pointer and thread handle).
        public void SetEntryArguments(long argsPtr, int threadHandle)
        {
            Context.SetX(0, (ulong)argsPtr);
            Context.SetX(1, (ulong)threadHandle);
        }

        // IKFutureSchedulerObject callback: fired when a scheduled timeout expires.
        public void TimeUp()
        {
            ReleaseAndResume();
        }

        public string GetGuestStackTrace()
        {
            return Owner.Debugger.GetGuestStackTrace(this);
        }

        public string GetGuestRegisterPrintout()
        {
            return Owner.Debugger.GetCpuRegisterPrintout(this);
        }

        public void PrintGuestStackTrace()
        {
            Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
        }

        public void PrintGuestRegisterPrintout()
        {
            Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{GetGuestRegisterPrintout()}\n");
        }

        public void AddCpuTime(long ticks)
        {
            Interlocked.Add(ref _totalTimeRunning, ticks);
        }

        // Lazily creates the scheduler wait event and starts the host thread; the
        // Interlocked exchange guarantees only one caller actually starts it.
        public void StartHostThread()
        {
            if (_schedulerWaitEvent == null)
            {
                var schedulerWaitEvent = new ManualResetEvent(false);

                if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
                {
                    HostThread.Start();
                }
                else
                {
                    // Lost the race; another caller installed the event already.
                    schedulerWaitEvent.Dispose();
                }
            }
        }

        // Host thread entry point: waits to be scheduled, then runs either the custom
        // HLE routine or the guest code at _entrypoint.
        private void ThreadStart()
        {
            _schedulerWaitEvent.WaitOne();
            KernelStatic.SetKernelContext(KernelContext, this);

            if (_customThreadStart != null)
            {
                _customThreadStart();

                // Ensure that anything trying to join the HLE thread is unblocked.
                Exit();
                HandlePostSyscall();
            }
            else
            {
                Owner.Context.Execute(Context, _entrypoint);
            }

            Context.Dispose();
            _schedulerWaitEvent.Dispose();
        }

        public void MakeUnschedulable()
        {
            _forcedUnschedulable = true;
        }

        public override bool IsSignaled()
        {
            return _hasExited != 0;
        }

        // Releases kernel resources when the last reference is dropped.
        protected override void Destroy()
        {
            if (_hasBeenInitialized)
            {
                FreeResources();

                bool released = Owner != null || _hasBeenReleased;

                if (Owner != null)
                {
                    Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);

                    Owner.DecrementReferenceCount();
                }
                else
                {
                    KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
                }
            }
        }

        // Detaches the thread from its owner, frees TLS and wakes any mutex waiters.
        private void FreeResources()
        {
            Owner?.RemoveThread(this);

            if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != Result.Success)
            {
                throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
            }

            KernelContext.CriticalSection.Enter();

            // Wake up all threads that may be waiting for a mutex being held by this thread.
            foreach (KThread thread in _mutexWaiters)
            {
                thread.MutexOwner = null;
                thread._originalPreferredCore = 0;
                thread.ObjSyncResult = KernelResult.InvalidState;

                thread.ReleaseAndResume();
            }

            KernelContext.CriticalSection.Leave();

            Owner?.DecrementThreadCountAndTerminateIfZero();
        }

        // Pins the thread to its current core, saving the original core/affinity and
        // base priority so Unpin can restore them, and disallowing thread pausing.
        public void Pin()
        {
            IsPinned = true;
            _coreMigrationDisableCount++;

            int activeCore = ActiveCore;

            _originalPreferredCore = PreferredCore;
            _originalAffinityMask = AffinityMask;

            ActiveCore = CurrentCore;
            PreferredCore = CurrentCore;
            AffinityMask = 1UL << CurrentCore;

            if (activeCore != CurrentCore || _originalAffinityMask != AffinityMask)
            {
                AdjustSchedulingForNewAffinity(_originalAffinityMask, activeCore);
            }

            _originalBasePriority = BasePriority;
            BasePriority = Math.Min(_originalBasePriority, BitOperations.TrailingZeroCount(Owner.Capabilities.AllowedThreadPriosMask) - 1);
            UpdatePriorityInheritance();

            // Disallows thread pausing
            _forcePausePermissionFlags &= ~ThreadSchedState.ThreadPauseFlag;
            CombineForcePauseFlags();

            // TODO: Assign reduced SVC permissions
        }

        // Restores the pre-pin core/affinity and priority, re-allows pausing and wakes
        // any threads parked on the pinned-waiters list.
        public void Unpin()
        {
            IsPinned = false;
            _coreMigrationDisableCount--;

            ulong affinityMask = AffinityMask;
            int activeCore = ActiveCore;

            PreferredCore = _originalPreferredCore;
            AffinityMask = _originalAffinityMask;

            if (AffinityMask != affinityMask)
            {
                if ((AffinityMask & 1UL << ActiveCore) != 0)
                {
                    if (PreferredCore >= 0)
                    {
                        ActiveCore = PreferredCore;
                    }
                    else
                    {
                        // No preferred core; pick the highest core set in the mask.
                        ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
                    }

                    AdjustSchedulingForNewAffinity(affinityMask, activeCore);
                }
            }

            BasePriority = _originalBasePriority;
            UpdatePriorityInheritance();

            if (!TerminationRequested)
            {
                // Allows thread pausing
                _forcePausePermissionFlags |= ThreadSchedState.ThreadPauseFlag;
                CombineForcePauseFlags();

                // TODO: Restore SVC permissions
            }

            // Wake up waiters
            foreach (KThread waiter in _pinnedWaiters)
            {
                waiter.ReleaseAndResume();
            }

            _pinnedWaiters.Clear();
        }

        // If this thread is the pinned thread of its core, clears the user interrupt
        // flag and unpins it.
        public void SynchronizePreemptionState()
        {
            KernelContext.CriticalSection.Enter();

            if (Owner != null && Owner.PinnedThreads[CurrentCore] == this)
            {
                ClearUserInterruptFlag();

                Owner.UnpinThread(this);
            }

            KernelContext.CriticalSection.Leave();
        }

        // Reads the user-mode disable counter from this thread's TLS.
        public ushort GetUserDisableCount()
        {
            return Owner.CpuMemory.Read<ushort>(_tlsAddress + TlsUserDisableCountOffset);
        }

        // Sets the user-mode interrupt flag in this thread's TLS.
        public void SetUserInterruptFlag()
        {
            Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 1);
        }

        // Clears the user-mode interrupt flag in this thread's TLS.
        public void ClearUserInterruptFlag()
        {
            Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 0);
        }
    }
}