/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_sema.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed semaphore synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>
#include <kern/sync_sema.h>
#include <kern/spl.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>

#include <libkern/OSAtomic.h>

static unsigned int semaphore_event;
#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)

ZONE_DECLARE(semaphore_zone, "semaphores", sizeof(struct semaphore), ZC_NONE);

os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);

/* Forward declarations */

kern_return_t
semaphore_wait_trap_internal(
	mach_port_name_t name,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_wait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_trap_internal(
	mach_port_name_t name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name);

kern_return_t
semaphore_signal_internal(
	semaphore_t semaphore,
	thread_t thread,
	int options);

kern_return_t
semaphore_convert_wait_result(
	int wait_result);

void
semaphore_wait_continue(void *arg __unused, wait_result_t wr);

static kern_return_t
semaphore_wait_internal(
	semaphore_t wait_semaphore,
	semaphore_t signal_semaphore,
	uint64_t deadline,
	int option,
	void (*caller_cont)(kern_return_t));
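
/*
 * Convert a relative (sec, nsec) timeout into an absolute-time deadline,
 * as consumed by the wait primitives below.
 */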
static __inline__ uint64_t
semaphore_deadline(
	unsigned int sec,
	clock_res_t nsec)
{
	uint64_t abstime;

	nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime);
	clock_absolutetime_interval_to_deadline(abstime, &abstime);

	return abstime;
}

/*
 *	Routine:	semaphore_create
 *
 *	Creates a semaphore.
 *	The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
	task_t task,
	semaphore_t *new_semaphore,
	int policy,
	int value)
{
	semaphore_t s = SEMAPHORE_NULL;
	kern_return_t kret;

	*new_semaphore = SEMAPHORE_NULL;
	if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX || policy < 0) {
		return KERN_INVALID_ARGUMENT;
	}

	s = (semaphore_t) zalloc(semaphore_zone);

	if (s == SEMAPHORE_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */
	if (kret != KERN_SUCCESS) {
		zfree(semaphore_zone, s);
		return kret;
	}

	/*
	 * Initialize the semaphore values.
	 */
	s->port = IP_NULL;
	os_ref_init(&s->ref_count, &sema_refgrp);
	s->count = value;
	s->active = TRUE;
	s->owner = task;

	/*
	 * Associate the new semaphore with the task by adding
	 * the new semaphore to the task's semaphore list.
	 */
	task_lock(task);
	/* Check for race with task_terminate */
	if (!task->active) {
		task_unlock(task);
		zfree(semaphore_zone, s);
		return KERN_INVALID_TASK;
	}
	enqueue_head(&task->semaphore_list, (queue_entry_t) s);
	task->semaphores_owned++;
	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
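
/*
 * Usage sketch (illustrative only): an in-kernel client might pair the
 * create/wait/signal/destroy routines roughly as follows, where "my_task"
 * and "my_sema" are hypothetical caller-owned variables.  semaphore_destroy
 * consumes the reference returned by semaphore_create.
 *
 *	semaphore_t my_sema;
 *	if (semaphore_create(my_task, &my_sema, SYNC_POLICY_FIFO, 0) == KERN_SUCCESS) {
 *		...
 *		semaphore_signal(my_sema);      (from a producer context)
 *		semaphore_wait(my_sema);        (from a consumer context)
 *		...
 *		semaphore_destroy(my_task, my_sema);
 *	}
 */
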
/*
 *	Routine:	semaphore_destroy_internal
 *
 *	Disassociate a semaphore from its owning task, mark it inactive,
 *	and set any waiting threads running with THREAD_RESTART.
 *
 *	Conditions:
 *			task is locked
 *			semaphore is locked
 *			semaphore is owned by the specified task
 *	Returns:
 *			with semaphore unlocked
 */
static void
semaphore_destroy_internal(
	task_t task,
	semaphore_t semaphore)
{
	int old_count;

	/* unlink semaphore from owning task */
	assert(semaphore->owner == task);
	remqueue((queue_entry_t) semaphore);
	semaphore->owner = TASK_NULL;
	task->semaphores_owned--;

	/*
	 * Deactivate semaphore
	 */
	assert(semaphore->active);
	semaphore->active = FALSE;

	/*
	 * Wake up blocked threads
	 */
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		waitq_wakeup64_all_locked(&semaphore->waitq,
		    SEMAPHORE_EVENT,
		    THREAD_RESTART, NULL,
		    WAITQ_ALL_PRIORITIES,
		    WAITQ_UNLOCK);
		/* waitq/semaphore is unlocked */
	} else {
		semaphore_unlock(semaphore);
	}
}

/*
 *	Routine:	semaphore_destroy
 *
 *	Destroys a semaphore and consumes the caller's reference on the
 *	semaphore.
 */
kern_return_t
semaphore_destroy(
	task_t task,
	semaphore_t semaphore)
{
	spl_t spl_level;

	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task == TASK_NULL) {
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);
	spl_level = splsched();
	semaphore_lock(semaphore);

	if (semaphore->owner != task) {
		semaphore_unlock(semaphore);
		semaphore_dereference(semaphore);
		splx(spl_level);
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	semaphore_destroy_internal(task, semaphore);
	/* semaphore unlocked */

	splx(spl_level);
	task_unlock(task);

	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}

/*
 *	Routine:	semaphore_destroy_all
 *
 *	Destroy all the semaphores associated with a given task.
 */
#define SEMASPERSPL 20  /* max number of semaphores to destroy per spl hold */

void
semaphore_destroy_all(
	task_t task)
{
	uint32_t count;
	spl_t spl_level;

	count = 0;
	task_lock(task);
	while (!queue_empty(&task->semaphore_list)) {
		semaphore_t semaphore;

		semaphore = (semaphore_t) queue_first(&task->semaphore_list);

		if (count == 0) {
			spl_level = splsched();
		}
		semaphore_lock(semaphore);

		semaphore_destroy_internal(task, semaphore);
		/* semaphore unlocked */

		/* throttle number of semaphores per interrupt disablement */
		if (++count == SEMASPERSPL) {
			count = 0;
			splx(spl_level);
		}
	}
	if (count != 0) {
		splx(spl_level);
	}

	task_unlock(task);
}
/*
 *	Routine:	semaphore_signal_internal
 *
 *	Signals the semaphore, as directed by the supplied thread and options.
 *	Assumptions:
 *		Semaphore is unlocked on entry (this routine takes and drops the lock).
 */
kern_return_t
semaphore_signal_internal(
	semaphore_t semaphore,
	thread_t thread,
	int options)
{
	kern_return_t kr;
	spl_t spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore->active) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = waitq_wakeup64_thread_locked(
				&semaphore->waitq,
				SEMAPHORE_EVENT,
				thread,
				THREAD_AWAKENED,
				WAITQ_UNLOCK);
			/* waitq/semaphore is unlocked */
		} else {
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = waitq_wakeup64_all_locked(
				&semaphore->waitq,
				SEMAPHORE_EVENT,
				THREAD_AWAKENED, NULL,
				WAITQ_ALL_PRIORITIES,
				WAITQ_UNLOCK);
			/* waitq / semaphore is unlocked */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST) {
				semaphore->count++;
			}
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	if (semaphore->count < 0) {
		waitq_options_t wq_option = (options & SEMAPHORE_THREAD_HANDOFF) ?
		    WQ_OPTION_HANDOFF : WQ_OPTION_NONE;
		kr = waitq_wakeup64_one_locked(
			&semaphore->waitq,
			SEMAPHORE_EVENT,
			THREAD_AWAKENED, NULL,
			WAITQ_ALL_PRIORITIES,
			WAITQ_KEEP_LOCKED,
			wq_option);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			semaphore->count = 0;  /* all waiters gone */
		}
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}

/*
 *	Routine:	semaphore_signal_thread
 *
 *	If the specified thread is blocked on the semaphore, it is
 *	woken up.  If a NULL thread was supplied, then any one
 *	thread is woken up.  Otherwise the caller gets KERN_NOT_WAITING
 *	and the semaphore is unchanged.
 */
kern_return_t
semaphore_signal_thread(
	semaphore_t semaphore,
	thread_t thread)
{
	kern_return_t ret;

	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	ret = semaphore_signal_internal(semaphore,
	    thread,
	    SEMAPHORE_OPTION_NONE);
	return ret;
}

/*
 *	Routine:	semaphore_signal_thread_trap
 *
 *	Trap interface to the semaphore_signal_thread function.
 */
kern_return_t
semaphore_signal_thread_trap(
	struct semaphore_signal_thread_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	mach_port_name_t thread_name = args->thread_name;
	semaphore_t semaphore;
	thread_t thread;
	kern_return_t kr;

	/*
	 * MACH_PORT_NULL is not an error. It means that we want to
	 * select any one thread that is already waiting, but not to
	 * pre-post the semaphore.
	 */
	if (thread_name != MACH_PORT_NULL) {
		thread = port_name_to_thread(thread_name, PORT_TO_THREAD_NONE);
		if (thread == THREAD_NULL) {
			return KERN_INVALID_ARGUMENT;
		}
	} else {
		thread = THREAD_NULL;
	}

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
		    thread,
		    SEMAPHORE_OPTION_NONE);
		semaphore_dereference(semaphore);
	}
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}
	return kr;
}


/*
 *	Routine:	semaphore_signal
 *
 *	Traditional (in-kernel client and MIG interface) semaphore
 *	signal routine.  Most users will access the trap version.
 *
 *	This interface is not defined to return info about whether
 *	this call found a thread waiting or not.  The internal
 *	routines (and future external routines) do.  We have to
 *	convert those into plain KERN_SUCCESS returns.
 */
kern_return_t
semaphore_signal(
	semaphore_t semaphore)
{
	kern_return_t kr;

	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = semaphore_signal_internal(semaphore,
	    THREAD_NULL,
	    SEMAPHORE_SIGNAL_PREPOST);
	if (kr == KERN_NOT_WAITING) {
		return KERN_SUCCESS;
	}
	return kr;
}
/*
 *	Routine:	semaphore_signal_trap
 *
 *	Trap interface to the semaphore_signal function.
 */
kern_return_t
semaphore_signal_trap(
	struct semaphore_signal_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;

	return semaphore_signal_internal_trap(sema_name);
}

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name)
{
	semaphore_t semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
		    THREAD_NULL,
		    SEMAPHORE_SIGNAL_PREPOST);
		semaphore_dereference(semaphore);
		if (kr == KERN_NOT_WAITING) {
			kr = KERN_SUCCESS;
		}
	}
	return kr;
}

/*
 *	Routine:	semaphore_signal_all
 *
 *	Awakens ALL threads currently blocked on the semaphore.
 *	The semaphore count returns to zero.
 */
kern_return_t
semaphore_signal_all(
	semaphore_t semaphore)
{
	kern_return_t kr;

	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = semaphore_signal_internal(semaphore,
	    THREAD_NULL,
	    SEMAPHORE_SIGNAL_ALL);
	if (kr == KERN_NOT_WAITING) {
		return KERN_SUCCESS;
	}
	return kr;
}

/*
 *	Routine:	semaphore_signal_all_trap
 *
 *	Trap interface to the semaphore_signal_all function.
 */
kern_return_t
semaphore_signal_all_trap(
	struct semaphore_signal_all_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	semaphore_t semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
		    THREAD_NULL,
		    SEMAPHORE_SIGNAL_ALL);
		semaphore_dereference(semaphore);
		if (kr == KERN_NOT_WAITING) {
			kr = KERN_SUCCESS;
		}
	}
	return kr;
}

/*
 *	Routine:	semaphore_convert_wait_result
 *
 *	Generate the return code after a semaphore wait/block.  It
 *	takes the wait result as an input and converts that to an
 *	appropriate result.
 */
kern_return_t
semaphore_convert_wait_result(int wait_result)
{
	switch (wait_result) {
	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_TIMED_OUT:
		return KERN_OPERATION_TIMED_OUT;

	case THREAD_INTERRUPTED:
		return KERN_ABORTED;

	case THREAD_RESTART:
		return KERN_TERMINATED;

	default:
		panic("semaphore_block\n");
		return KERN_FAILURE;
	}
}

/*
 *	Routine:	semaphore_wait_continue
 *
 *	Common continuation routine after waiting on a semaphore.
 *	It returns directly to user space.
 */
void
semaphore_wait_continue(void *arg __unused, wait_result_t wr)
{
	thread_t self = current_thread();
	void (*caller_cont)(kern_return_t) = self->sth_continuation;

	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL) {
		semaphore_dereference(self->sth_signalsemaphore);
	}

	assert(self->handoff_thread == THREAD_NULL);
	assert(caller_cont != (void (*)(kern_return_t))0);
	(*caller_cont)(semaphore_convert_wait_result(wr));
}
/*
 *	Routine:	semaphore_wait_internal
 *
 *		Decrements the semaphore count by one.  If the count is
 *		negative after the decrement, the calling thread blocks
 *		(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the wait semaphore.
 *		A reference is held on the signal semaphore (if one is provided).
 */
static kern_return_t
semaphore_wait_internal(
	semaphore_t wait_semaphore,
	semaphore_t signal_semaphore,
	uint64_t deadline,
	int option,
	void (*caller_cont)(kern_return_t))
{
	int wait_result;
	spl_t spl_level;
	kern_return_t kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);
	thread_t self = current_thread();
	thread_t handoff_thread = THREAD_NULL;
	thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;
	int semaphore_signal_options = SEMAPHORE_SIGNAL_PREPOST;

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		wait_semaphore->count = -1;  /* we don't keep an actual count */

		thread_set_pending_block_hint(self, kThreadWaitSemaphore);
		(void)waitq_assert_wait64_locked(
			&wait_semaphore->waitq,
			SEMAPHORE_EVENT,
			THREAD_ABORTSAFE,
			TIMEOUT_URGENCY_USER_NORMAL,
			deadline, TIMEOUT_NO_LEEWAY,
			self);

		semaphore_signal_options |= SEMAPHORE_THREAD_HANDOFF;
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
		    THREAD_NULL, semaphore_signal_options);

		if (signal_kr == KERN_NOT_WAITING) {
			assert(self->handoff_thread == THREAD_NULL);
			signal_kr = KERN_SUCCESS;
		} else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh!Oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			assert(self->handoff_thread == THREAD_NULL);
			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED) {
				kr = KERN_TERMINATED;
			}
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING) {
		assert(self->handoff_thread == THREAD_NULL);
		return kr;
	}

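	/*
	 * If signalling the other semaphore selected a specific thread to
	 * hand off to (SEMAPHORE_THREAD_HANDOFF above), pick it up here so
	 * the handoff primitives below can run it directly.
	 */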
	if (self->handoff_thread) {
		handoff_thread = self->handoff_thread;
		self->handoff_thread = THREAD_NULL;
		handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
	}
	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;

		thread_handoff_parameter(handoff_thread, semaphore_wait_continue,
		    NULL, handoff_option);
	} else {
		wait_result = thread_handoff_deallocate(handoff_thread, handoff_option);
	}

	assert(self->handoff_thread == THREAD_NULL);
	return semaphore_convert_wait_result(wait_result);
}


/*
 *	Routine:	semaphore_wait
 *
 *	Traditional (non-continuation) interface presented to
 *	in-kernel clients to wait on a semaphore.
 */
kern_return_t
semaphore_wait(
	semaphore_t semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return semaphore_wait_internal(semaphore,
	           SEMAPHORE_NULL,
	           0ULL, SEMAPHORE_OPTION_NONE,
	           (void (*)(kern_return_t))0);
}

kern_return_t
semaphore_wait_noblock(
	semaphore_t semaphore)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return semaphore_wait_internal(semaphore,
	           SEMAPHORE_NULL,
	           0ULL, SEMAPHORE_TIMEOUT_NOBLOCK,
	           (void (*)(kern_return_t))0);
}

kern_return_t
semaphore_wait_deadline(
	semaphore_t semaphore,
	uint64_t deadline)
{
	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return semaphore_wait_internal(semaphore,
	           SEMAPHORE_NULL,
	           deadline, SEMAPHORE_OPTION_NONE,
	           (void (*)(kern_return_t))0);
}

/*
 *	Trap:	semaphore_wait_trap
 *
 *	Trap version of semaphore wait.  Called on behalf of user-level
 *	clients.
 */

kern_return_t
semaphore_wait_trap(
	struct semaphore_wait_trap_args *args)
{
	return semaphore_wait_trap_internal(args->wait_name, thread_syscall_return);
}


kern_return_t
semaphore_wait_trap_internal(
	mach_port_name_t name,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_wait_internal(semaphore,
		    SEMAPHORE_NULL,
		    0ULL, SEMAPHORE_OPTION_NONE,
		    caller_cont);
		semaphore_dereference(semaphore);
	}
	return kr;
}

/*
 *	Routine:	semaphore_timedwait
 *
 *	Traditional (non-continuation) interface presented to
 *	in-kernel clients to wait on a semaphore with a timeout.
 *
 *	A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait(
	semaphore_t semaphore,
	mach_timespec_t wait_time)
{
	int option = SEMAPHORE_OPTION_NONE;
	uint64_t deadline = 0;

	if (semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (BAD_MACH_TIMESPEC(&wait_time)) {
		return KERN_INVALID_VALUE;
	}

	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
		option = SEMAPHORE_TIMEOUT_NOBLOCK;
	} else {
		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
	}

	return semaphore_wait_internal(semaphore,
	           SEMAPHORE_NULL,
	           deadline, option,
	           (void (*)(kern_return_t))0);
}
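
/*
 * Usage sketch (illustrative only): an in-kernel caller waiting up to
 * 100ms on a hypothetical, caller-owned "my_sema".  A {0,0} timeout
 * degenerates to the non-blocking behavior described above.
 *
 *	mach_timespec_t ts = { .tv_sec = 0, .tv_nsec = 100 * NSEC_PER_MSEC };
 *	kern_return_t kr = semaphore_timedwait(my_sema, ts);
 *	if (kr == KERN_OPERATION_TIMED_OUT) {
 *		(nobody signalled within the timeout)
 *	}
 */
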
/*
 *	Trap:	semaphore_timedwait_trap
 *
 *	Trap version of a semaphore_timedwait.  The timeout parameter
 *	is passed in two distinct parts and re-assembled on this side
 *	of the trap interface (to accommodate calling conventions that
 *	pass structures as pointers instead of inline in registers without
 *	having to add a copyin).
 *
 *	A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_trap(
	struct semaphore_timedwait_trap_args *args)
{
	return semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return);
}


kern_return_t
semaphore_timedwait_trap_internal(
	mach_port_name_t name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t semaphore;
	mach_timespec_t wait_time;
	kern_return_t kr;

	wait_time.tv_sec = sec;
	wait_time.tv_nsec = nsec;
	if (BAD_MACH_TIMESPEC(&wait_time)) {
		return KERN_INVALID_VALUE;
	}

	kr = port_name_to_semaphore(name, &semaphore);
	if (kr == KERN_SUCCESS) {
		int option = SEMAPHORE_OPTION_NONE;
		uint64_t deadline = 0;

		if (sec == 0 && nsec == 0) {
			option = SEMAPHORE_TIMEOUT_NOBLOCK;
		} else {
			deadline = semaphore_deadline(sec, nsec);
		}

		kr = semaphore_wait_internal(semaphore,
		    SEMAPHORE_NULL,
		    deadline, option,
		    caller_cont);
		semaphore_dereference(semaphore);
	}
	return kr;
}

/*
 *	Routine:	semaphore_wait_signal
 *
 *	Atomically register a wait on a semaphore and THEN signal
 *	another.  This is the in-kernel entry point that does not
 *	block at a continuation and does not free a signal_semaphore
 *	reference.
 */
kern_return_t
semaphore_wait_signal(
	semaphore_t wait_semaphore,
	semaphore_t signal_semaphore)
{
	if (wait_semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return semaphore_wait_internal(wait_semaphore,
	           signal_semaphore,
	           0ULL, SEMAPHORE_OPTION_NONE,
	           (void (*)(kern_return_t))0);
}

/*
 *	Trap:	semaphore_wait_signal_trap
 *
 *	Atomically register a wait on a semaphore and THEN signal
 *	another.  This is the trap version from user space.
 */
kern_return_t
semaphore_wait_signal_trap(
	struct semaphore_wait_signal_trap_args *args)
{
	return semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return);
}

kern_return_t
semaphore_wait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t wait_semaphore;
	semaphore_t signal_semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
	if (kr == KERN_SUCCESS) {
		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
		if (kr == KERN_SUCCESS) {
			kr = semaphore_wait_internal(wait_semaphore,
			    signal_semaphore,
			    0ULL, SEMAPHORE_OPTION_NONE,
			    caller_cont);
			semaphore_dereference(wait_semaphore);
		}
		semaphore_dereference(signal_semaphore);
	}
	return kr;
}
/*
 *	Routine:	semaphore_timedwait_signal
 *
 *	Atomically register a wait on a semaphore and THEN signal
 *	another.  This is the in-kernel entry point that does not
 *	block at a continuation.
 *
 *	A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_signal(
	semaphore_t wait_semaphore,
	semaphore_t signal_semaphore,
	mach_timespec_t wait_time)
{
	int option = SEMAPHORE_OPTION_NONE;
	uint64_t deadline = 0;

	if (wait_semaphore == SEMAPHORE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (BAD_MACH_TIMESPEC(&wait_time)) {
		return KERN_INVALID_VALUE;
	}

	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) {
		option = SEMAPHORE_TIMEOUT_NOBLOCK;
	} else {
		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
	}

	return semaphore_wait_internal(wait_semaphore,
	           signal_semaphore,
	           deadline, option,
	           (void (*)(kern_return_t))0);
}

/*
 *	Trap:	semaphore_timedwait_signal_trap
 *
 *	Atomically register a timed wait on a semaphore and THEN signal
 *	another.  This is the trap version from user space.
 */
kern_return_t
semaphore_timedwait_signal_trap(
	struct semaphore_timedwait_signal_trap_args *args)
{
	return semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return);
}

kern_return_t
semaphore_timedwait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t wait_semaphore;
	semaphore_t signal_semaphore;
	mach_timespec_t wait_time;
	kern_return_t kr;

	wait_time.tv_sec = sec;
	wait_time.tv_nsec = nsec;
	if (BAD_MACH_TIMESPEC(&wait_time)) {
		return KERN_INVALID_VALUE;
	}

	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
	if (kr == KERN_SUCCESS) {
		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
		if (kr == KERN_SUCCESS) {
			int option = SEMAPHORE_OPTION_NONE;
			uint64_t deadline = 0;

			if (sec == 0 && nsec == 0) {
				option = SEMAPHORE_TIMEOUT_NOBLOCK;
			} else {
				deadline = semaphore_deadline(sec, nsec);
			}

			kr = semaphore_wait_internal(wait_semaphore,
			    signal_semaphore,
			    deadline, option,
			    caller_cont);
			semaphore_dereference(wait_semaphore);
		}
		semaphore_dereference(signal_semaphore);
	}
	return kr;
}


/*
 *	Routine:	semaphore_reference
 *
 *	Take out a reference on a semaphore.  This keeps the data structure
 *	in existence (but the semaphore may be deactivated).
 */
void
semaphore_reference(
	semaphore_t semaphore)
{
	os_ref_retain(&semaphore->ref_count);
}

/*
 *	Routine:	semaphore_dereference
 *
 *	Release a reference on a semaphore.  If this is the last reference,
 *	the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t semaphore)
{
	uint32_t collisions;
	spl_t spl_level;

	if (semaphore == NULL) {
		return;
	}

	if (os_ref_release(&semaphore->ref_count) > 0) {
		return;
	}

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	ipc_port_t port = semaphore->port;

	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_port_dealloc_kernel(port);
	}

	/*
	 * Lock the semaphore to lock in the owner task reference.
	 * Then continue to try to lock the task (inverse order).
	 */
	spl_level = splsched();
	semaphore_lock(semaphore);
	for (collisions = 0; semaphore->active; collisions++) {
		task_t task = semaphore->owner;

		assert(task != TASK_NULL);

		if (task_lock_try(task)) {
			semaphore_destroy_internal(task, semaphore);
			/* semaphore unlocked */
			splx(spl_level);
			task_unlock(task);
			goto out;
		}

		/* failed to get out-of-order locks */
		semaphore_unlock(semaphore);
		splx(spl_level);
		mutex_pause(collisions);
		spl_level = splsched();
		semaphore_lock(semaphore);
	}
	semaphore_unlock(semaphore);
	splx(spl_level);

out:
	zfree(semaphore_zone, semaphore);
}
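
/*
 * Debugger/stackshot support: recover the owning semaphore from its
 * embedded waitq and report the semaphore's port and owning task for
 * threads blocked on SEMAPHORE_EVENT.
 */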
#define WAITQ_TO_SEMA(wq) ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq)))
void
kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
	semaphore_t sem = WAITQ_TO_SEMA(waitq);
	assert(event == SEMAPHORE_EVENT);

	zone_require(semaphore_zone, sem);

	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
	if (sem->owner) {
		waitinfo->owner = pid_from_task(sem->owner);
	}
}