mach_node.c
1 /* 2 * Copyright (c) 2015-2020 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 /* File: kern/mach_node.h 29 * Author: Dean Reece 30 * Date: 2016 31 * 32 * Implementation of mach node support. 33 * This is the basis for flipc, which provides inter-node communication. 
34 */ 35 36 37 #include <mach/mach_types.h> 38 #include <mach/boolean.h> 39 #include <mach/kern_return.h> 40 41 #include <kern/kern_types.h> 42 #include <kern/assert.h> 43 44 #include <kern/host.h> 45 #include <kern/kalloc.h> 46 #include <kern/mach_node_link.h> 47 #include <kern/mach_node.h> 48 #include <kern/ipc_mig.h> // mach_msg_send_from_kernel_proper() 49 50 #include <ipc/port.h> 51 #include <ipc/ipc_types.h> 52 #include <ipc/ipc_init.h> 53 #include <ipc/ipc_kmsg.h> 54 #include <ipc/ipc_port.h> 55 #include <ipc/ipc_pset.h> 56 #include <ipc/ipc_table.h> 57 #include <ipc/ipc_entry.h> 58 59 #include <ipc/flipc.h> 60 61 #include <libkern/OSAtomic.h> // OSAddAtomic64(), OSCompareAndSwap() 62 #include <libkern/OSByteOrder.h> // OSHostByteOrder() 63 64 #pragma pack(4) 65 66 #define MNL_NAME_TABLE_SIZE (256) // Hash is evenly distributed, so ^2 is ok 67 #define MNL_NAME_HASH(name) (name % MNL_NAME_TABLE_SIZE) 68 69 /*** Visible outside mach_node layer ***/ 70 mach_node_id_t localnode_id = -1; // This node's FLIPC id. 
#if MACH_FLIPC
mach_node_t localnode;                  // This node's mach_node_t struct


/*** Private to mach_node layer ***/

/* Count of active-but-not-yet-published nodes; maintained under the node
 * table lock (incremented when a node becomes publishable, decremented in
 * mach_node_publish() on a successful send).
 */
static int mach_nodes_to_publish;
static mach_node_t mach_node_table[MACH_NODES_MAX];
static LCK_SPIN_DECLARE_ATTR(mach_node_table_lock_data,
    &ipc_lck_grp, &ipc_lck_attr);
#define MACH_NODE_TABLE_LOCK()     lck_spin_lock(&mach_node_table_lock_data)
#define MACH_NODE_TABLE_UNLOCK()   lck_spin_unlock(&mach_node_table_lock_data)

/* Next mnl name to hand out.  mnl_name_alloc() advances this by
 * MACH_NODES_MAX and mnl_name_table_init() seeds it with localnode_id, so
 * every name this node allocates is congruent to localnode_id modulo
 * MACH_NODES_MAX (names from different nodes never collide).
 */
static volatile SInt64 mnl_name_next;
static queue_head_t mnl_name_table[MNL_NAME_TABLE_SIZE];
static LCK_SPIN_DECLARE_ATTR(mnl_name_table_lock_data,
    &ipc_lck_grp, &ipc_lck_attr);
#define MNL_NAME_TABLE_LOCK()     lck_spin_lock(&mnl_name_table_lock_data)
#define MNL_NAME_TABLE_UNLOCK()   lck_spin_unlock(&mnl_name_table_lock_data)

static void mach_node_init(void);
static void mnl_name_table_init(void);
static void mach_node_table_init(void);
static void mach_node_publish(mach_node_t node);

static mach_node_t mach_node_alloc_init(mach_node_id_t node_id);
static kern_return_t mach_node_register(mach_node_t node);


/* mach_node_init() is run lazily when a node link driver registers
 * or the node special port is set.
 * The variable localnode_id is used to determine if init has already run.
 */
void
mach_node_init(void)
{
	mach_node_id_t node_id = 0;     // TODO: Read from device tree?
	/* The CAS flips localnode_id from its boot value to node_id exactly
	 * once; only the winning caller runs the table init routines.
	 * NOTE(review): assumes HOST_LOCAL_NODE equals the initial value of
	 * localnode_id (declared as -1 above) — confirm.
	 */
	if (OSCompareAndSwap((UInt32)(HOST_LOCAL_NODE),
	    (UInt32)node_id,
	    &localnode_id)) {
		printf("mach_node_init(): localnode_id=%d of %d\n",
		    localnode_id, MACH_NODES_MAX);
		mach_node_table_init();
		mnl_name_table_init();
	}       // TODO: else block until init is finished (init completion race)
}

/* One-time setup of the node table: clears every slot, then allocates and
 * initializes the local node's entry.  Called only from mach_node_init().
 */
void
mach_node_table_init(void)
{
	MACH_NODE_TABLE_LOCK();

	/* Start with an empty node table. */
	bzero(mach_node_table, sizeof(mach_node_t) * MACH_NODES_MAX);
	mach_nodes_to_publish = 0;

	/* Allocate localnode's struct (returned locked) */
	localnode = mach_node_for_id_locked(localnode_id, 1, 1);
	assert(MACH_NODE_VALID(localnode));

	MACH_NODE_TABLE_UNLOCK();

	/* Set up localnode's struct.
	 * NOTE(review): this bzero re-zeroes the whole struct (which
	 * mach_node_alloc_init() already zeroed) while the node lock taken
	 * above is still held — confirm the embedded lock storage tolerates
	 * being cleared here before the MACH_NODE_UNLOCK() below.
	 */
	bzero(localnode, sizeof(*localnode));
	localnode->info.datamodel = LOCAL_DATA_MODEL;
	localnode->info.byteorder = OSHostByteOrder();
	localnode->info.proto_vers_min = MNL_PROTOCOL_V1;
	localnode->info.proto_vers_max = MNL_PROTOCOL_V1;
	localnode->proto_vers = MNL_PROTOCOL_V1;
	localnode->published = 0;
	localnode->active = 1;

	MACH_NODE_UNLOCK(localnode);
}

/* Sends a publication message to the local node's bootstrap server.
 * This function is smart and will only send a notification if one is really
 * needed - it can be called speculatively on any node at any time.
 *
 * Note: MUST be called with the node table lock held.
 */

void
mach_node_publish(mach_node_t node)
{
	kern_return_t kr;

	if (!MACH_NODE_VALID(node) || (!node->active) || (node->published)) {
		return;         // node is invalid or not suitable for publication
	}
	ipc_port_t bs_port = localnode->bootstrap_port;
	if (!IP_VALID(bs_port)) {
		return;         // No bootstrap server to notify!
	}
	/* Node is suitable and server is present, so make registration message */
	struct mach_node_server_register_msg msg;

	msg.node_header.header.msgh_remote_port = bs_port;
	msg.node_header.header.msgh_size = sizeof(msg);
	msg.node_header.header.msgh_local_port = MACH_PORT_NULL;
	msg.node_header.header.msgh_voucher_port = MACH_PORT_NULL;
	msg.node_header.header.msgh_id = MACH_NODE_SERVER_MSG_ID;
	msg.node_header.node_id = node->info.node_id;
	msg.node_header.options = 0;
	msg.datamodel = node->info.datamodel;
	msg.byteorder = node->info.byteorder;

	if (node == localnode) {
		/* Local node: no secondary port to pass along */
		msg.node_header.identifier = MACH_NODE_SM_REG_LOCAL;
		msg.node_header.header.msgh_bits =
		    MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, 0);
	} else {
		/* Remote node: include its bootstrap proxy port as the
		 * message's local port so the server can reach it.
		 */
		msg.node_header.identifier = MACH_NODE_SM_REG_REMOTE;
		msg.node_header.header.msgh_local_port = node->bootstrap_port;
		msg.node_header.header.msgh_bits = MACH_MSGH_BITS_SET
		        (MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND, 0, 0);
	}

	kr = mach_msg_send_from_kernel_proper(&msg.node_header.header,
	    sizeof(msg));
	/* Only a successful send marks the node published and shrinks the
	 * pending-publication count; failures leave it eligible for retry. */
	if (kr == KERN_SUCCESS) {
		node->published = 1;
		mach_nodes_to_publish--;
	}
	printf("mach_node_publish(%d)=%d\n", node->info.node_id, kr);
}

/* Called whenever the node special port changes */
void
mach_node_port_changed(void)
{
	ipc_port_t bs_port;

	mach_node_init();       // Lazy init of mach_node layer

	/* Cleanup previous bootstrap port if necessary */
	MACH_NODE_LOCK(localnode);
	flipc_node_retire(localnode);
	bs_port = localnode->bootstrap_port;
	if (IP_VALID(bs_port)) {
		localnode->bootstrap_port = IP_NULL;
		// TODO: destroy send right to outgoing bs_port
	}

	/* NOTE(review): kernel_get_special_port()'s return value is not
	 * checked; only the assert below (DEBUG builds) catches a failure.
	 */
	kernel_get_special_port(host_priv_self(), HOST_NODE_PORT, &bs_port);
	assert(IP_VALID(bs_port));
	localnode->bootstrap_port = bs_port;
	flipc_node_prepare(localnode);
	MACH_NODE_UNLOCK(localnode);

	/* Cleanup the publication state of all nodes in the table */
	MACH_NODE_TABLE_LOCK();
	// TODO: Signup for bootstrap port death notifications
	localnode->active = 1;

	mach_nodes_to_publish = 0;

	int n;
	for (n = 0; n < MACH_NODES_MAX; n++) {
		mach_node_t np = mach_node_table[n];
		// Publish all active nodes (except the local node)
		if (!MACH_NODE_VALID(np)) {
			continue;
		}
		/* Reset publication state; the new bootstrap server has not
		 * heard about any node yet. */
		np->published = 0;
		if (np->active == 1) {
			mach_nodes_to_publish++;
		}
	}

	mach_node_publish(localnode);   // Always publish local node first

	for (n = 0; n < MACH_NODES_MAX; n++) {
		mach_node_publish(mach_node_table[n]);
	}

	MACH_NODE_TABLE_UNLOCK();

	// TODO: notify all active nodes we are bootstrapped
}

/* Allocate/init a mach_node struct and fill in the node_id field.
 * This does NOT insert the node struct into the node table.
 * Returns MACH_NODE_NULL if allocation fails.
 */
mach_node_t
mach_node_alloc_init(mach_node_id_t node_id)
{
	mach_node_t node = MACH_NODE_ALLOC();
	if (MACH_NODE_VALID(node)) {
		bzero(node, sizeof(struct mach_node));
		MACH_NODE_LOCK_INIT(node);
		node->info.node_id = node_id;
	}
	return node;
}


/* This function takes a mach_node struct with a completed info field and
 * registers it with the mach_node and flipc (if flipc is enabled) layers.
269 */ 270 kern_return_t 271 mach_node_register(mach_node_t node) 272 { 273 assert(MACH_NODE_VALID(node)); 274 mach_node_id_t nid = node->info.node_id; 275 assert(MACH_NODE_ID_VALID(nid)); 276 277 kern_return_t kr; 278 ipc_space_t proxy_space = IS_NULL; 279 ipc_pset_t pp_set = IPS_NULL; // pset for proxy ports 280 ipc_port_t bs_port = MACH_PORT_NULL; 281 ipc_port_t ack_port = MACH_PORT_NULL; 282 283 printf("mach_node_register(%d)\n", nid); 284 285 /* TODO: Support non-native byte order and data models */ 286 if ((node->info.byteorder != OSHostByteOrder()) || 287 (node->info.datamodel != LOCAL_DATA_MODEL)) { 288 printf("mach_node_register: unsupported byte order (%d) or width (%d)", 289 node->info.byteorder, node->info.datamodel); 290 return KERN_INVALID_ARGUMENT; 291 } 292 293 /* Create the space that holds all local rights assigned to <nid> */ 294 kr = ipc_space_create_special(&proxy_space); 295 if (kr != KERN_SUCCESS) { 296 goto out; 297 } 298 proxy_space->is_node_id = nid; 299 300 /* Create the bootstrap proxy port for this remote node */ 301 bs_port = ipc_port_alloc_special(proxy_space, IPC_PORT_INIT_MESSAGE_QUEUE); 302 if (bs_port == MACH_PORT_NULL) { 303 kr = KERN_RESOURCE_SHORTAGE; 304 goto out; 305 } 306 307 /* Create the control (ack) port for this remote node */ 308 ack_port = ipc_port_alloc_special(proxy_space, IPC_PORT_INIT_MESSAGE_QUEUE); 309 if (ack_port == MACH_PORT_NULL) { 310 kr = KERN_RESOURCE_SHORTAGE; 311 goto out; 312 } 313 314 /* Create the set that holds all proxy ports for this remote node */ 315 pp_set = ipc_pset_alloc_special(proxy_space); 316 if (pp_set == IPS_NULL) { 317 kr = KERN_RESOURCE_SHORTAGE; 318 goto out; 319 } 320 321 waitq_set_lazy_init_link(pp_set); 322 /* Add the bootstrap port to the proxy port set */ 323 uint64_t wq_link_id = waitq_link_reserve(NULL); 324 uint64_t wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, 325 WAITQ_DONT_LOCK); 326 ips_lock(pp_set); 327 ip_lock(bs_port); 328 ipc_pset_add(pp_set, 329 bs_port, 330 
&wq_link_id, 331 &wq_reserved_prepost); 332 ip_unlock(bs_port); 333 ips_unlock(pp_set); 334 335 waitq_link_release(wq_link_id); 336 waitq_prepost_release_reserve(wq_reserved_prepost); 337 338 /* Add the control port to the proxy port set */ 339 wq_link_id = waitq_link_reserve(NULL); 340 wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, 341 WAITQ_DONT_LOCK); 342 ips_lock(pp_set); 343 ip_lock(ack_port); 344 ipc_pset_add(pp_set, 345 ack_port, 346 &wq_link_id, 347 &wq_reserved_prepost); 348 ip_unlock(ack_port); 349 ips_unlock(pp_set); 350 351 waitq_link_release(wq_link_id); 352 waitq_prepost_release_reserve(wq_reserved_prepost); 353 354 // Setup mach_node struct 355 node->published = 0; 356 node->active = 1; 357 node->proxy_space = proxy_space; 358 node->proxy_port_set = pp_set; 359 node->bootstrap_port = bs_port; 360 node->proto_vers = node->info.proto_vers_max; 361 node->control_port = ack_port; 362 363 // Place new mach_node struct into node table 364 MACH_NODE_TABLE_LOCK(); 365 366 mach_node_t old_node = mach_node_table[nid]; 367 if (!MACH_NODE_VALID(old_node) || (old_node->dead)) { 368 node->antecedent = old_node; 369 flipc_node_prepare(node); 370 mach_node_table[nid] = node; 371 mach_nodes_to_publish++; 372 mach_node_publish(node); 373 kr = KERN_SUCCESS; 374 } else { 375 printf("mach_node_register: id %d already active!", nid); 376 kr = KERN_FAILURE; 377 } 378 MACH_NODE_TABLE_UNLOCK(); 379 380 out: 381 if (kr != KERN_SUCCESS) { // Dispose of whatever we allocated 382 if (pp_set) { 383 ips_lock(pp_set); 384 ipc_pset_destroy(proxy_space, pp_set); 385 } 386 387 if (bs_port) { 388 ipc_port_dealloc_special(bs_port, proxy_space); 389 } 390 391 if (ack_port) { 392 ipc_port_dealloc_special(ack_port, proxy_space); 393 } 394 395 if (proxy_space) { 396 ipc_space_terminate(proxy_space); 397 } 398 } 399 400 return kr; 401 } 402 403 404 /* Gets or allocates a locked mach_node struct for the specified <node_id>. 
405 * The current node is locked and returned if it is not dead, or if it is dead 406 * and <alloc_if_dead> is false. A new node struct is allocated, locked and 407 * returned if the node is dead and <alloc_if_dead> is true, or if the node 408 * is absent and <alloc_if_absent> is true. MACH_NODE_NULL is returned if 409 * the node is absent and <alloc_if_absent> is false. MACH_NODE_NULL is also 410 * returned if a new node structure was not able to be allocated. 411 * 412 * Note: This function must be called with the node table lock held! 413 */ 414 mach_node_t 415 mach_node_for_id_locked(mach_node_id_t node_id, 416 boolean_t alloc_if_dead, 417 boolean_t alloc_if_absent) 418 { 419 if ((node_id < 0) || (node_id >= MACH_NODES_MAX)) { 420 return MACH_NODE_NULL; 421 } 422 423 mach_node_t node = mach_node_table[node_id]; 424 425 if ((!MACH_NODE_VALID(node) && alloc_if_absent) || 426 (MACH_NODE_VALID(node) && node->dead && alloc_if_dead)) { 427 node = mach_node_alloc_init(node_id); 428 if (MACH_NODE_VALID(node)) { 429 node->antecedent = mach_node_table[node_id]; 430 mach_node_table[node_id] = node; 431 } 432 } 433 434 if (MACH_NODE_VALID(node)) { 435 MACH_NODE_LOCK(node); 436 } 437 438 return node; 439 } 440 441 442 443 /*** Mach Node Link Name and Hash Table Implementation ***/ 444 445 /* Allocate a new unique name and return it. 446 * Dispose of this with mnl_name_free(). 447 * Returns MNL_NAME_NULL on failure. 448 */ 449 mnl_name_t 450 mnl_name_alloc(void) 451 { 452 return (mnl_name_t)OSAddAtomic64(MACH_NODES_MAX, &mnl_name_next); 453 } 454 455 456 /* Deallocate a unique name that was allocated via mnl_name_alloc(). 457 */ 458 void 459 mnl_name_free(mnl_name_t name __unused) 460 { 461 ; // Nothing to do for now since we don't recycle mnl names. 462 } 463 464 465 /* Called once from mach_node_init(), this sets up the hash table structures. 
 */
void
mnl_name_table_init(void)
{
	MNL_NAME_TABLE_LOCK();

	// Set the first name to this node's bootstrap name
	mnl_name_next = localnode_id + MACH_NODES_MAX;

	/* Every hash bucket starts out as an empty queue. */
	for (int i = 0; i < MNL_NAME_TABLE_SIZE; i++) {
		queue_head_init(mnl_name_table[i]);
	}

	MNL_NAME_TABLE_UNLOCK();
}


/* Initialize the data structures in the mnl_obj structure at the head of the
 * provided object. This should be called on an object before it is passed to
 * any other mnl_obj* routine.
 */
void
mnl_obj_init(mnl_obj_t obj)
{
	queue_chain_init(obj->links);
	obj->name = MNL_NAME_NULL;
}


/* Search the local node's hash table for the object associated with a
 * mnl_name_t and return it.  Returns MNL_OBJ_NULL on failure.
 *
 * NOTE(review): the bucket is walked without MNL_NAME_TABLE_LOCK() — confirm
 * callers serialize against mnl_obj_insert()/mnl_obj_remove().  Also confirm
 * that qe_foreach_element leaves <obj> as MNL_OBJ_NULL (and not a cast of the
 * queue head) when the name is not present.
 */
mnl_obj_t
mnl_obj_lookup(mnl_name_t name)
{
	mnl_obj_t obj = MNL_OBJ_NULL;

	if (name != MNL_NAME_NULL) {
		qe_foreach_element(obj, &mnl_name_table[MNL_NAME_HASH(name)], links) {
			if (obj->name == name) {
				break;
			}
		}
	}
	return obj;
}


/* Search the local node's hash table for the object associated with a
 * mnl_name_t and remove it. The pointer to the removed object is returned so
 * that the caller can appropriately dispose of the object.
 * Returns MNL_OBJ_NULL on failure.
 *
 * NOTE(review): runs without the name table lock, and the loop keeps scanning
 * after remqueue() (no break) — presumably names are unique per bucket;
 * confirm, and confirm the not-found return value as for mnl_obj_lookup().
 */
mnl_obj_t
mnl_obj_remove(mnl_name_t name)
{
	mnl_obj_t obj = MNL_OBJ_NULL;

	if (name != MNL_NAME_NULL) {
		qe_foreach_element_safe(obj, &mnl_name_table[MNL_NAME_HASH(name)], links) {
			if (obj->name == name) {
				remqueue(&obj->links);
			}
		}
	}
	return obj;
}


/* Insert an object into the local node's hash table. If the name of the
 * provided object is MNL_NAME_NULL then a new mnl_name is allocated and
 * assigned to the object.
 * Returns KERN_SUCCESS if obj was added to hash table
 * Returns KERN_INVALID_ARGUMENT if obj is invalid
 * Returns KERN_NAME_EXISTS if obj's name already exists in hash table
 *
 * NOTE(review): KERN_NAME_EXISTS is documented above but this implementation
 * never checks for duplicates and never returns it — confirm intent.
 */
kern_return_t
mnl_obj_insert(mnl_obj_t obj)
{
	if (!MNL_OBJ_VALID(obj)) {
		return KERN_INVALID_ARGUMENT;
	}

	MNL_NAME_TABLE_LOCK();

	if (!MNL_NAME_VALID(obj->name)) {
		// obj is unnamed, so lets allocate a fresh one
		obj->name = mnl_name_alloc();
	}

	enqueue(&mnl_name_table[MNL_NAME_HASH(obj->name)], &obj->links);
	MNL_NAME_TABLE_UNLOCK();

	/* Sanity check: locally-generated names should stay within the first
	 * allocation cycle (see mnl_name_alloc()). */
	if (obj->name >= (MACH_NODES_MAX << 1)) {
		panic("Unexpected MNL_NAME %lld in obj %p", obj->name, obj);
	}

	return KERN_SUCCESS;
}


/*** Mach Node Link Driver Interface Implementation ***/

/* Allocate a mnl_msg struct plus additional payload. Link drivers are not
 * required to use this to allocate messages; any wired and mapped kernel
 * memory is acceptable.
 *
 * Arguments:
 *      payload Number of additional bytes to allocate for message payload
 *      flags   Currently unused; 0 should be passed
 *
 * Return values:
 *      MNL_MSG_NULL:   Allocation failed
 *      *:              Pointer to new mnl_msg struct of requested size
 */
mnl_msg_t
mnl_msg_alloc(int payload,
    uint32_t flags __unused)
{
	mnl_msg_t msg = kalloc(MNL_MSG_SIZE + payload);

	if (MNL_MSG_VALID(msg)) {
		bzero(msg, MNL_MSG_SIZE);       // Only zero the header
		/* Record payload size; mnl_msg_free() relies on this to free
		 * the correct total size. */
		msg->size = payload;
	}

	return msg;
}


/* Free a mnl_msg struct allocated by mnl_msg_alloc().
597 * 598 * Arguments: 599 * msg Pointer to the message buffer to be freed 600 * flags Currently unused; 0 should be passed 601 */ 602 void 603 mnl_msg_free(mnl_msg_t msg, 604 uint32_t flags __unused) 605 { 606 if (MNL_MSG_VALID(msg)) { 607 kfree(msg, MNL_MSG_SIZE + msg->size); 608 } 609 } 610 611 612 /* The link driver calls this to setup a new (or restarted) node, and to get 613 * an mnl_node_info struct for use as a parameter to other mnl functions. 614 * If MNL_NODE_NULL is returned, the operation failed. Otherwise, a pointer 615 * to a new mnl_node struct is returned. The caller should set all fields 616 * in the structure, then call mnl_register() to complete node registration. 617 * 618 * Arguments: 619 * nid The id of the node to be instantiated 620 * flags Currently unused; 0 should be passed 621 * 622 * Return values: 623 * MNL_NODE_NULL: Operation failed 624 * *: Pointer to a new mnl_node struct 625 */ 626 mnl_node_info_t 627 mnl_instantiate(mach_node_id_t nid, 628 uint32_t flags __unused) 629 { 630 mach_node_init(); // Lazy init of mach_node layer 631 632 if ((nid == localnode_id) || !MACH_NODE_ID_VALID(nid)) { 633 return MNL_NODE_NULL; 634 } 635 636 return (mnl_node_info_t)mach_node_alloc_init(nid); 637 } 638 639 /* The link driver calls mnl_register() to complete the node registration 640 * process. KERN_SUCCESS is returned if registration succeeded, otherwise 641 * an error is returned. 
642 * 643 * Arguments: 644 * node Pointer to the node's mnl_node structure 645 * flags Currently unused; 0 should be passed 646 * 647 * Return values: 648 * KERN_SUCCESS: Registration succeeded 649 * KERN_INVALID_ARGUMENT: Field(s) in <node> contained unacceptable values 650 * KERN_*: Values returned from underlying functions 651 */ 652 kern_return_t 653 mnl_register(mnl_node_info_t node, 654 uint32_t flags __unused) 655 { 656 if (MNL_NODE_VALID(node) && (node->node_id != localnode_id)) { 657 return mach_node_register((mach_node_t)node); 658 } 659 660 return KERN_INVALID_ARGUMENT; 661 } 662 663 664 /* The link driver calls this to report that the link has been raised in one 665 * or both directions. If the link is two uni-directional channels, each link 666 * driver will independently call this function, each only raising the link 667 * they are responsible for. The mach_node layer will not communicate with 668 * the remote node until both rx and tx links are up. 669 * 670 * Arguments: 671 * node Pointer to the node's mnl_node structure 672 * link Indicates which link(s) are up (see MNL_LINK_* defines) 673 * flags Currently unused; 0 should be passed 674 * 675 * Return values: 676 * KERN_SUCCESS: Link state changed successfully. 677 * KERN_INVALID_ARGUMENT: An argument value was not allowed. 678 * KERN_*: Values returned from underlying functions. 
 */
kern_return_t
mnl_set_link_state(mnl_node_info_t node,
    int link,
    uint32_t flags __unused)
{
	kern_return_t kr;
	mach_node_t mnode = (mach_node_t)node;

	/* Reject: invalid node, a request that raises nothing, or one that
	 * re-raises a link that is already up.
	 * NOTE(review): mnode->link is read here before the node lock is
	 * taken below — confirm concurrent callers cannot race this check.
	 */
	if (!MACH_NODE_VALID(mnode) || !(link & MNL_LINK_UP) || (link & mnode->link)) {
		return KERN_INVALID_ARGUMENT;   // bad node, or bad link argument
	}
	MACH_NODE_LOCK(mnode);

	if (mnode->dead) {
		kr = KERN_NODE_DOWN;
	} else {
		/* Accumulate the newly-raised direction(s) */
		mnode->link |= link;
		kr = KERN_SUCCESS;
	}

	MACH_NODE_UNLOCK(mnode);

	return kr;
}

/* The link driver calls this to indicate a node has terminated and is no
 * longer available for messaging.  This may be due to a crash or an orderly
 * shutdown, but either way the remote node no longer retains any state about
 * the remaining nodes.  References held on behalf of the terminated node
 * will be cleaned up.  After this is called, both the rx and tx links are
 * marked as down.  If the remote node restarts, the link driver can bring
 * up the link using mnl_instantiate() again.
 *
 * Arguments:
 *      node    Pointer to the node's mnl_node structure
 *      flags   Currently unused; 0 should be passed
 *
 * Return values:
 *      KERN_SUCCESS:           Node was terminated.
 *      KERN_INVALID_ARGUMENT:  Node id was invalid or non-existent.
 *      KERN_*: Values returned from underlying functions.
 */
kern_return_t
mnl_terminate(mnl_node_info_t node,
    uint32_t flags __unused)
{
	kern_return_t kr = KERN_SUCCESS;
	mach_node_t mnode = (mach_node_t)node;

	if (!MACH_NODE_VALID(mnode)) {
		return KERN_INVALID_ARGUMENT;   // bad node
	}
	MACH_NODE_LOCK(mnode);
	if (mnode->dead) {
		kr = KERN_NODE_DOWN;    // node is already terminated
		goto unlock;
	}

	/* Mark the node fully down before tearing anything else away */
	mnode->link = MNL_LINK_DOWN;
	mnode->active = 0;
	mnode->suspended = 0;
	mnode->dead = 1;

	flipc_node_retire(mnode);

	// Wake any threads sleeping on the proxy port set
	if (mnode->proxy_port_set != IPS_NULL) {
		/* ipc_pset_destroy() consumes the ips lock taken here */
		ips_lock(mnode->proxy_port_set);
		ipc_pset_destroy(mnode->proxy_space, mnode->proxy_port_set);
		mnode->proxy_port_set = IPS_NULL;
	}

	// TODO: Inform node name server (if registered) of termination

unlock:
	MACH_NODE_UNLOCK(mnode);
	return kr;
}


/* The link driver calls this to deliver an incoming message.  Note that the
 * link driver must dispose of the memory pointed to by <msg> after the
 * function call returns.
 *
 * Arguments:
 *      node    Pointer to the node's mnl_node structure
 *      msg     Pointer to the message buffer
 *      flags   Currently unused; 0 should be passed
 */
void
mnl_msg_from_node(mnl_node_info_t node __unused,
    mnl_msg_t msg,
    uint32_t flags __unused)
{
	assert(MNL_MSG_VALID(msg));
	assert(MACH_NODE_ID_VALID(msg->node_id));
	assert(MNL_NODE_VALID(node));

	/* If node message forwarding is supported, the from_node_id arg may not
	 * match fmsg->info.node_id.  The former is the node from which we received
	 * the message; the latter is the node that generated the message originally.
	 * We always use fmsg->info.node_id, which is where the ack needs to go.
	 */

	/* Dispatch on the message's target subsystem */
	switch (msg->sub) {
	case MACH_NODE_SUB_FLIPC:
		flipc_msg_from_node((mach_node_t)node, msg, flags);
		break;

	default:
#if DEBUG
		PE_enter_debugger("mnl_msg_from_node(): Invalid subsystem");
#endif
		break;
	}
}


/* The link driver calls this to fetch the next message to transmit.
 * This function will block until a message is available, or will return
 * FLIPC_MSG_NULL if the link is to be terminated.  After the caller has
 * completed the transmission and no longer needs the msg buffer, it should
 * call mnl_msg_complete().
 *
 * Arguments:
 *      node    Pointer to the node's mnl_node structure
 *      flags   Currently unused; 0 should be passed
 */
mnl_msg_t
mnl_msg_to_node(mnl_node_info_t node __unused,
    uint32_t flags __unused)
{
	assert(MNL_NODE_VALID(node));

#if DEBUG
	/* Name the link driver's transmit thread for debugging visibility */
	thread_set_thread_name(current_thread(), "MNL_Link");
#endif

	return flipc_msg_to_remote_node((mach_node_t)node, 0);
}


/* The link driver calls this to indicate that the specified msg buffer has
 * been sent over the link and can be deallocated.
 *
 * Arguments:
 *      node    Pointer to the node's mnl_node structure
 *      msg     Pointer to the message buffer
 *      flags   Currently unused; 0 should be passed
 */
void
mnl_msg_complete(mnl_node_info_t node __unused,
    mnl_msg_t msg,
    uint32_t flags)
{
	/* Route the buffer back to whichever subsystem allocated it */
	switch (msg->sub) {
	case MACH_NODE_SUB_NODE:
		mnl_msg_free(msg, flags);
		break;

	case MACH_NODE_SUB_FLIPC:
		flipc_msg_free(msg, flags);
		break;

	default:
#if DEBUG
		PE_enter_debugger("mnl_msg_complete(): Invalid subsystem");
#endif
		break;
	}
}

#else // MACH_FLIPC not configured, so provide KPI stubs

/* With MACH_FLIPC disabled, the mnl_* KPI surface must still link; every
 * entry point below is a failure/no-op stub.
 */

mnl_msg_t
mnl_msg_alloc(int payload __unused, uint32_t flags __unused)
{
	return MNL_MSG_NULL;
}

void
mnl_msg_free(mnl_msg_t msg __unused, uint32_t flags __unused)
{
	return;
}

mnl_node_info_t
mnl_instantiate(mach_node_id_t nid __unused, uint32_t flags __unused)
{
	return MNL_NODE_NULL;
}

kern_return_t
mnl_register(mnl_node_info_t node __unused, uint32_t flags __unused)
{
	return KERN_FAILURE;
}

kern_return_t
mnl_set_link_state(mnl_node_info_t node __unused,
    int link __unused,
    uint32_t flags __unused)
{
	return KERN_FAILURE;
}

kern_return_t
mnl_terminate(mnl_node_info_t node __unused, uint32_t flags __unused)
{
	return KERN_FAILURE;
}

void
mnl_msg_from_node(mnl_node_info_t node __unused,
    mnl_msg_t msg __unused,
    uint32_t flags __unused)
{
	return;
}

mnl_msg_t
mnl_msg_to_node(mnl_node_info_t node __unused, uint32_t flags __unused)
{
	return MNL_MSG_NULL;
}

void
mnl_msg_complete(mnl_node_info_t node __unused,
    mnl_msg_t msg __unused,
    uint32_t flags __unused)
{
	return;
}

#endif // MACH_FLIPC