// src/call.cpp
   1  /**
   2   * This file is part of Darling.
   3   *
   4   * Copyright (C) 2021 Darling developers
   5   *
   6   * Darling is free software: you can redistribute it and/or modify
   7   * it under the terms of the GNU General Public License as published by
   8   * the Free Software Foundation, either version 3 of the License, or
   9   * (at your option) any later version.
  10   *
  11   * Darling is distributed in the hope that it will be useful,
  12   * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14   * GNU General Public License for more details.
  15   *
  16   * You should have received a copy of the GNU General Public License
  17   * along with Darling.  If not, see <http://www.gnu.org/licenses/>.
  18   */
  19  
  20  #define _GNU_SOURCE 1
  21  #include <darlingserver/call.hpp>
  22  #include <darlingserver/server.hpp>
  23  #include <sys/uio.h>
  24  
  25  #include <darlingserver/logging.hpp>
  26  #include <darlingserver/duct-tape.h>
  27  #include <darlingserver/config.hpp>
  28  #include <sys/fcntl.h>
  29  #include <sys/syscall.h>
  30  #include <darlingserver/kqchan.hpp>
  31  
// Log channel for all incoming RPC calls handled in this file.
static DarlingServer::Log callLog("calls");

// Log channel dedicated to outgoing RPC replies.
DarlingServer::Log DarlingServer::Call::rpcReplyLog("replies");
  35  
  36  std::shared_ptr<DarlingServer::Call> DarlingServer::Call::callFromMessage(Message&& requestMessage) {
  37  	if (requestMessage.data().size() < sizeof(dserver_rpc_callhdr_t)) {
  38  		throw std::invalid_argument("Message buffer was too small for call header");
  39  	}
  40  
  41  	dserver_rpc_callhdr_t* header = reinterpret_cast<dserver_rpc_callhdr_t*>(requestMessage.data().data());
  42  	std::shared_ptr<Call> result = nullptr;
  43  	std::shared_ptr<Process> process = nullptr;
  44  	std::shared_ptr<Thread> thread = nullptr;
  45  
  46  	// first, make sure we know this call number
  47  	switch (header->number) {
  48  		case dserver_callnum_s2c:
  49  		case dserver_callnum_push_reply:
  50  		DSERVER_VALID_CALLNUM_CASES
  51  			break;
  52  
  53  		default:
  54  			throw std::invalid_argument("Invalid call number");
  55  	}
  56  
  57  	if ((header->number & DSERVER_CALL_UNMANAGED_FLAG) == 0) {
  58  		// now let's lookup (and possibly create) the process and thread making this call
  59  		process = processRegistry().registerIfAbsent(header->pid, [&]() {
  60  			std::shared_ptr<Process> tmp = nullptr;
  61  
  62  			int lifetimePipe = -1;
  63  
  64  			if (header->number == dserver_callnum_checkin) {
  65  				auto checkinCall = reinterpret_cast<dserver_rpc_call_checkin_t*>(header);
  66  				if (checkinCall->body.lifetime_listener_pipe != -1) {
  67  					lifetimePipe = requestMessage.extractDescriptorAtIndex(checkinCall->body.lifetime_listener_pipe);
  68  					checkinCall->body.lifetime_listener_pipe = -1;
  69  				}
  70  			}
  71  
  72  			try {
  73  				tmp = std::make_shared<Process>(requestMessage.pid(), header->pid, static_cast<Process::Architecture>(header->architecture), lifetimePipe);
  74  			} catch (std::system_error e) {
  75  				return tmp;
  76  			}
  77  
  78  			Server::sharedInstance().monitorProcess(tmp);
  79  			return tmp;
  80  		});
  81  
  82  		if (!process) {
  83  			callLog.error() << "Received call from non-existent process?" << callLog.endLog;
  84  
  85  			// ignore this call
  86  			// TODO: instead of ignoring it, we should return a generic reply indicating `-ESRCH` or something like that.
  87  			return nullptr;
  88  		}
  89  
  90  		thread = threadRegistry().registerIfAbsent(header->tid, [&]() {
  91  			std::shared_ptr<Thread> tmp = nullptr;
  92  
  93  			void* stackHint = nullptr;
  94  
  95  			if (header->number == dserver_callnum_checkin) {
  96  				auto checkinCall = reinterpret_cast<dserver_rpc_call_checkin_t*>(header);
  97  				stackHint = reinterpret_cast<void*>(checkinCall->body.stack_hint);
  98  			}
  99  
 100  			try {
 101  				tmp = std::make_shared<Thread>(process, header->tid, stackHint);
 102  			} catch (std::system_error e) {
 103  				return tmp;
 104  			}
 105  
 106  			tmp->setAddress(requestMessage.address());
 107  			tmp->registerWithProcess();
 108  			return tmp;
 109  		});
 110  
 111  		if (!thread) {
 112  			callLog.error() << "Received call from non-existent thread?" << callLog.endLog;
 113  
 114  			// ignore this call
 115  			// TODO: instead of ignoring it, we should return a generic reply indicating `-ESRCH` or something like that.
 116  			return nullptr;
 117  		}
 118  
 119  		thread->setAddress(requestMessage.address());
 120  
 121  		if (process->id() != requestMessage.pid()) {
 122  			throw std::runtime_error("System-reported message PID != darlingserver-recorded PID");
 123  		}
 124  	}
 125  
 126  	auto pidString = (process) ? (std::to_string(process->id()) + " (" + std::to_string(process->nsid()) + ")") : (std::to_string(header->pid) + " (-1)");
 127  	auto tidString = (thread) ? (std::to_string(thread->id()) + " (" + std::to_string(thread->nsid()) + ")") : (std::to_string(header->tid) + " (-1)");
 128  	callLog.debug() << "Received call #" << header->number << " (" << dserver_callnum_to_string(header->number) << ") from PID " << pidString << ", TID " << tidString << callLog.endLog;
 129  
 130  	if (header->number == dserver_callnum_s2c) {
 131  		// this is an S2C reply
 132  
 133  		{
 134  			std::unique_lock lock(thread->_rwlock);
 135  
 136  			if (thread->_s2cReply) {
 137  				throw std::runtime_error("Received S2C reply but thread already had one pending");
 138  			}
 139  
 140  			thread->_s2cReply = std::move(requestMessage);
 141  		}
 142  
 143  		dtape_semaphore_up(thread->_s2cReplySempahore);
 144  
 145  		return nullptr;
 146  	} else if (header->number == dserver_callnum_push_reply) {
 147  		// this is a reply push
 148  		// (used to send interrupted replies back to the server)
 149  
 150  		auto pushReplyCall = reinterpret_cast<const dserver_rpc_call_push_reply_t*>(requestMessage.data().data());
 151  		Message replyToSave(pushReplyCall->reply_size, 0);
 152  
 153  		// extract the reply-push synchronization pipe
 154  		auto pipeDesc = requestMessage.extractDescriptorAtIndex(requestMessage.descriptors().size() - 1);
 155  		char tmp = 1;
 156  
 157  		if (pipeDesc < 0) {
 158  			throw std::runtime_error("Failed to extract reply-push synchronization pipe");
 159  		}
 160  
 161  		if (!process->readMemory(pushReplyCall->reply, replyToSave.data().data(), pushReplyCall->reply_size)) {
 162  			throw std::runtime_error("Failed to read client-pushed reply body");
 163  		}
 164  
 165  		replyToSave.replaceDescriptors(requestMessage.descriptors());
 166  		requestMessage.replaceDescriptors({});
 167  
 168  		replyToSave.setAddress(requestMessage.address());
 169  
 170  		{
 171  			std::unique_lock lock(thread->_rwlock);
 172  			if (thread->_pendingCall && thread->_pendingCall->number() == Call::Number::InterruptEnter) {
 173  				// this means the client got interrupted after we had already sent a reply for the interrupted call,
 174  				// the client saw this unexpected reply while waiting for interrupt_enter to respond and sent it back to us,
 175  				// and we received both calls (interrupt_enter and push_reply) at the same time
 176  				thread->_pendingSavedReply = std::move(replyToSave);
 177  			} else {
 178  				if (thread->_interrupts.empty()) {
 179  					throw std::runtime_error("Client tried to push reply outside of interrupt");
 180  				}
 181  				if (thread->_interrupts.top().savedReply) {
 182  					throw std::runtime_error("Client-pushed reply overwriting existing saved reply");
 183  				}
 184  				thread->_interrupts.top().savedReply = std::move(replyToSave);
 185  			}
 186  		}
 187  
 188  		callLog.debug() << *thread << ": Saved client-pushed reply (" << ((thread->_pendingSavedReply) ? "pending" : "normal") << ")" << callLog.endLog;
 189  
 190  		// write a byte to the pipe so the caller can continue
 191  		write(pipeDesc, &tmp, sizeof(tmp));
 192  		// and close it
 193  		close(pipeDesc);
 194  
 195  		return nullptr;
 196  	}
 197  
 198  	// finally, let's construct the call class
 199  
 200  	#define CALL_CASE(_callName, _className) \
 201  		case dserver_callnum_ ## _callName: { \
 202  			if (requestMessage.data().size() < sizeof(dserver_rpc_call_ ## _callName ## _t)) { \
 203  				throw std::invalid_argument("Message buffer was too small for dserver_call_" #_callName "_t"); \
 204  			} \
 205  			result = std::make_shared<_className>(thread, reinterpret_cast<dserver_rpc_call_ ## _callName ## _t*>(header), std::move(requestMessage)); \
 206  		} break;
 207  
 208  	switch (header->number) {
 209  		DSERVER_CONSTRUCT_CASES
 210  
 211  		default:
 212  			throw std::invalid_argument("Invalid call number");
 213  	}
 214  
 215  	#undef CALL_CASE
 216  
 217  	if (thread) {
 218  		thread->setPendingCall(result);
 219  		return result;
 220  	} else {
 221  		Thread::kernelAsync([result]() {
 222  			result->processCall();
 223  		});
 224  		return nullptr;
 225  	}
 226  };
 227  
// Constructs the base Call: holds a weak reference to the calling thread,
// the address to send the reply to, and a copy of the RPC call header.
DarlingServer::Call::Call(std::shared_ptr<Thread> thread, Address replyAddress, dserver_rpc_callhdr_t* callHeader):
	_thread(thread),
	_replyAddress(replyAddress),
	_header(*callHeader)
	{};
 233  
 234  DarlingServer::Call::~Call() {};
 235  
 236  std::shared_ptr<DarlingServer::Thread> DarlingServer::Call::thread() const {
 237  	return _thread.lock();
 238  };
 239  
// Base implementation: this call type does not support a basic (code-only) reply.
void DarlingServer::Call::sendBasicReply(int resultCode) {
	throw std::runtime_error("This call cannot send a basic reply");
};
 243  
// Base implementation: this call type does not support a BSD-style (code + value) reply.
void DarlingServer::Call::sendBSDReply(int resultCode, uint32_t returnValue) {
	throw std::runtime_error("This call cannot send a BSD reply");
};
 247  
// Base implementation: by default, a call is not an XNU trap.
bool DarlingServer::Call::isXNUTrap() const {
	return false;
};
 251  
// Base implementation: by default, a call is not a BSD trap.
bool DarlingServer::Call::isBSDTrap() const {
	return false;
};
 255  
// Hands a fully-built reply message off to the server for delivery.
void DarlingServer::Call::sendReply(Message&& reply) {
	Server::sharedInstance().sendMessage(std::move(reply));
};
 259  
 260  //
 261  // call processing
 262  //
 263  
 264  /*
 265   *
 266   * A note about RPC wrappers:
 267   *
 268   * The auto-generated RPC wrappers provide both client-side wrappers as well as server-side wrappers.
 269   * The server-side wrappers automatically handle a few things like replies and descriptors.
 270   *
 271   * Replies:
 272   * The RPC wrappers provide a custom `_sendReply` method specific to each call class.
 273   * This method takes the result/status code as its first parameter followed by the return parameters
 274   * specified in the call interface. When a call is done processing, it simply calls `_sendReply` with the necessary
 275   * parameters and the RPC wrappers will take care of setting up the message and loading it onto the reply queue
 276   * for the server to send it out.
 277   *
 278   * Descriptors:
 279   * The RPC wrappers automatically handle ownership of descriptors, both incoming and outgoing.
 280   *
 281   * Incoming descriptors are extracted from the message and ownership is moved into the call instance.
 282   * The call processing code can use the descriptor however it likes while the call instance is still alive.
 283   * If it would like to move ownership out of the call instance, it can set the descriptor in the `_body` to `-1`.
 284   * Descriptors still left in the `_body` when the call instance is destroyed are automatically closed.
 285   *
 286   * Ownership of outgoing descriptors is passed into the reply message. In other words, when a descriptor
 287   * is given to `_sendReply`, the call instance loses ownership of that descriptor. If the call instance
 288   * would like to retain ownership, it should `dup()` the descriptor and pass the `dup()`ed descriptor to `_sendReply` instead.
 289   *
 290   */
 291  
 292  void DarlingServer::Call::Checkin::processCall() {
 293  	// the Call instance creation already took care of registering the process and thread.
 294  
 295  	int code = 0;
 296  
 297  	if (auto thread = _thread.lock()) {
 298  		if (auto process = thread->process()) {
 299  			// the process needs to know when the checkin occurs, in case it has a pending replacement
 300  			// and also to notify its parent about when the fork is complete
 301  			process->notifyCheckin(static_cast<Process::Architecture>(_header.architecture));
 302  		} else {
 303  			code = -ESRCH;
 304  		}
 305  	} else {
 306  		code = -ESRCH;
 307  	}
 308  
 309  	_sendReply(code);
 310  };
 311  
// Handles a client check-out: either a plain thread exit, or (when an exec
// listener pipe is provided) an execve in progress, in which case we monitor
// the pipe to learn whether the execve succeeded.
void DarlingServer::Call::Checkout::processCall() {
	int code = 0;

	if (auto thread = _thread.lock()) {
		if (auto process = thread->process()) {
			if (_body.exec_listener_pipe >= 0) {
				// this is actually an execve;
				// let's monitor the FD we got

				// make it non-blocking
				int flags = fcntl(_body.exec_listener_pipe, F_GETFL);
				if (flags < 0) {
					code = -errno;
				} else {
					flags |= O_NONBLOCK;
					if (fcntl(_body.exec_listener_pipe, F_SETFL, flags) < 0) {
						code = -errno;
					} else {
						// now monitor it
						auto fd = std::make_shared<FD>(_body.exec_listener_pipe);
						_body.exec_listener_pipe = -1; // the FD instance now owns the descriptor

						auto replacingWithDarlingProcess = _body.executing_macho;

						// capture the process weakly so the monitor doesn't keep it alive
						std::weak_ptr<Process> weakProcess = process;
						Server::sharedInstance().addMonitor(std::make_shared<Monitor>(fd, Monitor::Event::HangUp, false, true, [fd, weakProcess, replacingWithDarlingProcess](std::shared_ptr<Monitor> monitor, Monitor::Event events) {
							Server::sharedInstance().removeMonitor(monitor);

							auto process = weakProcess.lock();

							if (!process) {
								// the process died...
								return;
							}

							// exec-pipe protocol: EOF (0 bytes) means the execve succeeded
							// (the pipe was closed on exec); a byte means it failed.
							char tmp;
							int result = read(fd->fd(), &tmp, sizeof(tmp));

							if (result < 0) {
								// we shouldn't even get EAGAIN
								throw std::system_error(errno, std::generic_category(), "Failed to read from exec listener pipe");
							}

							if (result == 0) {
								// the execve succeeded
								if (replacingWithDarlingProcess) {
									process->setPendingReplacement();
								} else {
									// the Darling process was replaced with a non-Darling process
									// treat it like death
									process->notifyDead();
								}
							} else {
								// the execve failed
								// do nothing in this case
							}
						}));
					}
				}
			} else {
				thread->notifyDead();

				// if this was the last thread in the process, it'll be automatically unregistered
			}
		} else {
			code = -ESRCH;
		}
	} else {
		code = -ESRCH;
	}

	// clear the thread pointer so that the reply will be sent directly through the server
	// (otherwise, we would attempt to send it through the thread, which is now dead)
	_thread.reset();

	_sendReply(code);
};
 389  
 390  void DarlingServer::Call::VchrootPath::processCall() {
 391  	int code = 0;
 392  	size_t fullLength = 0;
 393  
 394  	if (auto thread = _thread.lock()) {
 395  		if (auto process = thread->process()) {
 396  			if (_body.buffer_size > 0) {
 397  				auto tmpstr = process->vchrootPath().substr(0, _body.buffer_size - 1);
 398  				auto len = std::min(tmpstr.length() + 1, _body.buffer_size);
 399  
 400  				fullLength = process->vchrootPath().length();
 401  
 402  				if (!process->writeMemory(_body.buffer, tmpstr.c_str(), len, &code)) {
 403  					// writeMemory returns a positive error code, but we want a negative one
 404  					code = -code;
 405  				}
 406  			}
 407  		} else {
 408  			code = -ESRCH;
 409  		}
 410  	} else {
 411  		code = -ESRCH;
 412  	}
 413  
 414  	_sendReply(code, fullLength);
 415  };
 416  
 417  void DarlingServer::Call::TaskSelfTrap::processCall() {
 418  	const auto taskSelfPort = dtape_task_self_trap();
 419  	_sendReply(0, taskSelfPort);
 420  };
 421  
 422  void DarlingServer::Call::HostSelfTrap::processCall() {
 423  	const auto hostSelfPort = dtape_host_self_trap();
 424  	_sendReply(0, hostSelfPort);
 425  };
 426  
 427  void DarlingServer::Call::ThreadSelfTrap::processCall() {
 428  	const auto threadSelfPort = dtape_thread_self_trap();
 429  	_sendReply(0, threadSelfPort);
 430  };
 431  
 432  void DarlingServer::Call::MachReplyPort::processCall() {
 433  	const auto machReplyPort = dtape_mach_reply_port();
 434  	_sendReply(0, machReplyPort);
 435  };
 436  
 437  void DarlingServer::Call::Kprintf::processCall() {
 438  	static auto kprintfLog = Log("kprintf");
 439  	int code = 0;
 440  
 441  	if (auto thread = _thread.lock()) {
 442  		if (auto process = thread->process()) {
 443  			char* tmp = (char*)malloc(_body.string_length + 1);
 444  
 445  			if (tmp) {
 446  				if (process->readMemory(_body.string, tmp, _body.string_length, &code)) {
 447  					size_t len = _body.string_length;
 448  
 449  					// strip trailing whitespace
 450  					while (len > 0 && isspace(tmp[len - 1])) {
 451  						--len;
 452  					}
 453  					tmp[len] = '\0';
 454  
 455  					kprintfLog.info() << tmp << kprintfLog.endLog;
 456  				} else {
 457  					// readMemory returns a positive error code, but we want a negative one
 458  					code = -code;
 459  				}
 460  
 461  				free(tmp);
 462  			} else {
 463  				code = -ENOMEM;
 464  			}
 465  		} else {
 466  			code = -ESRCH;
 467  		}
 468  	} else {
 469  		code = -ESRCH;
 470  	}
 471  
 472  	_sendReply(code);
 473  };
 474  
 475  void DarlingServer::Call::StartedSuspended::processCall() {
 476  	int code = 0;
 477  	bool suspended = false;
 478  
 479  	if (auto thread = _thread.lock()) {
 480  		if (auto process = thread->process()) {
 481  			suspended = process->startSuspended();
 482  			process->setStartSuspended(false);
 483  		} else {
 484  			code = -ESRCH;
 485  		}
 486  	} else {
 487  		code = -ESRCH;
 488  	}
 489  
 490  	_sendReply(code, suspended);
 491  };
 492  
 493  void DarlingServer::Call::GetTracer::processCall() {
 494  	int code = 0;
 495  	int32_t tracer = 0;
 496  
 497  	if (auto thread = _thread.lock()) {
 498  		if (auto process = thread->process()) {
 499  			if (auto tracerProcess = process->tracerProcess()) {
 500  				tracer = tracerProcess->nsid();
 501  			} else {
 502  				// leave `tracer` as 0
 503  			}
 504  		} else {
 505  			code = -ESRCH;
 506  		}
 507  	} else {
 508  		code = -ESRCH;
 509  	}
 510  
 511  	_sendReply(code, tracer);
 512  };
 513  
 514  void DarlingServer::Call::Uidgid::processCall() {
 515  	int code = 0;
 516  	int uid = -1;
 517  	int gid = -1;
 518  
 519  	if (auto thread = _thread.lock()) {
 520  		if (auto process = thread->process()) {
 521  			// HACK
 522  			// we shouldn't need to access _dtapeTask; Process should provide a method for this (but it doesn't yet because i'm not sure how to make that API feel at-home in C++)
 523  			dtape_task_uidgid(process->_dtapeTask, _body.new_uid, _body.new_gid, &uid, &gid);
 524  		} else {
 525  			code = -ESRCH;
 526  		}
 527  	} else {
 528  		code = -ESRCH;
 529  	}
 530  
 531  	_sendReply(code, uid, gid);
 532  };
 533  
 534  void DarlingServer::Call::SetThreadHandles::processCall() {
 535  	int code = 0;
 536  
 537  	if (auto thread = _thread.lock()) {
 538  		thread->setThreadHandles(_body.pthread_handle, _body.dispatch_qaddr);
 539  	} else {
 540  		code = -ESRCH;
 541  	}
 542  
 543  	_sendReply(code);
 544  };
 545  
 546  void DarlingServer::Call::Vchroot::processCall() {
 547  	int code = 0;
 548  
 549  	// TODO: wrap all `processCall` calls in try-catch like this
 550  	try {
 551  		if (auto thread = _thread.lock()) {
 552  			if (auto process = thread->process()) {
 553  				process->setVchrootDirectory(std::make_shared<FD>(_body.directory_fd));
 554  				_body.directory_fd = -1;
 555  			} else {
 556  				code = -ESRCH;
 557  			}
 558  		} else {
 559  			code = -ESRCH;
 560  		}
 561  	} catch (std::system_error err) {
 562  		code = -err.code().value();
 563  	} catch (...) {
 564  		code = std::numeric_limits<int>::min();
 565  	}
 566  
 567  	_sendReply(code);
 568  };
 569  
 570  void DarlingServer::Call::MldrPath::processCall() {
 571  	int code = 0;
 572  	uint64_t fullLength = 0;
 573  
 574  	if (auto thread = _thread.lock()) {
 575  		if (auto process = thread->process()) {
 576  			auto tmpstr = std::string(Config::defaultMldrPath).substr(0, _body.buffer_size - 1);
 577  			auto len = std::min(tmpstr.length() + 1, _body.buffer_size);
 578  
 579  			fullLength = process->vchrootPath().length();
 580  
 581  			if (!process->writeMemory(_body.buffer, tmpstr.c_str(), len, &code)) {
 582  				// writeMemory returns a positive error code, but we want a negative one
 583  				code = -code;
 584  			}
 585  		} else {
 586  			code = -ESRCH;
 587  		}
 588  	} else {
 589  		code = -ESRCH;
 590  	}
 591  
 592  	_sendReply(code, fullLength);
 593  };
 594  
 595  void DarlingServer::Call::ThreadGetSpecialReplyPort::processCall() {
 596  	_sendReply(0, dtape_thread_get_special_reply_port());
 597  };
 598  
 599  void DarlingServer::Call::MkTimerCreate::processCall() {
 600  	_sendReply(0, dtape_mk_timer_create());
 601  };
 602  
 603  void DarlingServer::Call::PthreadKill::processCall() {
 604  	int code = 0;
 605  
 606  	if (auto targetThread = Thread::threadForPort(_body.thread_port)) {
 607  		try {
 608  			targetThread->sendSignal(_body.signal);
 609  		} catch (std::system_error e) {
 610  			code = -e.code().value();
 611  		}
 612  	} else {
 613  		code = -ESRCH;
 614  	}
 615  
 616  	_sendReply(code);
 617  };
 618  
 619  void DarlingServer::Call::PthreadCanceled::processCall() {
 620  	int code = 0;
 621  
 622  	callLog.warning() << "TODO: " << __PRETTY_FUNCTION__ << callLog.endLog;
 623  	code = -ENOSYS;
 624  
 625  	_sendReply(code);
 626  };
 627  
 628  void DarlingServer::Call::PthreadMarkcancel::processCall() {
 629  	int code = 0;
 630  
 631  	if (auto targetThread = Thread::threadForPort(_body.thread_port)) {
 632  		callLog.warning() << "TODO: " << __PRETTY_FUNCTION__ << callLog.endLog;
 633  		code = -ENOSYS;
 634  	} else {
 635  		code = -ESRCH;
 636  	}
 637  
 638  	_sendReply(code);
 639  };
 640  
 641  void DarlingServer::Call::KqchanMachPortOpen::processCall() {
 642  	int code = 0;
 643  	int socket = -1;
 644  
 645  	if (auto thread = _thread.lock()) {
 646  		if (auto process = thread->process()) {
 647  			auto kqchan = std::make_shared<Kqchan::MachPort>(process, _body.port_name, _body.receive_buffer, _body.receive_buffer_size, _body.saved_filter_flags);
 648  
 649  			try {
 650  				socket = kqchan->setup();
 651  			} catch (std::system_error e) {
 652  				code = -e.code().value();
 653  			} catch (...) {
 654  				// just report that we couldn't find the port
 655  				code = -ESRCH;
 656  			}
 657  
 658  			process->registerKqchan(kqchan);
 659  		} else {
 660  			code = -ESRCH;
 661  		}
 662  	} else {
 663  		code = -ESRCH;
 664  	}
 665  
 666  	_sendReply(code, socket);
 667  };
 668  
 669  void DarlingServer::Call::KqchanProcOpen::processCall() {
 670  	int code = 0;
 671  	int socket = -1;
 672  
 673  	if (auto thread = _thread.lock()) {
 674  		if (auto process = thread->process()) {
 675  			auto kqchan = std::make_shared<Kqchan::Process>(process, _body.pid, _body.flags);
 676  
 677  			try {
 678  				socket = kqchan->setup();
 679  			} catch (std::system_error e) {
 680  				code = -e.code().value();
 681  			} catch (...) {
 682  				// just report that we couldn't find the process
 683  				code = -ESRCH;
 684  			}
 685  
 686  			process->registerKqchan(kqchan);
 687  		} else {
 688  			code = -ESRCH;
 689  		}
 690  	} else {
 691  		code = -ESRCH;
 692  	}
 693  
 694  	_sendReply(code, socket);
 695  };
 696  
 697  void DarlingServer::Call::ForkWaitForChild::processCall() {
 698  	int code = 0;
 699  
 700  	if (auto thread = _thread.lock()) {
 701  		if (auto process = thread->process()) {
 702  			process->waitForChildAfterFork();
 703  		} else {
 704  			code = -ESRCH;
 705  		}
 706  	} else {
 707  		code = -ESRCH;
 708  	}
 709  
 710  	_sendReply(code);
 711  };
 712  
 713  void DarlingServer::Call::Sigprocess::processCall() {
 714  	int code = 0;
 715  	int newBSDSignal = 0;
 716  
 717  	if (auto thread = _thread.lock()) {
 718  		try {
 719  			thread->processSignal(_body.bsd_signal_number, _body.linux_signal_number, _body.code, _body.signal_address, _body.thread_state, _body.float_state);
 720  			newBSDSignal = thread->pendingSignal();
 721  		} catch (std::system_error e) {
 722  			code = -e.code().value();
 723  		}
 724  	} else {
 725  		code = -ESRCH;
 726  	}
 727  
 728  	_sendReply(code, newBSDSignal);
 729  };
 730  
 731  void DarlingServer::Call::TaskIs64Bit::processCall() {
 732  	int code = 0;
 733  	bool is64Bit = false;
 734  
 735  	if (auto maybeTargetProcess = processRegistry().lookupEntryByNSID(_body.id)) {
 736  		auto targetProcess = *maybeTargetProcess;
 737  		is64Bit = targetProcess->is64Bit();
 738  	} else {
 739  		code = -ESRCH;
 740  	}
 741  
 742  	_sendReply(code, is64Bit);
 743  };
 744  
// Handles the client entering an interrupt (signal) context.
// The bookkeeping is delegated to Thread; we just acknowledge with 0.
void DarlingServer::Call::InterruptEnter::processCall() {
	Thread::_handleInterruptEnterForCurrentThread();

	_sendReply(0);
};
 750  
// Handles the client leaving an interrupt (signal) context: tells the
// duct-taped kernel sigexc handling is done, acknowledges, then pops the
// interrupt state and flushes any reply that was saved while interrupted.
// NOTE(review): unlike the other call handlers, `_thread.lock()` is not
// null-checked before being dereferenced here — this assumes the calling
// thread is still alive; confirm that assumption holds.
void DarlingServer::Call::InterruptExit::processCall() {
	auto thread = _thread.lock();

	dtape_thread_sigexc_exit(thread->_dtapeThread);

	_sendReply(0);

	{
		std::unique_lock lock(thread->_rwlock);

		// pop the interrupt context we are exiting
		auto tmp = std::move(thread->_interrupts.top());

		thread->_interrupts.pop();

		if (tmp.savedReply) {
			// a reply was saved while we were interrupted; deliver it now
			callLog.debug() << *thread << ": Going to send saved reply" << callLog.endLog;
			Server::sharedInstance().sendMessage(std::move(*tmp.savedReply));
			tmp.savedReply = std::nullopt;
		}
	}
};
 772  
 773  void DarlingServer::Call::ConsoleOpen::processCall() {
 774  	static Log consoleLog("console");
 775  
 776  	int code = 0;
 777  	int sockets[2] = { -1, -1 };
 778  
 779  	// we don't really need bidirectional communication, so a pipe would suffice,
 780  	// except that when you set O_NONBLOCK on one side of a pipe, it is set for both.
 781  
 782  	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sockets) < 0) {
 783  		int err = errno;
 784  		callLog.warning() << __PRETTY_FUNCTION__ << ": socketpair failed with " << err << callLog.endLog;
 785  
 786  		// just report EMFILE for the peer
 787  		code = EMFILE;
 788  	} else {
 789  		// make our side non-blocking
 790  		int flags = fcntl(sockets[0], F_GETFL);
 791  		if (flags < 0) {
 792  			code = -errno;
 793  		} else {
 794  			flags |= O_NONBLOCK;
 795  			if (fcntl(sockets[0], F_SETFL, flags) < 0) {
 796  				code = -errno;
 797  			} else {
 798  				// now monitor it
 799  				auto fd = std::make_shared<FD>(sockets[0]);
 800  				std::weak_ptr<Process> weakProcess;
 801  
 802  				if (auto thread = _thread.lock()) {
 803  					if (auto process = thread->process()) {
 804  						weakProcess = process;
 805  					}
 806  				}
 807  
 808  				Server::sharedInstance().addMonitor(std::make_shared<Monitor>(fd, Monitor::Event::Readable | Monitor::Event::HangUp, false, false, [fd, weakProcess](std::shared_ptr<Monitor> monitor, Monitor::Event events) {
 809  					auto proc = weakProcess.lock();
 810  
 811  					if (!proc || static_cast<uint64_t>(events & Monitor::Event::HangUp) != 0) {
 812  						Server::sharedInstance().removeMonitor(monitor);
 813  						return;
 814  					}
 815  
 816  					if (static_cast<uint64_t>(events & Monitor::Event::Readable) != 0) {
 817  						std::stringstream data;
 818  						while (true) {
 819  							char buf[128];
 820  							auto count = read(fd->fd(), buf, sizeof(buf) - 1);
 821  							if (count <= 0) {
 822  								break;
 823  							}
 824  							buf[count] = '\0';
 825  							data << buf;
 826  						}
 827  						consoleLog.info() << *proc << ": " << data.rdbuf();
 828  					}
 829  				}));
 830  			}
 831  		}
 832  	}
 833  
 834  	if (code != 0) {
 835  		if (sockets[0] >= 0) {
 836  			close(sockets[0]);
 837  			sockets[0] = -1;
 838  		}
 839  		if (sockets[1] >= 0) {
 840  			close(sockets[1]);
 841  			sockets[1] = -1;
 842  		}
 843  	}
 844  	_sendReply(code, sockets[1]);
 845  };
 846  
 847  void DarlingServer::Call::SetDyldInfo::processCall() {
 848  	int code = 0;
 849  
 850  	if (auto thread = _thread.lock()) {
 851  		if (auto process = thread->process()) {
 852  			dtape_task_set_dyld_info(process->_dtapeTask, _body.address, _body.length);
 853  		} else {
 854  			code = -ESRCH;
 855  		}
 856  	} else {
 857  		code = -ESRCH;
 858  	}
 859  
 860  	_sendReply(code);
 861  };
 862  
 863  void DarlingServer::Call::StopAfterExec::processCall() {
 864  	int code = 0;
 865  
 866  	if (auto thread = _thread.lock()) {
 867  		if (auto process = thread->process()) {
 868  			process->setStartSuspended(true);
 869  		} else {
 870  			code = -ESRCH;
 871  		}
 872  	} else {
 873  		code = -ESRCH;
 874  	}
 875  
 876  	_sendReply(code);
 877  };
 878  
// Sets (or clears, when tracer == 0) the tracer for a target process.
// A target of 0 means "the calling process". Note the error codes here are
// deliberately positive: they describe caller-visible conditions, not
// internal darlingserver failures.
void DarlingServer::Call::SetTracer::processCall() {
	int code = 0;
	std::shared_ptr<Process> targetProcess = nullptr;
	std::shared_ptr<Process> tracerProcess = nullptr;

	// resolve the target: 0 selects the caller's own process
	if (_body.target == 0) {
		if (auto thread = _thread.lock()) {
			targetProcess = thread->process();
		}
	} else {
		if (auto maybeTargetProcess = processRegistry().lookupEntryByNSID(_body.target)) {
			targetProcess = *maybeTargetProcess;
		}
	}

	if (targetProcess) {
		// resolve the tracer: 0 means "clear the tracer"
		if (_body.tracer == 0) {
			// leave tracer process as nullptr
		} else {
			if (auto maybeTracerProcess = processRegistry().lookupEntryByNSID(_body.tracer)) {
				tracerProcess = *maybeTracerProcess;
			} else {
				// intentionally not negated because this is not an internal error;
				// this is a perfectly valid case
				code = ESRCH;
			}
		}

		if (code == 0) {
			if (!targetProcess->setTracerProcess(tracerProcess)) {
				// again, not negated because this isn't an internal error;
				// simply indicates there was already a tracer set for the target
				code = EPERM;
			}
		}
	} else {
		// ditto from before
		code = ESRCH;
	}

	_sendReply(code);
};
 921  
 922  void DarlingServer::Call::TidForThread::processCall() {
 923  	int code = 0;
 924  	int32_t tid = 0;
 925  
 926  	if (auto thread = Thread::threadForPort(_body.thread)) {
 927  		tid = thread->nsid();
 928  	} else {
 929  		// might be user error (e.g. invalid port number or dead thread), so don't negate it
 930  		code = ESRCH;
 931  	}
 932  
 933  	_sendReply(code, tid);
 934  };
 935  
 936  void DarlingServer::Call::PtraceSigexc::processCall() {
 937  	int code = 0;
 938  
 939  	if (auto maybeProcess = processRegistry().lookupEntryByNSID(_body.target)) {
 940  		auto process = *maybeProcess;
 941  
 942  		dtape_task_set_sigexc_enabled(process->_dtapeTask, _body.enabled);
 943  		dtape_task_try_resume(process->_dtapeTask);
 944  	} else {
 945  		// not negated because this isn't an internal error
 946  		code = ESRCH;
 947  	}
 948  
 949  	_sendReply(code);
 950  };
 951  
 952  void DarlingServer::Call::PtraceThupdate::processCall() {
 953  	int code = 0;
 954  
 955  	if (auto maybeThread = threadRegistry().lookupEntryByNSID(_body.target)) {
 956  		auto thread = *maybeThread;
 957  
 958  		thread->setPendingSignal(_body.signum);
 959  	} else {
 960  		// not negated because this isn't an internal error
 961  		code = ESRCH;
 962  	}
 963  
 964  	_sendReply(code);
 965  };
 966  
 967  void DarlingServer::Call::ThreadSuspended::processCall() {
 968  	int code = 0;
 969  
 970  	if (auto thread = _thread.lock()) {
 971  		thread->waitWhileUserSuspended(_body.thread_state, _body.float_state);
 972  	} else {
 973  		code = -ESRCH;
 974  	}
 975  
 976  	_sendReply(code);
 977  };
 978  
 979  void DarlingServer::Call::S2CPerform::processCall() {
 980  	int code = 0;
 981  
 982  	if (auto thread = _thread.lock()) {
 983  		dtape_semaphore_up(thread->_s2cInterruptEnterSemaphore);
 984  		dtape_semaphore_down_simple(thread->_s2cInterruptExitSemaphore);
 985  	} else {
 986  		code = -ESRCH;
 987  	}
 988  
 989  	_sendReply(code);
 990  };
 991  
 992  void DarlingServer::Call::SetExecutablePath::processCall() {
 993  	int code = 0;
 994  
 995  	if (auto thread = _thread.lock()) {
 996  		if (auto process = thread->process()) {
 997  			std::string tmpstr;
 998  			tmpstr.resize(_body.buffer_size);
 999  			if (!process->readMemory((uintptr_t)_body.buffer, tmpstr.data(), _body.buffer_size, &code)) {
1000  				code = -code;
1001  			} else {
1002  				process->setExecutablePath(tmpstr.c_str());
1003  			}
1004  		} else {
1005  			code = -ESRCH;
1006  		}
1007  	} else {
1008  		code = -ESRCH;
1009  	}
1010  
1011  	_sendReply(code);
1012  }
1013  
1014  void DarlingServer::Call::GetExecutablePath::processCall() {
1015  	int code = 0;
1016  	uint64_t fullLength;
1017  
1018  	if (auto callingThread = _thread.lock()) {
1019  		if (auto callingProcess = callingThread->process()) {
1020  			if (auto maybeTargetProcess = processRegistry().lookupEntryByNSID(_body.pid)) {
1021  				auto targetProcess = *maybeTargetProcess;
1022  				auto path = targetProcess->executablePath();
1023  				auto len = std::min(path.length() + 1, _body.buffer_size);
1024  				if (!callingProcess->writeMemory((uintptr_t)_body.buffer, path.c_str(), len, &code)) {
1025  					code = -code;
1026  				}
1027  				fullLength = path.length();
1028  			} else {
1029  				// not negated because this is an acceptable case.
1030  				// e.g. the target process may have died before the call was processed.
1031  				code = ESRCH;
1032  			}
1033  		} else {
1034  			code = -ESRCH;
1035  		}
1036  	} else {
1037  		code = -ESRCH;
1038  	}
1039  
1040  	_sendReply(code, fullLength);
1041  }
1042  
1043  void DarlingServer::Call::Groups::processCall() {
1044  	int code = 0;
1045  	std::vector<uint32_t> oldGroups;
1046  
1047  	if (auto thread = _thread.lock()) {
1048  		if (auto process = thread->process()) {
1049  			oldGroups = process->groups();
1050  
1051  			if (_body.new_groups != 0 && _body.new_group_count > 0) {
1052  				std::vector<uint32_t> newGroups;
1053  				newGroups.resize(_body.new_group_count);
1054  
1055  				if (!process->readMemory((uintptr_t)_body.new_groups, newGroups.data(), newGroups.size() * sizeof(uint32_t), &code)) {
1056  					code = -code;
1057  				} else {
1058  					process->setGroups(newGroups);
1059  				}
1060  			}
1061  
1062  			if (code == 0 && _body.old_groups != 0 && _body.old_group_space > 0) {
1063  				auto len = std::min(oldGroups.size(), _body.old_group_space) * sizeof(uint32_t);
1064  				if (!process->writeMemory((uintptr_t)_body.old_groups, oldGroups.data(), len, &code)) {
1065  					code = -code;
1066  				}
1067  			}
1068  		} else {
1069  			code = -ESRCH;
1070  		}
1071  	} else {
1072  		code = -ESRCH;
1073  	}
1074  
1075  	_sendReply(code, oldGroups.size());
1076  };
1077  
1078  void DarlingServer::Call::DebugListProcesses::processCall() {
1079  	int code = 0;
1080  	auto processes = processRegistry().copyEntries();
1081  	int pipes[2] = {-1, -1};
1082  
1083  	code = pipe(pipes);
1084  	if (code == 0) {
1085  		for (const auto& process: processes) {
1086  			dserver_debug_process_t debugProcess;
1087  			debugProcess.pid = process->nsid();
1088  			debugProcess.port_count = dtape_debug_task_port_count(process->_dtapeTask);
1089  			write(pipes[1], &debugProcess, sizeof(debugProcess));
1090  		}
1091  
1092  		close(pipes[1]);
1093  	}
1094  
1095  	_sendReply(code, processes.size(), pipes[0]);
1096  };
1097  
1098  void DarlingServer::Call::DebugListPorts::processCall() {
1099  	int code = 0;
1100  	uint64_t portCount = 0;
1101  	int pipes[2] = {-1, -1};
1102  
1103  	if (auto maybeProcess = processRegistry().lookupEntryByNSID(_body.process)) {
1104  		auto process = *maybeProcess;
1105  
1106  		code = pipe(pipes);
1107  		if (code == 0) {
1108  			portCount = dtape_debug_task_list_ports(process->_dtapeTask, [](void* context, const dtape_debug_port_t* port) {
1109  				int& writeFD = *(int*)context;
1110  				dserver_debug_port_t debugPort;
1111  
1112  				debugPort.port_name = port->name;
1113  				debugPort.rights = port->rights;
1114  				debugPort.refs = port->refs;
1115  				debugPort.messages = port->messages;
1116  
1117  				write(writeFD, &debugPort, sizeof(debugPort));
1118  
1119  				return true;
1120  			}, &pipes[1]);
1121  		}
1122  	} else {
1123  		code = -ESRCH;
1124  	}
1125  
1126  	_sendReply(code, portCount, pipes[0]);
1127  };
1128  
1129  void DarlingServer::Call::DebugListMembers::processCall() {
1130  	int code = 0;
1131  	uint64_t portCount = 0;
1132  	int pipes[2] = {-1, -1};
1133  
1134  	if (auto maybeProcess = processRegistry().lookupEntryByNSID(_body.process)) {
1135  		auto process = *maybeProcess;
1136  
1137  		code = pipe(pipes);
1138  		if (code == 0) {
1139  			portCount = dtape_debug_portset_list_members(process->_dtapeTask, _body.portset, [](void* context, const dtape_debug_port_t* port) {
1140  				int& writeFD = *(int*)context;
1141  				dserver_debug_port_t debugPort;
1142  
1143  				debugPort.port_name = port->name;
1144  				debugPort.rights = port->rights;
1145  				debugPort.refs = port->refs;
1146  				debugPort.messages = port->messages;
1147  
1148  				write(writeFD, &debugPort, sizeof(debugPort));
1149  
1150  				return true;
1151  			}, &pipes[1]);
1152  		}
1153  	} else {
1154  		code = -ESRCH;
1155  	}
1156  
1157  	_sendReply(code, portCount, pipes[0]);
1158  };
1159  
1160  void DarlingServer::Call::DebugListMessages::processCall() {
1161  	int code = 0;
1162  	uint64_t portCount = 0;
1163  	int pipes[2] = {-1, -1};
1164  
1165  	if (auto maybeProcess = processRegistry().lookupEntryByNSID(_body.process)) {
1166  		auto process = *maybeProcess;
1167  
1168  		code = pipe(pipes);
1169  		if (code == 0) {
1170  			portCount = dtape_debug_port_list_messages(process->_dtapeTask, _body.port, [](void* context, const dtape_debug_message_t* port) {
1171  				int& writeFD = *(int*)context;
1172  				dserver_debug_message_t debugMessage;
1173  
1174  				debugMessage.sender = port->sender;
1175  				debugMessage.size = port->size;
1176  
1177  				write(writeFD, &debugMessage, sizeof(debugMessage));
1178  
1179  				return true;
1180  			}, &pipes[1]);
1181  		}
1182  	} else {
1183  		code = -ESRCH;
1184  	}
1185  
1186  	_sendReply(code, portCount, pipes[0]);
1187  };
1188  
// NOTE(review): presumably expands to the shared out-of-line definitions for
// all Call subclasses in this file — confirm against its definition in the
// darlingserver headers.
DSERVER_CLASS_SOURCE_DEFS;