// duct-tape/src/kqchan.c
#include <darlingserver/duct-tape.h>
#include <darlingserver/duct-tape/kqchan.h>
#include <darlingserver/duct-tape/stubs.h>
#include <darlingserver/duct-tape/thread.h>
#include <darlingserver/duct-tape/log.h>

#include <kern/debug.h>
#include <stdlib.h>
#include <string.h>
#include <ipc/ipc_mqueue.h>
#include <kern/thread.h>
 11  
 12  extern int filt_machportattach(struct knote *kn, struct kevent_qos_s *kev);
 13  extern void filt_machportdetach(struct knote *kn);
 14  extern int filt_machportevent(struct knote *kn, long hint);
 15  extern int filt_machporttouch(struct knote *kn, struct kevent_qos_s *kev);
 16  extern int filt_machportprocess(struct knote *kn, struct kevent_qos_s *kev);
 17  extern int filt_machportpeek(struct knote *kn);
 18  
 19  dtape_kqchan_mach_port_t* dtape_kqchan_mach_port_create(dtape_task_t* owning_task, uint32_t port, uint64_t receive_buffer, uint64_t receive_buffer_size, uint64_t saved_filter_flags, dtape_kqchan_mach_port_notification_callback_f notification_callback, void* context) {
 20  	dtape_kqchan_mach_port_t* kqchan = malloc(sizeof(dtape_kqchan_mach_port_t));
 21  	if (!kqchan) {
 22  		return NULL;
 23  	}
 24  
 25  	memset(kqchan, 0, sizeof(*kqchan));
 26  
 27  	kqchan->callback = notification_callback;
 28  	kqchan->context = context;
 29  	kqchan->task = owning_task;
 30  
 31  	// keep a reference to the task
 32  	task_reference(&kqchan->task->xnu_task);
 33  
 34  	os_ref_init(&kqchan->refcount, NULL);
 35  
 36  	kqchan->knote.kn_id = port;
 37  	kqchan->knote.kn_ext[0] = receive_buffer;
 38  	kqchan->knote.kn_ext[1] = receive_buffer_size;
 39  	kqchan->knote.kn_sfflags = saved_filter_flags;
 40  	kqchan->knote.kn_filter = EVFILT_MACHPORT;
 41  
 42  	// try to attach to the Mach port
 43  	filt_machportattach(&kqchan->knote, NULL);
 44  
 45  	if (kqchan->knote.kn_flags & EV_ERROR) {
 46  		free(kqchan);
 47  		return NULL;
 48  	}
 49  
 50  	return kqchan;
 51  };
 52  
 53  void dtape_kqchan_mach_port_destroy(dtape_kqchan_mach_port_t* kqchan) {
 54  	if (os_ref_release(&kqchan->refcount) != 0) {
 55  		panic("Duct-taped Mach port kqchan over-retained or still in-use at destruction");
 56  	}
 57  
 58  	filt_machportdetach(&kqchan->knote);
 59  
 60  	task_deallocate(&kqchan->task->xnu_task);
 61  
 62  	free(kqchan);
 63  };
 64  
 65  void dtape_kqchan_mach_port_modify(dtape_kqchan_mach_port_t* kqchan, uint64_t receive_buffer, uint64_t receive_buffer_size, uint64_t saved_filter_flags) {
 66  	struct kevent_qos_s kev = {
 67  		.fflags = saved_filter_flags,
 68  		.ext = { receive_buffer, receive_buffer_size },
 69  	};
 70  	filt_machporttouch(&kqchan->knote, &kev);
 71  };
 72  
// Disables notifications for this kqchan: once the callback is cleared,
// knote_post() becomes a no-op for this channel.
// NOTE(review): the callback pointer is cleared before the context, and there
// is no synchronization with knote_post() or the waiter thread — presumably
// callers guarantee no concurrent posting; confirm before relying on this.
void dtape_kqchan_mach_port_disable_notifications(dtape_kqchan_mach_port_t* kqchan) {
	kqchan->callback = NULL;
	kqchan->context = NULL;
};
 77  
 78  bool dtape_kqchan_mach_port_fill(dtape_kqchan_mach_port_t* kqchan, dserver_kqchan_reply_mach_port_read_t* reply, uint64_t default_buffer, uint64_t default_buffer_size) {
 79  	struct kevent_qos_s kev;
 80  	bool maybe_used_default_buffer = false;
 81  	thread_t xthread = current_thread();
 82  	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);
 83  
 84  	thread->kevent_ctx.kec_data_out = thread->kevent_ctx.kec_data_avail = default_buffer;
 85  	thread->kevent_ctx.kec_data_size = thread->kevent_ctx.kec_data_resid = default_buffer_size;
 86  	thread->kevent_ctx.kec_process_flags = 0;
 87  
 88  	dtape_log_debug("trying to fill kevent for kqchan %p with mqueue %p", kqchan, kqchan->knote.kn_mqueue);
 89  
 90  	if (kqchan->knote.kn_status & KN_VANISHED) {
 91  		// create a fake event
 92  		memset(&reply->kev, 0, sizeof(reply->kev));
 93  
 94  		reply->kev.filter = kqchan->knote.kn_filter;
 95  		reply->kev.ident = kqchan->knote.kn_id;
 96  		reply->kev.flags = EV_DISPATCH2 | EV_ONESHOT | EV_VANISHED;
 97  		reply->kev.udata = kqchan->knote.kn_udata;
 98  
 99  		return true;
100  	}
101  
102  	bool has_events = dtape_kqchan_mach_port_has_events(kqchan);
103  
104  	dtape_log_debug("has events before process? %s", has_events ? "yes" : "no");
105  
106  	bool result = (filt_machportprocess(&kqchan->knote, (void*)&reply->kev) & FILTER_ACTIVE) ? true : false;
107  
108  	dtape_log_debug("had events that were processed? %s", result ? "yes" : "no");
109  
110  	dtape_log_debug("states matched? %s", (has_events == result) ? "yes" : "no");
111  
112  	if (!result) {
113  		dtape_log_debug("imq_msgcount = %u; imq_messages.ikmq_base = %p; imq_receiver_name = %d", kqchan->knote.kn_mqueue->imq_msgcount, kqchan->knote.kn_mqueue->imq_messages.ikmq_base, kqchan->knote.kn_mqueue->imq_receiver_name);
114  	}
115  
116  	if (kqchan->waiter_read_semaphore) {
117  		dtape_semaphore_up(kqchan->waiter_read_semaphore);
118  	}
119  
120  	return result;
121  };
122  
123  bool dtape_kqchan_mach_port_has_events(dtape_kqchan_mach_port_t* kqchan) {
124  	if (imq_is_set(kqchan->knote.kn_mqueue)) {
125  		return ipc_mqueue_set_peek(kqchan->knote.kn_mqueue);
126  	} else {
127  		return ipc_mqueue_peek(kqchan->knote.kn_mqueue, NULL, NULL, NULL, NULL, NULL);
128  	}
129  };
130  
131  kevent_ctx_t kevent_get_context(thread_t xthread) {
132  	dtape_thread_t* thread = dtape_thread_for_xnu_thread(xthread);
133  	return &thread->kevent_ctx;
134  };
135  
136  static void knote_post(struct knote* kn, long hint) {
137  	dtape_kqchan_mach_port_t* kqchan = __container_of(kn, dtape_kqchan_mach_port_t, knote);
138  
139  	dtape_log_debug("%s: kn=%p, kqchan=%p, kqchan->callback=%p", __FUNCTION__, kn, kqchan, kqchan->callback);
140  
141  	if (!kqchan->callback) {
142  		return;
143  	}
144  
145  #if 0
146  	if (!dtape_kqchan_mach_port_has_events(kqchan)) {
147  		return;
148  	}
149  #endif
150  
151  	dtape_log_debug("%s: kn=%p, kqchan=%p; invoking callback...", __FUNCTION__, kn, kqchan);
152  
153  	kqchan->callback(kqchan->context);
154  };
155  
156  void knote(struct klist* list, long hint) {
157  	struct knote *kn;
158  
159  	SLIST_FOREACH(kn, list, kn_selnext) {
160  		knote_post(kn, hint);
161  	}
162  };
163  
164  void knote_vanish(struct klist* list, bool make_active) {
165  	struct knote* kn;
166  	struct knote* tmp;
167  
168  	dtape_log_debug("klist %p is vanishing", list);
169  
170  	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmp) {
171  		dtape_log_debug("knote %p is vanishing", kn);
172  		// TODO: handle the old style of vanishing (i.e. `EV_EOF | EV_ONESHOT`)
173  		kn->kn_status |= KN_VANISHED;
174  		knote_post(kn, 0);
175  	}
176  };
177  
// Entry point for the per-kqchan waiter kernel thread (started by
// knote_link_waitq). It repeatedly blocks on the linked waitq; when woken,
// it peeks the Mach port filter and invokes the kqchan callback if events
// are pending, then waits until the event has been read before re-arming.
// Exits when interrupted (see knote_unlink_waitq) or when `waitq` is cleared.
static void kqchan_waitq_waiter_entry(void* context, wait_result_t wait_result) {
	dtape_kqchan_mach_port_t* kqchan = context;
	struct waitq* wq = NULL;

	dtape_log_debug("kqchan waitq waiter thread entering");

	// `waitq` is cleared by knote_unlink_waitq(); seeing NULL means we should exit
	while ((wq = kqchan->waitq) != NULL) {
		if ((wait_result = waitq_assert_wait64(wq, IPC_MQUEUE_RECEIVE, THREAD_INTERRUPTIBLE, 0)) == THREAD_WAITING) {
			wait_result = thread_block(NULL);
		}

		dtape_log_debug("kqchan waitq waiter thread unblocked with wait result: %d", wait_result);

		if (wait_result == THREAD_INTERRUPTED) {
			// a wakeup with "THREAD_INTERRUPTED" indicates we should die
			break;
		} else {
			// NOTE(review): `callback` is called without a NULL check; if
			// dtape_kqchan_mach_port_disable_notifications() can run while
			// this thread is alive, this could dereference NULL — confirm
			// the intended lifecycle.
			if (filt_machportpeek(&kqchan->knote) & FILTER_ACTIVE) {
				kqchan->callback(kqchan->context);
			}
			// wait until it's read
			if (!dtape_semaphore_down_simple(kqchan->waiter_read_semaphore)) {
				// we got interrupted
				break;
			}
		}
	}

	dtape_log_debug("kqchan waitq waiter thread exiting");

	// to prevent us from racing with the kqchan's death/deallocation, we have a death semaphore that the kqchan waits for before dying
	dtape_semaphore_up(kqchan->waiter_death_semaphore);

	thread_terminate_self();
	__builtin_unreachable();
};
214  
215  int knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link) {
216  	dtape_kqchan_mach_port_t* kqchan = __container_of(kn, dtape_kqchan_mach_port_t, knote);
217  
218  	if (kqchan->waitq) {
219  		dtape_log_warning("Attempt to link kqchan to %p while it was already linked to %p", wq, kqchan->waitq);
220  		return 1;
221  	}
222  
223  	kqchan->waitq = wq;
224  	kqchan->waiter_death_semaphore = dtape_semaphore_create(dtape_task_for_xnu_task(kernel_task), 0);
225  	kqchan->waiter_read_semaphore = dtape_semaphore_create(dtape_task_for_xnu_task(kernel_task), 0);
226  
227  	if (kernel_thread_start(kqchan_waitq_waiter_entry, kqchan, &kqchan->waiter_thread) != KERN_SUCCESS) {
228  		return 1;
229  	}
230  
231  	return 0;
232  };
233  
// Unlinks the knote's containing kqchan from the given waitq and shuts down
// the waiter kernel thread started by knote_link_waitq. The teardown order
// matters: clear the waitq, interrupt the thread, then wait on the death
// semaphore before destroying either semaphore. Always returns 0.
int knote_unlink_waitq(struct knote *kn, struct waitq *wq) {
	dtape_kqchan_mach_port_t* kqchan = __container_of(kn, dtape_kqchan_mach_port_t, knote);

	if (kqchan->waitq != wq) {
		panic("Attempt to unlink kqchan from %p while it was linked to %p", wq, kqchan->waitq);
	}

	// the kernel thread will see this and terminate if it's not currently blocked waiting
	kqchan->waitq = NULL;

	// if the kernel thread *is* currently blocked waiting, wake it up with THREAD_INTERRUPTED (it will know it needs to terminate)
	clear_wait(kqchan->waiter_thread, THREAD_INTERRUPTED);

	// now release our reference on the kernel thread
	thread_deallocate(kqchan->waiter_thread);
	kqchan->waiter_thread = NULL;

	// wait for the waiter thread to die
	// (it signals the death semaphore just before thread_terminate_self)
	dtape_semaphore_down_simple(kqchan->waiter_death_semaphore);

	// now destroy the waiter thread death semaphore
	dtape_semaphore_destroy(kqchan->waiter_death_semaphore);
	kqchan->waiter_death_semaphore = NULL;

	// the read semaphore is no longer needed either
	dtape_semaphore_destroy(kqchan->waiter_read_semaphore);
	kqchan->waiter_read_semaphore = NULL;

	return 0;
};
263  
// Stubbed: lazy waitq-set allocation for knotes is not implemented in duct-tape.
void knote_link_waitqset_lazy_alloc(struct knote *kn) {
	dtape_stub();
};
267  
// Safe stub: always reports that lazy waitq-set allocation is not wanted.
boolean_t knote_link_waitqset_should_lazy_alloc(struct knote *kn) {
	dtape_stub_safe();
	return FALSE;
};
272  
// Stubbed: kqueue turnstiles are not implemented in duct-tape; callers get NULL.
struct turnstile* kqueue_alloc_turnstile(struct kqueue* kq) {
	dtape_stub();
	return NULL;
};
277  
278  // <copied from="xnu://7195.141.2/bsd/kern/kern_event.c">
279  
// Initializes an empty knote list.
// (Copied verbatim from xnu's kern_event.c — keep in sync with upstream.)
void
klist_init(struct klist *list)
{
	SLIST_INIT(list);
}
285  
286  /*!
287   * @function knote_fill_kevent_with_sdata
288   *
289   * @brief
290   * Fills in a kevent from the current content of a knote.
291   *
292   * @discussion
293   * This is meant to be called from filter's f_event hooks.
294   * The kevent data is filled with kn->kn_sdata.
295   *
296   * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
297   *
298   * Using knote_fill_kevent is typically preferred.
299   */
// (Copied verbatim from xnu's kern_event.c — keep in sync with upstream.)
OS_ALWAYS_INLINE
void
knote_fill_kevent_with_sdata(struct knote *kn, struct kevent_qos_s *kev)
{
#define knote_assert_aliases(name1, offs1, name2) \
	static_assert(offsetof(struct kevent_qos_s, name1) + offs1 == \
	    offsetof(struct kevent_internal_s, name2), \
	        "kevent_qos_s::" #name1 " and kevent_internal_s::" #name2 "need to alias")
	/*
	 * All the code makes assumptions on these aliasing,
	 * so make sure we fail the build if we ever ever ever break them.
	 */
	knote_assert_aliases(ident, 0, kei_ident);
#ifdef __LITTLE_ENDIAN__
	knote_assert_aliases(filter, 0, kei_filter);  // non trivial overlap
	knote_assert_aliases(filter, 1, kei_filtid);  // non trivial overlap
#else
	knote_assert_aliases(filter, 0, kei_filtid);  // non trivial overlap
	knote_assert_aliases(filter, 1, kei_filter);  // non trivial overlap
#endif
	knote_assert_aliases(flags, 0, kei_flags);
	knote_assert_aliases(qos, 0, kei_qos);
	knote_assert_aliases(udata, 0, kei_udata);
	knote_assert_aliases(fflags, 0, kei_fflags);
	knote_assert_aliases(xflags, 0, kei_sfflags); // non trivial overlap
	knote_assert_aliases(data, 0, kei_sdata);     // non trivial overlap
	knote_assert_aliases(ext, 0, kei_ext);
#undef knote_assert_aliases

	/*
	 * Fix the differences between kevent_qos_s and kevent_internal_s:
	 * - xflags is where kn_sfflags lives, we need to zero it
	 * - fixup the high bits of `filter` where kn_filtid lives
	 */
	*kev = *(struct kevent_qos_s *)&kn->kn_kevent;
	kev->xflags = 0;
	kev->filter |= 0xff00;
	if (kn->kn_flags & EV_CLEAR) {
		kn->kn_fflags = 0;
	}
}
341  
342  /*!
343   * @function knote_fill_kevent
344   *
345   * @brief
346   * Fills in a kevent from the current content of a knote.
347   *
348   * @discussion
349   * This is meant to be called from filter's f_event hooks.
350   * The kevent data is filled with the passed in data.
351   *
352   * kn->kn_fflags is cleared if kn->kn_flags has EV_CLEAR set.
353   */
// (Copied verbatim from xnu's kern_event.c — keep in sync with upstream.)
OS_ALWAYS_INLINE
void
knote_fill_kevent(struct knote *kn, struct kevent_qos_s *kev, int64_t data)
{
	knote_fill_kevent_with_sdata(kn, kev);
	// overwrite filter (dropping the kn_filtid bits set above) and data
	kev->filter = kn->kn_filter;
	kev->data = data;
}
362  
363  /*
364   * attach a knote to the specified list.  Return true if this is the first entry.
365   * The list is protected by whatever lock the object it is associated with uses.
366   */
// (Copied verbatim from xnu's kern_event.c — keep in sync with upstream.)
int
knote_attach(struct klist *list, struct knote *kn)
{
	// remember whether the list was empty before inserting (the return value)
	int ret = SLIST_EMPTY(list);
	SLIST_INSERT_HEAD(list, kn, kn_selnext);
	return ret;
}
374  
375  /*
376   * detach a knote from the specified list.  Return true if that was the last entry.
377   * The list is protected by whatever lock the object it is associated with uses.
378   */
// (Copied verbatim from xnu's kern_event.c — keep in sync with upstream.)
int
knote_detach(struct klist *list, struct knote *kn)
{
	SLIST_REMOVE(list, kn, knote, kn_selnext);
	// report whether that was the last entry
	return SLIST_EMPTY(list);
}
385  
// Records an error on the knote: EV_ERROR is set in the flags and the error
// code is carried in kn_sdata for later delivery.
// (Copied verbatim from xnu's kern_event.c — keep in sync with upstream.)
OS_ALWAYS_INLINE
void
knote_set_error(struct knote *kn, int error)
{
	kn->kn_flags |= EV_ERROR;
	kn->kn_sdata = error;
}
393  
394  // </copied>