/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _KERN_LOCKSTAT_H
#define _KERN_LOCKSTAT_H
#include <machine/locks.h>
#include <machine/atomic.h>
#include <kern/lock_group.h>

/*
 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
 * Only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained as a
 * 64-bit quantity; the newer x86-specific statistics are maintained as
 * 32-bit quantities.
 *
 * Enable this preprocessor define to record only the first miss of a
 * given acquire attempt. By default, every miss is counted, so multiple
 * misses may be recorded for a single lock acquire attempt via
 * lck_mtx_lock().
 */
#undef LOG_FIRST_MISS_ALONE

/*
 * This preprocessor define controls whether the R-M-W update of the
 * per-group statistics elements is atomic (LOCK-prefixed).
 * Enabled by default.
 */
#define ATOMIC_STAT_UPDATES 1
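
/*
 * Illustrative note on the effect of ATOMIC_STAT_UPDATES (a sketch, see
 * lck_grp_inc_stats() below for the real code): with the define enabled,
 * the counter bump is a single atomic read-modify-write,
 *
 *	uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed);
 *
 * whereas with it disabled the update is a plain, potentially racy,
 * increment. Lost updates in the non-atomic case are presumably tolerable
 * because these are statistics, not correctness-critical state.
 */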

/*
 * DTrace lockstat probe definitions
 */

enum lockstat_probe_id {
	/* Spinlocks */
	LS_LCK_SPIN_LOCK_ACQUIRE,
	LS_LCK_SPIN_LOCK_SPIN,
	LS_LCK_SPIN_UNLOCK_RELEASE,

	/*
	 * Mutexes can also have interlock-spin events, which are
	 * unique to our lock implementation.
	 */
	LS_LCK_MTX_LOCK_ACQUIRE,
	LS_LCK_MTX_LOCK_BLOCK,
	LS_LCK_MTX_LOCK_SPIN,
	LS_LCK_MTX_LOCK_ILK_SPIN,
	LS_LCK_MTX_TRY_LOCK_ACQUIRE,
	LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE,
	LS_LCK_MTX_UNLOCK_RELEASE,
	LS_LCK_MTX_LOCK_SPIN_ACQUIRE,

	/*
	 * Provide a parallel set for indirect mutexes.
	 */
	LS_LCK_MTX_EXT_LOCK_ACQUIRE,
	LS_LCK_MTX_EXT_LOCK_BLOCK,
	LS_LCK_MTX_EXT_LOCK_SPIN,
	LS_LCK_MTX_EXT_LOCK_ILK_SPIN,
	LS_LCK_MTX_EXT_UNLOCK_RELEASE,

	/*
	 * Reader-writer locks support a blocking upgrade primitive, as
	 * well as the possibility of spinning on the interlock.
	 */
	LS_LCK_RW_LOCK_SHARED_ACQUIRE,
	LS_LCK_RW_LOCK_SHARED_BLOCK,
	LS_LCK_RW_LOCK_SHARED_SPIN,

	LS_LCK_RW_LOCK_EXCL_ACQUIRE,
	LS_LCK_RW_LOCK_EXCL_BLOCK,
	LS_LCK_RW_LOCK_EXCL_SPIN,

	LS_LCK_RW_DONE_RELEASE,

	LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE,
	LS_LCK_RW_TRY_LOCK_SHARED_SPIN,

	LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE,
	LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN,

	LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE,
	LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN,
	LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK,

	LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE,
	LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN,

	/* Ticket lock */
	LS_LCK_TICKET_LOCK_ACQUIRE,
	LS_LCK_TICKET_LOCK_RELEASE,
	LS_LCK_TICKET_LOCK_SPIN,

	LS_NPROBES
};

#if CONFIG_DTRACE
extern uint32_t lockstat_probemap[LS_NPROBES];
extern void dtrace_probe(uint32_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);
/*
 * Macros to record lockstat probes.
 */
#define LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)             \
	{                                                                   \
	        uint32_t id;                                                \
	        if (__improbable(id = lockstat_probemap[(probe)])) {        \
	                dtrace_probe(id, (uintptr_t)(lp), (arg0),           \
	                    (arg1), (arg2), (arg3));                        \
	        }                                                           \
	}
#define LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD__(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD(probe, lp, ...) LOCKSTAT_RECORD__(probe, lp, ##__VA_ARGS__, 0, 0, 0, 0)
#else
#define LOCKSTAT_RECORD()
#endif /* CONFIG_DTRACE */
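
/*
 * Illustrative usage of LOCKSTAT_RECORD() (a sketch): the trailing-zero
 * padding in the variadic wrappers above lets callers pass anywhere from
 * zero to four payload arguments, e.g.
 *
 *	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, lock);
 *	LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lock, spin_time);
 *
 * Each invocation looks up the probe in lockstat_probemap[] and calls
 * dtrace_probe() only when a DTrace consumer has enabled that probe.
 */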

/*
 * Time threshold before dtrace lockstat spin
 * probes are triggered
 */
extern uint64_t dtrace_spin_threshold;

#if CONFIG_DTRACE
void lockprof_invoke(lck_grp_t*, lck_grp_stat_t*, uint64_t);
#endif /* CONFIG_DTRACE */

static inline void
lck_grp_stat_enable(lck_grp_stat_t *stat)
{
	stat->lgs_enablings++;
}

static inline void
lck_grp_stat_disable(lck_grp_stat_t *stat)
{
	stat->lgs_enablings--;
}
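
/*
 * lgs_enablings acts as a simple enable count: a group's statistics are
 * gathered only while at least one enabling is outstanding (see
 * lck_grp_inc_stats() below), so enable/disable calls are expected to be
 * paired, e.g. (illustrative sketch):
 *
 *	lck_grp_stat_enable(&grp->lck_grp_stats.lgss_spin_held);
 *	...
 *	lck_grp_stat_disable(&grp->lck_grp_stats.lgss_spin_held);
 *
 * The increment/decrement here is not atomic; callers are assumed to
 * serialize enablings themselves.
 */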

#if MACH_KERNEL_PRIVATE
static inline void
lck_grp_inc_stats(lck_grp_t *grp, lck_grp_stat_t *stat)
{
#pragma unused(grp)
	if (__improbable(stat->lgs_enablings)) {
#if ATOMIC_STAT_UPDATES
		uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed);
#else
		uint64_t val = stat->lgs_count++;
#endif /* ATOMIC_STAT_UPDATES */
#if CONFIG_DTRACE && LOCK_STATS
		if (__improbable(stat->lgs_limit && (val % (stat->lgs_limit)) == 0)) {
			lockprof_invoke(grp, stat, val);
		}
#else
#pragma unused(val)
#endif /* CONFIG_DTRACE && LOCK_STATS */
	}
}

#if LOCK_STATS
static inline void
lck_grp_inc_time_stats(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t time)
{
	if (__improbable(stat->lgs_enablings)) {
		uint64_t val = os_atomic_add_orig(&stat->lgs_count, time, relaxed);
#if CONFIG_DTRACE
		if (__improbable(stat->lgs_limit)) {
			while (__improbable(time > stat->lgs_limit)) {
				time -= stat->lgs_limit;
				lockprof_invoke(grp, stat, val);
			}
			if (__improbable(((val % stat->lgs_limit) + time) > stat->lgs_limit)) {
				lockprof_invoke(grp, stat, val);
			}
		}
#else
#pragma unused(val)
#endif /* CONFIG_DTRACE */
	}
}

#endif /* LOCK_STATS */
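
/*
 * The lgs_limit field acts as a sampling period: lockprof_invoke() fires
 * roughly once per lgs_limit units accumulated into lgs_count. For the
 * time-weighted variant above, a single addition may span several periods,
 * which is why the while loop fires once per full period contained in
 * `time` and the trailing check fires once more if the remainder crosses
 * the next period boundary. Worked example (illustrative numbers): with
 * lgs_limit == 1000, lgs_count == 900 and time == 2500, the counter moves
 * from 900 to 3400, crossing the 1000, 2000 and 3000 boundaries, and
 * lockprof_invoke() is called three times.
 */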

static inline void
lck_grp_spin_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
#endif
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_held;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline void
lck_grp_spin_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_miss;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline void
lck_grp_spin_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
	if (time > dtrace_spin_threshold) {
		LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
	}
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_spin;
	lck_grp_inc_time_stats(grp, stat, time);
#endif /* LOCK_STATS */
}

static inline boolean_t
lck_grp_spin_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
	boolean_t enabled = FALSE;
#if CONFIG_DTRACE
	enabled |= lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0;
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	enabled |= (grp && grp->lck_grp_stats.lgss_spin_spin.lgs_enablings);
#endif /* LOCK_STATS */
	return enabled;
}
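
/*
 * Typical caller pattern for the spin-lock hooks (illustrative sketch; the
 * actual lock implementations live elsewhere): spin time is only measured
 * when someone is listening, so a contended slow path first consults
 * lck_grp_spin_spin_enabled(), e.g.
 *
 *	uint64_t begin = 0;
 *	boolean_t stat_enabled = lck_grp_spin_spin_enabled(lock LCK_GRP_ARG(grp));
 *	if (__improbable(stat_enabled)) {
 *		begin = mach_absolute_time();
 *	}
 *	... spin until the lock is acquired ...
 *	if (__improbable(stat_enabled)) {
 *		lck_grp_spin_update_spin(lock LCK_GRP_ARG(grp),
 *		    mach_absolute_time() - begin);
 *	}
 *	lck_grp_spin_update_held(lock LCK_GRP_ARG(grp));
 */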

static inline void
lck_grp_ticket_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
#endif
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_held;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline void
lck_grp_ticket_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_miss;
	lck_grp_inc_stats(grp, stat);
#endif /* LOCK_STATS */
}

static inline boolean_t
lck_grp_ticket_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
	boolean_t enabled = FALSE;
#if CONFIG_DTRACE
	enabled |= lockstat_probemap[LS_LCK_TICKET_LOCK_SPIN] != 0;
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	enabled |= (grp && grp->lck_grp_stats.lgss_ticket_spin.lgs_enablings);
#endif /* LOCK_STATS */
	return enabled;
}

static inline void
lck_grp_ticket_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
	if (time > dtrace_spin_threshold) {
		LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
	}
#endif /* CONFIG_DTRACE */
#if LOCK_STATS
	if (!grp) {
		return;
	}
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_spin;
	lck_grp_inc_time_stats(grp, stat, time);
#endif /* LOCK_STATS */
}

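/*
 * The first_miss argument threads per-acquire state through the indirect
 * mutex slow path: when LOG_FIRST_MISS_ALONE is defined, only the first
 * miss (bit 0x1) and the first wait (bit 0x2) of a given lck_mtx_lock()
 * attempt are counted, and later calls for the same attempt become no-ops.
 * With LOG_FIRST_MISS_ALONE undefined (the default), the bits are unused
 * and every miss/wait is counted.
 */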
static inline void
lck_grp_mtx_update_miss(
	struct _lck_mtx_ext_ *lock,
	int *first_miss)
{
#pragma unused(first_miss)
#if LOG_FIRST_MISS_ALONE
	if ((*first_miss & 1) == 0) {
#endif /* LOG_FIRST_MISS_ALONE */
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_miss;
	lck_grp_inc_stats(grp, stat);

#if LOG_FIRST_MISS_ALONE
	*first_miss |= 1;
}
#endif /* LOG_FIRST_MISS_ALONE */
}

static inline void
lck_grp_mtx_update_direct_wait(
	struct _lck_mtx_ext_ *lock)
{
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_direct_wait;
	lck_grp_inc_stats(grp, stat);
}

static inline void
lck_grp_mtx_update_wait(
	struct _lck_mtx_ext_ *lock,
	int *first_miss)
{
#pragma unused(first_miss)
#if LOG_FIRST_MISS_ALONE
	if ((*first_miss & 2) == 0) {
#endif /* LOG_FIRST_MISS_ALONE */
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_wait;
	lck_grp_inc_stats(grp, stat);
#if LOG_FIRST_MISS_ALONE
	*first_miss |= 2;
}
#endif /* LOG_FIRST_MISS_ALONE */
}

static inline void
lck_grp_mtx_update_held(
	struct _lck_mtx_ext_ *lock)
{
	lck_grp_t *grp = lock->lck_mtx_grp;
	lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_held;
	lck_grp_inc_stats(grp, stat);
}

#endif /* MACH_KERNEL_PRIVATE */
#endif /* _KERN_LOCKSTAT_H */