/ duct-tape / xnu / bsd / sys / dtrace_impl.h
dtrace_impl.h
   1  /*
   2   * CDDL HEADER START
   3   *
   4   * The contents of this file are subject to the terms of the
   5   * Common Development and Distribution License (the "License").
   6   * You may not use this file except in compliance with the License.
   7   *
   8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9   * or http://www.opensolaris.org/os/licensing.
  10   * See the License for the specific language governing permissions
  11   * and limitations under the License.
  12   *
  13   * When distributing Covered Code, include this CDDL HEADER in each
  14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   * If applicable, add the following below this CDDL HEADER, with the
  16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   *
  19   * CDDL HEADER END
  20   */
  21  
  22  /*
  23   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  24   * Use is subject to license terms.
  25   *
  26   * Portions Copyright (c) 2012 by Delphix. All rights reserved.
  27   * Portions Copyright (c) 2016 by Joyent, Inc.
  28   */
  29  
  30  #ifndef _SYS_DTRACE_IMPL_H
  31  #define	_SYS_DTRACE_IMPL_H
  32  
  33  #ifdef	__cplusplus
  34  extern "C" {
  35  #endif
  36  
  37  /*
  38   * DTrace Dynamic Tracing Software: Kernel Implementation Interfaces
  39   *
  40   * Note: The contents of this file are private to the implementation of the
  41   * Solaris system and DTrace subsystem and are subject to change at any time
  42   * without notice.  Applications and drivers using these interfaces will fail
  43   * to run on future releases.  These interfaces should not be used for any
  44   * purpose except those expressly outlined in dtrace(7D) and libdtrace(3LIB).
  45   * Please refer to the "Solaris Dynamic Tracing Guide" for more information.
  46   */
  47  
  48  #include <sys/dtrace.h>
  49  
  50  /*
  51   * DTrace Implementation Locks
  52   */
  53  extern lck_attr_t dtrace_lck_attr;
  54  extern lck_grp_t dtrace_lck_grp;
  55  extern lck_mtx_t dtrace_procwaitfor_lock;
  56  
  57  /*
  58   * DTrace Implementation Constants and Typedefs
  59   */
   60  #define	DTRACE_MAXPROPLEN		128
   61  #define	DTRACE_DYNVAR_CHUNKSIZE		256
   62  
   63  struct dtrace_probe;
   64  struct dtrace_ecb;
   65  struct dtrace_predicate;
   66  struct dtrace_action;
   67  struct dtrace_provider;
   68  struct dtrace_state;
   69  
   70  typedef struct dtrace_probe dtrace_probe_t;
   71  typedef struct dtrace_ecb dtrace_ecb_t;
   72  typedef struct dtrace_predicate dtrace_predicate_t;
   73  typedef struct dtrace_action dtrace_action_t;
   74  typedef struct dtrace_provider dtrace_provider_t;
   75  typedef struct dtrace_meta dtrace_meta_t;
   76  typedef struct dtrace_state dtrace_state_t;
   77  typedef uint32_t dtrace_optid_t;	/* option identifier */
   78  typedef uint32_t dtrace_specid_t;	/* speculation identifier */
   79  typedef uint64_t dtrace_genid_t;	/* generation identifier (e.g. dtpr_gen) */
  80  
  81  /*
  82   * DTrace Probes
  83   *
  84   * The probe is the fundamental unit of the DTrace architecture.  Probes are
  85   * created by DTrace providers, and managed by the DTrace framework.  A probe
  86   * is identified by a unique <provider, module, function, name> tuple, and has
  87   * a unique probe identifier assigned to it.  (Some probes are not associated
  88   * with a specific point in text; these are called _unanchored probes_ and have
  89   * no module or function associated with them.)  Probes are represented as a
  90   * dtrace_probe structure.  To allow quick lookups based on each element of the
  91   * probe tuple, probes are hashed by each of provider, module, function and
  92   * name.  (If a lookup is performed based on a regular expression, a
  93   * dtrace_probekey is prepared, and a linear search is performed.) Each probe
  94   * is additionally pointed to by a linear array indexed by its identifier.  The
  95   * identifier is the provider's mechanism for indicating to the DTrace
  96   * framework that a probe has fired:  the identifier is passed as the first
  97   * argument to dtrace_probe(), where it is then mapped into the corresponding
  98   * dtrace_probe structure.  From the dtrace_probe structure, dtrace_probe() can
  99   * iterate over the probe's list of enabling control blocks; see "DTrace
 100   * Enabling Control Blocks", below.)
 101   */
  102  struct dtrace_probe {
  103  	dtrace_id_t dtpr_id;			/* probe identifier */
  104  	dtrace_ecb_t *dtpr_ecb;			/* head of ECB list; see below */
  105  	dtrace_ecb_t *dtpr_ecb_last;		/* tail of ECB list */
  106  	void *dtpr_arg;				/* provider argument */
  107  	dtrace_cacheid_t dtpr_predcache;	/* predicate cache ID */
  108  	int dtpr_aframes;			/* number of artificial frames */
  109  	dtrace_provider_t *dtpr_provider;	/* pointer to provider */
  110  	char *dtpr_mod;				/* probe's module name */
  111  	char *dtpr_func;			/* probe's function name */
  112  	char *dtpr_name;			/* probe's name */
  113  	dtrace_probe_t *dtpr_nextprov;		/* next in provider hash */
  114  	dtrace_probe_t *dtpr_prevprov;		/* previous in provider hash */
  115  	dtrace_probe_t *dtpr_nextmod;		/* next in module hash */
  116  	dtrace_probe_t *dtpr_prevmod;		/* previous in module hash */
  117  	dtrace_probe_t *dtpr_nextfunc;		/* next in function hash */
  118  	dtrace_probe_t *dtpr_prevfunc;		/* previous in function hash */
  119  	dtrace_probe_t *dtpr_nextname;		/* next in name hash */
  120  	dtrace_probe_t *dtpr_prevname;		/* previous in name hash */
  121  	dtrace_genid_t dtpr_gen;		/* probe generation ID */
  122  };
 123  
  124  typedef int dtrace_probekey_f(const char *, const char *, int);	/* tuple-element match function */
  125  
  126  typedef struct dtrace_probekey {
  127  	const char *dtpk_prov;			/* provider name to match */
  128  	dtrace_probekey_f *dtpk_pmatch;		/* provider matching function */
  129  	const char *dtpk_mod;			/* module name to match */
  130  	dtrace_probekey_f *dtpk_mmatch;		/* module matching function */
  131  	const char *dtpk_func;			/* func name to match */
  132  	dtrace_probekey_f *dtpk_fmatch;		/* func matching function */
  133  	const char *dtpk_name;			/* name to match */
  134  	dtrace_probekey_f *dtpk_nmatch;		/* name matching function */
  135  	dtrace_id_t dtpk_id;			/* identifier to match */
  136  } dtrace_probekey_t;
 137  
  138  typedef struct dtrace_hashbucket {
  139  	struct dtrace_hashbucket *dthb_next;	/* next on hash chain */
  140  	void *dthb_chain;			/* chain of hashed elements */
  141  	int dthb_len;				/* number of probes here */
  142  } dtrace_hashbucket_t;
  143  
  144  typedef const char* dtrace_strkey_f(void*, uintptr_t);	/* returns an element's string key */
 145  
  146  typedef struct dtrace_hash {
  147  	dtrace_hashbucket_t **dth_tab;	/* array of bucket chains */
  148  	int dth_size;			/* size of hash table */
  149  	int dth_mask;			/* mask to index into table */
  150  	int dth_nbuckets;		/* total number of buckets */
  151  	uintptr_t dth_nextoffs;		/* offset of next in element */
  152  	uintptr_t dth_prevoffs;		/* offset of prev in element */
  153  	dtrace_strkey_f *dth_getstr;	/* func to retrieve str in element */
  154  	uintptr_t dth_stroffs;		/* offset of str in element */
  155  } dtrace_hash_t;
 156  
 157  /*
 158   * DTrace Enabling Control Blocks
 159   *
 160   * When a provider wishes to fire a probe, it calls into dtrace_probe(),
 161   * passing the probe identifier as the first argument.  As described above,
 162   * dtrace_probe() maps the identifier into a pointer to a dtrace_probe_t
 163   * structure.  This structure contains information about the probe, and a
 164   * pointer to the list of Enabling Control Blocks (ECBs).  Each ECB points to
 165   * DTrace consumer state, and contains an optional predicate, and a list of
 166   * actions.  (Shown schematically below.)  The ECB abstraction allows a single
 167   * probe to be multiplexed across disjoint consumers, or across disjoint
 168   * enablings of a single probe within one consumer.
 169   *
 170   *   Enabling Control Block
 171   *        dtrace_ecb_t
 172   * +------------------------+
 173   * | dtrace_epid_t ---------+--------------> Enabled Probe ID (EPID)
 174   * | dtrace_state_t * ------+--------------> State associated with this ECB
 175   * | dtrace_predicate_t * --+---------+
 176   * | dtrace_action_t * -----+----+    |
 177   * | dtrace_ecb_t * ---+    |    |    |       Predicate (if any)
 178   * +-------------------+----+    |    |       dtrace_predicate_t
 179   *                     |         |    +---> +--------------------+
 180   *                     |         |          | dtrace_difo_t * ---+----> DIFO
 181   *                     |         |          +--------------------+
 182   *                     |         |
 183   *            Next ECB |         |           Action
 184   *            (if any) |         |       dtrace_action_t
 185   *                     :         +--> +-------------------+
 186   *                     :              | dtrace_actkind_t -+------> kind
 187   *                     v              | dtrace_difo_t * --+------> DIFO (if any)
 188   *                                    | dtrace_recdesc_t -+------> record descr.
 189   *                                    | dtrace_action_t * +------+
 190   *                                    +-------------------+      |
 191   *                                                               | Next action
 192   *                               +-------------------------------+  (if any)
 193   *                               |
 194   *                               |           Action
 195   *                               |       dtrace_action_t
 196   *                               +--> +-------------------+
 197   *                                    | dtrace_actkind_t -+------> kind
 198   *                                    | dtrace_difo_t * --+------> DIFO (if any)
 199   *                                    | dtrace_action_t * +------+
 200   *                                    +-------------------+      |
 201   *                                                               | Next action
 202   *                               +-------------------------------+  (if any)
 203   *                               |
 204   *                               :
 205   *                               v
 206   *
 207   *
 208   * dtrace_probe() iterates over the ECB list.  If the ECB needs less space
 209   * than is available in the principal buffer, the ECB is processed:  if the
 210   * predicate is non-NULL, the DIF object is executed.  If the result is
 211   * non-zero, the action list is processed, with each action being executed
 212   * accordingly.  When the action list has been completely executed, processing
 213   * advances to the next ECB. The ECB abstraction allows disjoint consumers
 214   * to multiplex on single probes.
 215   *
 216   * Execution of the ECB results in consuming dte_size bytes in the buffer
 217   * to record data.  During execution, dte_needed bytes must be available in
 218   * the buffer.  This space is used for both recorded data and tuple data.
 219   */
  220  struct dtrace_ecb {
  221  	dtrace_epid_t dte_epid;			/* enabled probe ID */
  222  	uint32_t dte_alignment;			/* required record alignment */
  223  	size_t dte_needed;			/* space needed for execution */
  224  	size_t dte_size;			/* size of recorded payload */
  225  	dtrace_predicate_t *dte_predicate;	/* predicate, if any */
  226  	dtrace_action_t *dte_action;		/* actions, if any */
  227  	dtrace_ecb_t *dte_next;			/* next ECB on probe */
  228  	dtrace_state_t *dte_state;		/* pointer to state */
  229  	uint32_t dte_cond;			/* security condition */
  230  	dtrace_probe_t *dte_probe;		/* pointer to probe */
  231  	dtrace_action_t *dte_action_last;	/* last action on ECB */
  232  	uint64_t dte_uarg;			/* library argument */
  233  };
 234  
  235  struct dtrace_predicate {
  236  	dtrace_difo_t *dtp_difo;		/* DIF object */
  237  	dtrace_cacheid_t dtp_cacheid;		/* cache ID (cached in dtpr_predcache) */
  238  	int dtp_refcnt;				/* reference count */
  239  };
 240  
  241  struct dtrace_action {
  242  	dtrace_actkind_t dta_kind;		/* kind of action */
  243  	uint16_t dta_intuple;			/* boolean:  in aggregation tuple */
  244  	uint32_t dta_refcnt;			/* reference count */
  245  	dtrace_difo_t *dta_difo;		/* pointer to DIFO */
  246  	dtrace_recdesc_t dta_rec;		/* record description */
  247  	dtrace_action_t *dta_prev;		/* previous action */
  248  	dtrace_action_t *dta_next;		/* next action */
  249  };
 250  
  251  typedef struct dtrace_aggregation {
  252  	dtrace_action_t dtag_action;		/* action; must be first */
  253  	dtrace_aggid_t dtag_id;			/* identifier */
  254  	dtrace_ecb_t *dtag_ecb;			/* corresponding ECB */
  255  	dtrace_action_t *dtag_first;		/* first action in tuple */
  256  	uint32_t dtag_base;			/* base of aggregation */
  257  	uint8_t dtag_hasarg;			/* boolean:  has argument */
  258  	uint64_t dtag_initial;			/* initial value */
  259  	void (*dtag_aggregate)(uint64_t *, uint64_t, uint64_t);	/* aggregating function */
  260  } dtrace_aggregation_t;
 261  
 262  /*
 263   * DTrace Buffers
 264   *
 265   * Principal buffers, aggregation buffers, and speculative buffers are all
 266   * managed with the dtrace_buffer structure.  By default, this structure
 267   * includes twin data buffers -- dtb_tomax and dtb_xamot -- that serve as the
 268   * active and passive buffers, respectively.  For speculative buffers,
 269   * dtb_xamot will be NULL; for "ring" and "fill" buffers, dtb_xamot will point
 270   * to a scratch buffer.  For all buffer types, the dtrace_buffer structure is
 271   * always allocated on a per-CPU basis; a single dtrace_buffer structure is
 272   * never shared among CPUs.  (That is, there is never true sharing of the
 273   * dtrace_buffer structure; to prevent false sharing of the structure, it must
 274   * always be aligned to the coherence granularity -- generally 64 bytes.)
 275   *
 276   * One of the critical design decisions of DTrace is that a given ECB always
 277   * stores the same quantity and type of data.  This is done to assure that the
 278   * only metadata required for an ECB's traced data is the EPID.  That is, from
 279   * the EPID, the consumer can determine the data layout.  (The data buffer
 280   * layout is shown schematically below.)  By assuring that one can determine
 281   * data layout from the EPID, the metadata stream can be separated from the
  282   * data stream -- simplifying the data stream enormously.  A record header
  283   * precedes the recorded data: the dtrace_rechdr_t structure, which
 284   * includes the EPID and a high-resolution timestamp used for output ordering
 285   * consistency.
 286   *
 287   *      base of data buffer --->  +--------+--------------------+--------+
 288   *                                | rechdr | data               | rechdr |
 289   *                                +--------+------+--------+----+--------+
 290   *                                | data          | rechdr | data        |
 291   *                                +---------------+--------+-------------+
 292   *                                | data, cont.                          |
 293   *                                +--------+--------------------+--------+
 294   *                                | rechdr | data               |        |
 295   *                                +--------+--------------------+        |
 296   *                                |                ||                    |
 297   *                                |                ||                    |
 298   *                                |                \/                    |
 299   *                                :                                      :
 300   *                                .                                      .
 301   *                                .                                      .
 302   *                                .                                      .
 303   *                                :                                      :
 304   *                                |                                      |
 305   *     limit of data buffer --->  +--------------------------------------+
 306   *
 307   * When evaluating an ECB, dtrace_probe() determines if the ECB's needs of the
 308   * principal buffer (both scratch and payload) exceed the available space.  If
 309   * the ECB's needs exceed available space (and if the principal buffer policy
 310   * is the default "switch" policy), the ECB is dropped, the buffer's drop count
 311   * is incremented, and processing advances to the next ECB.  If the ECB's needs
 312   * can be met with the available space, the ECB is processed, but the offset in
 313   * the principal buffer is only advanced if the ECB completes processing
 314   * without error.
 315   *
 316   * When a buffer is to be switched (either because the buffer is the principal
 317   * buffer with a "switch" policy or because it is an aggregation buffer), a
 318   * cross call is issued to the CPU associated with the buffer.  In the cross
 319   * call context, interrupts are disabled, and the active and the inactive
 320   * buffers are atomically switched.  This involves switching the data pointers,
 321   * copying the various state fields (offset, drops, errors, etc.) into their
 322   * inactive equivalents, and clearing the state fields.  Because interrupts are
 323   * disabled during this procedure, the switch is guaranteed to appear atomic to
 324   * dtrace_probe().
 325   *
 326   * DTrace Ring Buffering
 327   *
 328   * To process a ring buffer correctly, one must know the oldest valid record.
 329   * Processing starts at the oldest record in the buffer and continues until
 330   * the end of the buffer is reached.  Processing then resumes starting with
 331   * the record stored at offset 0 in the buffer, and continues until the
 332   * youngest record is processed.  If trace records are of a fixed-length,
 333   * determining the oldest record is trivial:
 334   *
 335   *   - If the ring buffer has not wrapped, the oldest record is the record
 336   *     stored at offset 0.
 337   *
 338   *   - If the ring buffer has wrapped, the oldest record is the record stored
 339   *     at the current offset.
 340   *
 341   * With variable length records, however, just knowing the current offset
 342   * doesn't suffice for determining the oldest valid record:  assuming that one
 343   * allows for arbitrary data, one has no way of searching forward from the
 344   * current offset to find the oldest valid record.  (That is, one has no way
 345   * of separating data from metadata.) It would be possible to simply refuse to
 346   * process any data in the ring buffer between the current offset and the
 347   * limit, but this leaves (potentially) an enormous amount of otherwise valid
 348   * data unprocessed.
 349   *
 350   * To effect ring buffering, we track two offsets in the buffer:  the current
 351   * offset and the _wrapped_ offset.  If a request is made to reserve some
 352   * amount of data, and the buffer has wrapped, the wrapped offset is
 353   * incremented until the wrapped offset minus the current offset is greater
 354   * than or equal to the reserve request.  This is done by repeatedly looking
 355   * up the ECB corresponding to the EPID at the current wrapped offset, and
 356   * incrementing the wrapped offset by the size of the data payload
 357   * corresponding to that ECB.  If this offset is greater than or equal to the
 358   * limit of the data buffer, the wrapped offset is set to 0.  Thus, the
 359   * current offset effectively "chases" the wrapped offset around the buffer.
 360   * Schematically:
 361   *
 362   *      base of data buffer --->  +------+--------------------+------+
 363   *                                | EPID | data               | EPID |
 364   *                                +------+--------+------+----+------+
 365   *                                | data          | EPID | data      |
 366   *                                +---------------+------+-----------+
 367   *                                | data, cont.                      |
 368   *                                +------+---------------------------+
 369   *                                | EPID | data                      |
 370   *           current offset --->  +------+---------------------------+
 371   *                                | invalid data                     |
 372   *           wrapped offset --->  +------+--------------------+------+
 373   *                                | EPID | data               | EPID |
 374   *                                +------+--------+------+----+------+
 375   *                                | data          | EPID | data      |
 376   *                                +---------------+------+-----------+
 377   *                                :                                  :
 378   *                                .                                  .
 379   *                                .        ... valid data ...        .
 380   *                                .                                  .
 381   *                                :                                  :
 382   *                                +------+-------------+------+------+
 383   *                                | EPID | data        | EPID | data |
 384   *                                +------+------------++------+------+
 385   *                                | data, cont.       | leftover     |
 386   *     limit of data buffer --->  +-------------------+--------------+
 387   *
 388   * If the amount of requested buffer space exceeds the amount of space
 389   * available between the current offset and the end of the buffer:
 390   *
 391   *  (1)  all words in the data buffer between the current offset and the limit
 392   *       of the data buffer (marked "leftover", above) are set to
 393   *       DTRACE_EPIDNONE
 394   *
 395   *  (2)  the wrapped offset is set to zero
 396   *
 397   *  (3)  the iteration process described above occurs until the wrapped offset
 398   *       is greater than the amount of desired space.
 399   *
 400   * The wrapped offset is implemented by (re-)using the inactive offset.
 401   * In a "switch" buffer policy, the inactive offset stores the offset in
 402   * the inactive buffer; in a "ring" buffer policy, it stores the wrapped
 403   * offset.
 404   *
 405   * DTrace Scratch Buffering
 406   *
 407   * Some ECBs may wish to allocate dynamically-sized temporary scratch memory.
 408   * To accommodate such requests easily, scratch memory may be allocated in
 409   * the buffer beyond the current offset plus the needed memory of the current
 410   * ECB.  If there isn't sufficient room in the buffer for the requested amount
 411   * of scratch space, the allocation fails and an error is generated.  Scratch
 412   * memory is tracked in the dtrace_mstate_t and is automatically freed when
 413   * the ECB ceases processing.  Note that ring buffers cannot allocate their
 414   * scratch from the principal buffer -- lest they needlessly overwrite older,
 415   * valid data.  Ring buffers therefore have their own dedicated scratch buffer
 416   * from which scratch is allocated.
 417   */
  418  #define	DTRACEBUF_RING		0x0001		/* bufpolicy set to "ring" */
  419  #define	DTRACEBUF_FILL		0x0002		/* bufpolicy set to "fill" */
  420  #define	DTRACEBUF_NOSWITCH	0x0004		/* do not switch buffer */
  421  #define	DTRACEBUF_WRAPPED	0x0008		/* ring buffer has wrapped */
  422  #define	DTRACEBUF_DROPPED	0x0010		/* drops occurred */
  423  #define	DTRACEBUF_ERROR		0x0020		/* errors occurred */
  424  #define	DTRACEBUF_FULL		0x0040		/* "fill" buffer is full */
  425  #define	DTRACEBUF_CONSUMED	0x0080		/* buffer has been consumed */
  426  #define	DTRACEBUF_INACTIVE	0x0100		/* buffer is not yet active */
  427  
  428  typedef struct dtrace_buffer {
  429  	uint64_t dtb_offset;			/* current offset in buffer */
  430  	uint64_t dtb_cur_limit;			/* current limit before signaling/dropping */
  431  	uint64_t dtb_limit;			/* limit before signaling */
  432  	uint64_t dtb_size;			/* size of buffer */
  433  	uint32_t dtb_flags;			/* flags (DTRACEBUF_*, above) */
  434  	uint32_t dtb_drops;			/* number of drops */
  435  	caddr_t dtb_tomax;			/* active buffer */
  436  	caddr_t dtb_xamot;			/* inactive buffer */
  437  	uint32_t dtb_xamot_flags;		/* inactive flags */
  438  	uint32_t dtb_xamot_drops;		/* drops in inactive buffer */
  439  	uint64_t dtb_xamot_offset;		/* offset in inactive buffer */
  440  	uint32_t dtb_errors;			/* number of errors */
  441  	uint32_t dtb_xamot_errors;		/* errors in inactive buffer */
  442  #ifndef _LP64
  443  	uint64_t dtb_pad1;			/* pad to match 64-bit field layout on ILP32 */
  444  #endif
  445  	uint64_t dtb_switched;			/* time of last switch */
  446  	uint64_t dtb_interval;			/* observed switch interval */
  447  	uint64_t dtb_pad2[4];			/* pad to avoid false sharing */
  448  } dtrace_buffer_t;
 449  
 450  /*
 451   * DTrace Aggregation Buffers
 452   *
 453   * Aggregation buffers use much of the same mechanism as described above
 454   * ("DTrace Buffers").  However, because an aggregation is fundamentally a
 455   * hash, there exists dynamic metadata associated with an aggregation buffer
 456   * that is not associated with other kinds of buffers.  This aggregation
 457   * metadata is _only_ relevant for the in-kernel implementation of
 458   * aggregations; it is not actually relevant to user-level consumers.  To do
 459   * this, we allocate dynamic aggregation data (hash keys and hash buckets)
 460   * starting below the _limit_ of the buffer, and we allocate data from the
 461   * _base_ of the buffer.  When the aggregation buffer is copied out, _only_ the
 462   * data is copied out; the metadata is simply discarded.  Schematically,
 463   * aggregation buffers look like:
 464   *
 465   *      base of data buffer --->  +-------+------+-----------+-------+
 466   *                                | aggid | key  | value     | aggid |
 467   *                                +-------+------+-----------+-------+
 468   *                                | key                              |
 469   *                                +-------+-------+-----+------------+
 470   *                                | value | aggid | key | value      |
 471   *                                +-------+------++-----+------+-----+
 472   *                                | aggid | key  | value       |     |
 473   *                                +-------+------+-------------+     |
 474   *                                |                ||                |
 475   *                                |                ||                |
 476   *                                |                \/                |
 477   *                                :                                  :
 478   *                                .                                  .
 479   *                                .                                  .
 480   *                                .                                  .
 481   *                                :                                  :
 482   *                                |                /\                |
 483   *                                |                ||   +------------+
 484   *                                |                ||   |            |
 485   *                                +---------------------+            |
 486   *                                | hash keys                        |
 487   *                                | (dtrace_aggkey structures)       |
 488   *                                |                                  |
 489   *                                +----------------------------------+
 490   *                                | hash buckets                     |
 491   *                                | (dtrace_aggbuffer structure)     |
 492   *                                |                                  |
 493   *     limit of data buffer --->  +----------------------------------+
 494   *
 495   *
 496   * As implied above, just as we assure that ECBs always store a constant
 497   * amount of data, we assure that a given aggregation -- identified by its
 498   * aggregation ID -- always stores data of a constant quantity and type.
 499   * As with EPIDs, this allows the aggregation ID to serve as the metadata for a
 500   * given record.
 501   *
 502   * Note that the size of the dtrace_aggkey structure must be sizeof (uintptr_t)
  503   * aligned.  (If the structure changes such that this becomes false, an
 504   * assertion will fail in dtrace_aggregate().)
 505   */
  506  typedef struct dtrace_aggkey {
  507  	uint32_t dtak_hashval;			/* hash value */
  508  	uint32_t dtak_action:4;			/* action -- 4 bits */
  509  	uint32_t dtak_size:28;			/* size -- 28 bits */
  510  	caddr_t dtak_data;			/* pointer to key/value data in buffer */
  511  	struct dtrace_aggkey *dtak_next;	/* next in hash chain */
  512  } dtrace_aggkey_t;
 513  
  514  typedef struct dtrace_aggbuffer {
  515  	uintptr_t dtagb_hashsize;		/* number of buckets */
  516  	uintptr_t dtagb_free;			/* free list of keys */
  517  	dtrace_aggkey_t **dtagb_hash;		/* hash table of aggregation keys */
  518  } dtrace_aggbuffer_t;
 519  
 520  /*
 521   * DTrace Speculations
 522   *
 523   * Speculations have a per-CPU buffer and a global state.  Once a speculation
  524   * buffer has been committed or discarded, it cannot be reused until all CPUs
 525   * have taken the same action (commit or discard) on their respective
 526   * speculative buffer.  However, because DTrace probes may execute in arbitrary
 527   * context, other CPUs cannot simply be cross-called at probe firing time to
 528   * perform the necessary commit or discard.  The speculation states thus
 529   * optimize for the case that a speculative buffer is only active on one CPU at
 530   * the time of a commit() or discard() -- for if this is the case, other CPUs
 531   * need not take action, and the speculation is immediately available for
 532   * reuse.  If the speculation is active on multiple CPUs, it must be
 533   * asynchronously cleaned -- potentially leading to a higher rate of dirty
 534   * speculative drops.  The speculation states are as follows:
 535   *
 536   *  DTRACESPEC_INACTIVE       <= Initial state; inactive speculation
 537   *  DTRACESPEC_ACTIVE         <= Allocated, but not yet speculatively traced to
 538   *  DTRACESPEC_ACTIVEONE      <= Speculatively traced to on one CPU
 539   *  DTRACESPEC_ACTIVEMANY     <= Speculatively traced to on more than one CPU
  540   *  DTRACESPEC_COMMITTING     <= Currently being committed on one CPU
  541   *  DTRACESPEC_COMMITTINGMANY <= Currently being committed on many CPUs
 542   *  DTRACESPEC_DISCARDING     <= Currently being discarded on many CPUs
 543   *
 544   * The state transition diagram is as follows:
 545   *
 546   *     +----------------------------------------------------------+
 547   *     |                                                          |
 548   *     |                      +------------+                      |
 549   *     |  +-------------------| COMMITTING |<-----------------+   |
 550   *     |  |                   +------------+                  |   |
 551   *     |  | copied spec.            ^             commit() on |   | discard() on
 552   *     |  | into principal          |              active CPU |   | active CPU
 553   *     |  |                         | commit()                |   |
 554   *     V  V                         |                         |   |
 555   * +----------+                 +--------+                +-----------+
 556   * | INACTIVE |---------------->| ACTIVE |--------------->| ACTIVEONE |
 557   * +----------+  speculation()  +--------+  speculate()   +-----------+
 558   *     ^  ^                         |                         |   |
 559   *     |  |                         | discard()               |   |
 560   *     |  | asynchronously          |            discard() on |   | speculate()
 561   *     |  | cleaned                 V            inactive CPU |   | on inactive
 562   *     |  |                   +------------+                  |   | CPU
 563   *     |  +-------------------| DISCARDING |<-----------------+   |
 564   *     |                      +------------+                      |
 565   *     | asynchronously             ^                             |
 566   *     | copied spec.               |       discard()             |
 567   *     | into principal             +------------------------+    |
 568   *     |                                                     |    V
 569   *  +----------------+             commit()              +------------+
 570   *  | COMMITTINGMANY |<----------------------------------| ACTIVEMANY |
 571   *  +----------------+                                   +------------+
 572   */
typedef enum dtrace_speculation_state {
	DTRACESPEC_INACTIVE = 0,	/* initial state; inactive speculation */
	DTRACESPEC_ACTIVE,		/* allocated, but not yet traced to */
	DTRACESPEC_ACTIVEONE,		/* speculatively traced to on one CPU */
	DTRACESPEC_ACTIVEMANY,		/* traced to on more than one CPU */
	DTRACESPEC_COMMITTING,		/* being committed on one CPU */
	DTRACESPEC_COMMITTINGMANY,	/* being committed on many CPUs */
	DTRACESPEC_DISCARDING		/* being discarded on many CPUs */
} dtrace_speculation_state_t;
 582  
/*
 * Per-speculation state:  the current position in the state transition
 * diagram above, a flag noting whether the speculation is currently being
 * asynchronously cleaned, and the speculative buffer backing it.
 */
typedef struct dtrace_speculation {
	dtrace_speculation_state_t dtsp_state;	/* current speculation state */
	int dtsp_cleaning;			/* non-zero if being cleaned */
	dtrace_buffer_t *dtsp_buffer;		/* speculative buffer */
} dtrace_speculation_t;
 588  
 589  /*
 590   * DTrace Dynamic Variables
 591   *
 592   * The dynamic variable problem is obviously decomposed into two subproblems:
 593   * allocating new dynamic storage, and freeing old dynamic storage.  The
 594   * presence of the second problem makes the first much more complicated -- or
 595   * rather, the absence of the second renders the first trivial.  This is the
 596   * case with aggregations, for which there is effectively no deallocation of
 597   * dynamic storage.  (Or more accurately, all dynamic storage is deallocated
 598   * when a snapshot is taken of the aggregation.)  As DTrace dynamic variables
 599   * allow for both dynamic allocation and dynamic deallocation, the
 600   * implementation of dynamic variables is quite a bit more complicated than
 601   * that of their aggregation kin.
 602   *
 603   * We observe that allocating new dynamic storage is tricky only because the
 604   * size can vary -- the allocation problem is much easier if allocation sizes
 605   * are uniform.  We further observe that in D, the size of dynamic variables is
 606   * actually _not_ dynamic -- dynamic variable sizes may be determined by static
 607   * analysis of DIF text.  (This is true even of putatively dynamically-sized
 608   * objects like strings and stacks, the sizes of which are dictated by the
 609   * "stringsize" and "stackframes" variables, respectively.)  We exploit this by
 610   * performing this analysis on all DIF before enabling any probes.  For each
 611   * dynamic load or store, we calculate the dynamically-allocated size plus the
 612   * size of the dtrace_dynvar structure plus the storage required to key the
 613   * data.  For all DIF, we take the largest value and dub it the _chunksize_.
 614   * We then divide dynamic memory into two parts:  a hash table that is wide
 615   * enough to have every chunk in its own bucket, and a larger region of equal
 616   * chunksize units.  Whenever we wish to dynamically allocate a variable, we
 617   * always allocate a single chunk of memory.  Depending on the uniformity of
 618   * allocation, this will waste some amount of memory -- but it eliminates the
 619   * non-determinism inherent in traditional heap fragmentation.
 620   *
 621   * Dynamic objects are allocated by storing a non-zero value to them; they are
 622   * deallocated by storing a zero value to them.  Dynamic variables are
 623   * complicated enormously by being shared between CPUs.  In particular,
 624   * consider the following scenario:
 625   *
 626   *                 CPU A                                 CPU B
 627   *  +---------------------------------+   +---------------------------------+
 628   *  |                                 |   |                                 |
 629   *  | allocates dynamic object a[123] |   |                                 |
 630   *  | by storing the value 345 to it  |   |                                 |
 631   *  |                               --------->                              |
 632   *  |                                 |   | wishing to load from object     |
 633   *  |                                 |   | a[123], performs lookup in      |
 634   *  |                                 |   | dynamic variable space          |
 635   *  |                               <---------                              |
 636   *  | deallocates object a[123] by    |   |                                 |
 637   *  | storing 0 to it                 |   |                                 |
 638   *  |                                 |   |                                 |
 639   *  | allocates dynamic object b[567] |   | performs load from a[123]       |
 640   *  | by storing the value 789 to it  |   |                                 |
 641   *  :                                 :   :                                 :
 642   *  .                                 .   .                                 .
 643   *
 644   * This is obviously a race in the D program, but there are nonetheless only
 645   * two valid values for CPU B's load from a[123]:  345 or 0.  Most importantly,
 646   * CPU B may _not_ see the value 789 for a[123].
 647   *
 648   * There are essentially two ways to deal with this:
 649   *
 650   *  (1)  Explicitly spin-lock variables.  That is, if CPU B wishes to load
 651   *       from a[123], it needs to lock a[123] and hold the lock for the
 652   *       duration that it wishes to manipulate it.
 653   *
 654   *  (2)  Avoid reusing freed chunks until it is known that no CPU is referring
 655   *       to them.
 656   *
 657   * The implementation of (1) is rife with complexity, because it requires the
 658   * user of a dynamic variable to explicitly decree when they are done using it.
 659   * Were all variables by value, this perhaps wouldn't be debilitating -- but
 660   * dynamic variables of non-scalar types are tracked by reference.  That is, if
 661   * a dynamic variable is, say, a string, and that variable is to be traced to,
 662   * say, the principal buffer, the DIF emulation code returns to the main
 663   * dtrace_probe() loop a pointer to the underlying storage, not the contents of
 664   * the storage.  Further, code calling on DIF emulation would have to be aware
 665   * that the DIF emulation has returned a reference to a dynamic variable that
 666   * has been potentially locked.  The variable would have to be unlocked after
 667   * the main dtrace_probe() loop is finished with the variable, and the main
 668   * dtrace_probe() loop would have to be careful to not call any further DIF
 669   * emulation while the variable is locked to avoid deadlock.  More generally,
 670   * if one were to implement (1), DIF emulation code dealing with dynamic
 671   * variables could only deal with one dynamic variable at a time (lest deadlock
 672   * result).  To sum, (1) exports too much subtlety to the users of dynamic
 673   * variables -- increasing maintenance burden and imposing serious constraints
 674   * on future DTrace development.
 675   *
 676   * The implementation of (2) is also complex, but the complexity is more
 677   * manageable.  We need to be sure that when a variable is deallocated, it is
 678   * not placed on a traditional free list, but rather on a _dirty_ list.  Once a
 679   * variable is on a dirty list, it cannot be found by CPUs performing a
 680   * subsequent lookup of the variable -- but it may still be in use by other
 681   * CPUs.  To assure that all CPUs that may be seeing the old variable have
 682   * cleared out of probe context, a dtrace_sync() can be issued.  Once the
 683   * dtrace_sync() has completed, it can be known that all CPUs are done
 684   * manipulating the dynamic variable -- the dirty list can be atomically
 685   * appended to the free list.  Unfortunately, there's a slight hiccup in this
 686   * mechanism:  dtrace_sync() may not be issued from probe context.  The
 687   * dtrace_sync() must be therefore issued asynchronously from non-probe
 688   * context.  For this we rely on the DTrace cleaner, a cyclic that runs at the
 689   * "cleanrate" frequency.  To ease this implementation, we define several chunk
 690   * lists:
 691   *
 692   *   - Dirty.  Deallocated chunks, not yet cleaned.  Not available.
 693   *
 694   *   - Rinsing.  Formerly dirty chunks that are currently being asynchronously
 695   *     cleaned.  Not available, but will be shortly.  Dynamic variable
 696   *     allocation may not spin or block for availability, however.
 697   *
 698   *   - Clean.  Clean chunks, ready for allocation -- but not on the free list.
 699   *
 700   *   - Free.  Available for allocation.
 701   *
 702   * Moreover, to avoid absurd contention, _each_ of these lists is implemented
 703   * on a per-CPU basis.  This is only for performance, not correctness; chunks
 704   * may be allocated from another CPU's free list.  The algorithm for allocation
 705   * then is this:
 706   *
 707   *   (1)  Attempt to atomically allocate from current CPU's free list.  If list
 708   *        is non-empty and allocation is successful, allocation is complete.
 709   *
 710   *   (2)  If the clean list is non-empty, atomically move it to the free list,
 711   *        and reattempt (1).
 712   *
 713   *   (3)  If the dynamic variable space is in the CLEAN state, look for free
 714   *        and clean lists on other CPUs by setting the current CPU to the next
 715   *        CPU, and reattempting (1).  If the next CPU is the current CPU (that
 716   *        is, if all CPUs have been checked), atomically switch the state of
 717   *        the dynamic variable space based on the following:
 718   *
 719   *        - If no free chunks were found and no dirty chunks were found,
 720   *          atomically set the state to EMPTY.
 721   *
 722   *        - If dirty chunks were found, atomically set the state to DIRTY.
 723   *
 724   *        - If rinsing chunks were found, atomically set the state to RINSING.
 725   *
 726   *   (4)  Based on state of dynamic variable space state, increment appropriate
 727   *        counter to indicate dynamic drops (if in EMPTY state) vs. dynamic
 728   *        dirty drops (if in DIRTY state) vs. dynamic rinsing drops (if in
 729   *        RINSING state).  Fail the allocation.
 730   *
 731   * The cleaning cyclic operates with the following algorithm:  for all CPUs
 732   * with a non-empty dirty list, atomically move the dirty list to the rinsing
 733   * list.  Perform a dtrace_sync().  For all CPUs with a non-empty rinsing list,
 734   * atomically move the rinsing list to the clean list.  Perform another
 735   * dtrace_sync().  By this point, all CPUs have seen the new clean list; the
 736   * state of the dynamic variable space can be restored to CLEAN.
 737   *
 738   * There exist two final races that merit explanation.  The first is a simple
 739   * allocation race:
 740   *
 741   *                 CPU A                                 CPU B
 742   *  +---------------------------------+   +---------------------------------+
 743   *  |                                 |   |                                 |
 744   *  | allocates dynamic object a[123] |   | allocates dynamic object a[123] |
 745   *  | by storing the value 345 to it  |   | by storing the value 567 to it  |
 746   *  |                                 |   |                                 |
 747   *  :                                 :   :                                 :
 748   *  .                                 .   .                                 .
 749   *
 750   * Again, this is a race in the D program.  It can be resolved by having a[123]
 751   * hold the value 345 or a[123] hold the value 567 -- but it must be true that
 752   * a[123] have only _one_ of these values.  (That is, the racing CPUs may not
 753   * put the same element twice on the same hash chain.)  This is resolved
 754   * simply:  before the allocation is undertaken, the start of the new chunk's
 755   * hash chain is noted.  Later, after the allocation is complete, the hash
 756   * chain is atomically switched to point to the new element.  If this fails
 757   * (because of either concurrent allocations or an allocation concurrent with a
 758   * deletion), the newly allocated chunk is deallocated to the dirty list, and
 759   * the whole process of looking up (and potentially allocating) the dynamic
 760   * variable is reattempted.
 761   *
 762   * The final race is a simple deallocation race:
 763   *
 764   *                 CPU A                                 CPU B
 765   *  +---------------------------------+   +---------------------------------+
 766   *  |                                 |   |                                 |
 767   *  | deallocates dynamic object      |   | deallocates dynamic object      |
 768   *  | a[123] by storing the value 0   |   | a[123] by storing the value 0   |
 769   *  | to it                           |   | to it                           |
 770   *  |                                 |   |                                 |
 771   *  :                                 :   :                                 :
 772   *  .                                 .   .                                 .
 773   *
 774   * Once again, this is a race in the D program, but it is one that we must
 775   * handle without corrupting the underlying data structures.  Because
 776   * deallocations require the deletion of a chunk from the middle of a hash
 777   * chain, we cannot use a single-word atomic operation to remove it.  For this,
 778   * we add a spin lock to the hash buckets that is _only_ used for deallocations
 779   * (allocation races are handled as above).  Further, this spin lock is _only_
 780   * held for the duration of the delete; before control is returned to the DIF
 781   * emulation code, the hash bucket is unlocked.
 782   */
/*
 * A single dynamic variable key:  either an immediate value (when
 * dttk_size is 0) or a pointer to dttk_size bytes of by-reference data.
 */
typedef struct dtrace_key {
	uint64_t dttk_value;			/* data value or data pointer */
	uint64_t dttk_size;			/* 0 if by-val, >0 if by-ref */
} dtrace_key_t;
 787  
/*
 * The tuple of keys identifying a dynamic variable.  dtt_key is a trailing
 * variable-length array of dtt_nkeys entries declared with the pre-C99
 * [1] idiom; storage for the full tuple is allocated beyond the end of the
 * structure (it is part of the chunk size computed per the comment above),
 * so the [1] must not be changed to a flexible array member -- sizeof
 * arithmetic elsewhere depends on it.
 */
typedef struct dtrace_tuple {
	uint32_t dtt_nkeys;			/* number of keys in tuple */
	uint32_t dtt_pad;			/* padding */
	dtrace_key_t dtt_key[1];		/* array of tuple keys */
} dtrace_tuple_t;
 793  
/*
 * A dynamic variable chunk:  its hash value (zero denotes a free chunk),
 * linkage onto either a hash chain or one of the free/dirty/rinsing/clean
 * lists, a pointer to the variable's data, and the keying tuple.
 * dtdv_tuple must remain the last member:  its trailing key array is
 * variable length.
 */
typedef struct dtrace_dynvar {
	uint64_t dtdv_hashval;			/* hash value -- 0 if free */
	struct dtrace_dynvar *dtdv_next;	/* next on list or hash chain */
	void *dtdv_data;			/* pointer to data */
	dtrace_tuple_t dtdv_tuple;		/* tuple key */
} dtrace_dynvar_t;
 800  
/*
 * Operation requested of a dynamic variable lookup:  allocate the variable
 * if it is not found, perform a lookup without allocating, or deallocate
 * the variable.
 */
typedef enum dtrace_dynvar_op {
	DTRACE_DYNVAR_ALLOC,		/* allocate variable if not found */
	DTRACE_DYNVAR_NOALLOC,		/* look up only; never allocate */
	DTRACE_DYNVAR_DEALLOC		/* deallocate the variable */
} dtrace_dynvar_op_t;
 806  
/*
 * A dynamic variable hash bucket.  dtdh_lock is the per-bucket spin lock
 * taken _only_ for deallocations (see the deallocation race discussion
 * above); allocation races are resolved with atomic chain updates instead.
 * The padding brings each bucket to 64 bytes (8 pointer-sized words on
 * _LP64, 16 on ILP32) to avoid false sharing between buckets.
 */
typedef struct dtrace_dynhash {
	dtrace_dynvar_t *dtdh_chain;		/* hash chain for this bucket */
	uintptr_t dtdh_lock;			/* deallocation lock */
#ifdef _LP64
	uintptr_t dtdh_pad[6];			/* pad to avoid false sharing */
#else
	uintptr_t dtdh_pad[14];			/* pad to avoid false sharing */
#endif
} dtrace_dynhash_t;
 816  
/*
 * Per-CPU dynamic variable state:  the four chunk lists described above
 * (free, dirty, rinsing, clean) and the three per-CPU dynamic drop
 * counters, padded to avoid false sharing between CPUs.
 */
typedef struct dtrace_dstate_percpu {
	dtrace_dynvar_t *dtdsc_free;		/* free list for this CPU */
	dtrace_dynvar_t *dtdsc_dirty;		/* dirty list for this CPU */
	dtrace_dynvar_t *dtdsc_rinsing;		/* rinsing list for this CPU */
	dtrace_dynvar_t *dtdsc_clean;		/* clean list for this CPU */
	uint64_t dtdsc_drops;			/* number of capacity drops */
	uint64_t dtdsc_dirty_drops;		/* number of dirty drops */
	uint64_t dtdsc_rinsing_drops;		/* number of rinsing drops */
#ifdef _LP64
	uint64_t dtdsc_pad;			/* pad to avoid false sharing */
#else
	uint64_t dtdsc_pad[2];			/* pad to avoid false sharing */
#endif
} dtrace_dstate_percpu_t;
 831  
/*
 * Overall state of the dynamic variable space, switched atomically by
 * step (3) of the allocation algorithm above and restored to CLEAN by the
 * cleaning cyclic.
 */
typedef enum dtrace_dstate_state {
	DTRACE_DSTATE_CLEAN = 0,	/* free chunks may be available */
	DTRACE_DSTATE_EMPTY,		/* no free and no dirty chunks found */
	DTRACE_DSTATE_DIRTY,		/* dirty chunks found */
	DTRACE_DSTATE_RINSING		/* rinsing chunks found */
} dtrace_dstate_state_t;
 838  
/*
 * Dynamic variable state:  the dynamic variable space itself, the hash
 * table and its bucket count, the uniform chunk size (the _chunksize_
 * described above), the overall space state, and the per-CPU chunk lists.
 */
typedef struct dtrace_dstate {
	void *dtds_base;			/* base of dynamic var. space */
	size_t dtds_size;			/* size of dynamic var. space */
	size_t dtds_hashsize;			/* number of buckets in hash */
	size_t dtds_chunksize;			/* size of each chunk */
	dtrace_dynhash_t *dtds_hash;		/* pointer to hash table */
	dtrace_dstate_state_t dtds_state;	/* current dynamic var. state */
	dtrace_dstate_percpu_t *dtds_percpu;	/* per-CPU dyn. var. state */
} dtrace_dstate_t;
 848  
 849  /*
 850   * DTrace Variable State
 851   *
 852   * The DTrace variable state tracks user-defined variables in its dtrace_vstate
 853   * structure.  Each DTrace consumer has exactly one dtrace_vstate structure,
 854   * but some dtrace_vstate structures may exist without a corresponding DTrace
 855   * consumer (see "DTrace Helpers", below).  As described in <sys/dtrace.h>,
 856   * user-defined variables can have one of three scopes:
 857   *
 858   *  DIFV_SCOPE_GLOBAL  =>  global scope
 859   *  DIFV_SCOPE_THREAD  =>  thread-local scope (i.e. "self->" variables)
 860   *  DIFV_SCOPE_LOCAL   =>  clause-local scope (i.e. "this->" variables)
 861   *
 862   * The variable state tracks variables by both their scope and their allocation
 863   * type:
 864   *
 865   *  - The dtvs_globals and dtvs_locals members each point to an array of
 866   *    dtrace_statvar structures.  These structures contain both the variable
 867   *    metadata (dtrace_difv structures) and the underlying storage for all
 868   *    statically allocated variables, including statically allocated
 869   *    DIFV_SCOPE_GLOBAL variables and all DIFV_SCOPE_LOCAL variables.
 870   *
 871   *  - The dtvs_tlocals member points to an array of dtrace_difv structures for
 872   *    DIFV_SCOPE_THREAD variables.  As such, this array tracks _only_ the
 873   *    variable metadata for DIFV_SCOPE_THREAD variables; the underlying storage
 874   *    is allocated out of the dynamic variable space.
 875   *
 876   *  - The dtvs_dynvars member is the dynamic variable state associated with the
 877   *    variable state.  The dynamic variable state (described in "DTrace Dynamic
 878   *    Variables", above) tracks all DIFV_SCOPE_THREAD variables and all
 879   *    dynamically-allocated DIFV_SCOPE_GLOBAL variables.
 880   */
/*
 * A statically-allocated variable:  the data itself (or a pointer to
 * dtsv_size bytes of by-reference data), a reference count, and the
 * variable metadata.  Used for statically allocated DIFV_SCOPE_GLOBAL
 * variables and all DIFV_SCOPE_LOCAL variables (see above).
 */
typedef struct dtrace_statvar {
	uint64_t dtsv_data;			/* data or pointer to it */
	size_t dtsv_size;			/* size of pointed-to data */
	int dtsv_refcnt;			/* reference count */
	dtrace_difv_t dtsv_var;			/* variable metadata */
} dtrace_statvar_t;
 887  
/*
 * Per-consumer variable state (see "DTrace Variable State" above):  arrays
 * of statically-allocated globals and clause-locals with their counts,
 * metadata-only tracking for thread-locals (whose storage comes from the
 * dynamic variable space), and the dynamic variable state itself.
 */
typedef struct dtrace_vstate {
	dtrace_state_t *dtvs_state;		/* back pointer to state */
	dtrace_statvar_t **dtvs_globals;	/* statically-allocated glbls */
	int dtvs_nglobals;			/* number of globals */
	dtrace_difv_t *dtvs_tlocals;		/* thread-local metadata */
	int dtvs_ntlocals;			/* number of thread-locals */
	dtrace_statvar_t **dtvs_locals;		/* clause-local data */
	int dtvs_nlocals;			/* number of clause-locals */
	dtrace_dstate_t dtvs_dynvars;		/* dynamic variable state */
} dtrace_vstate_t;
 898  
 899  /*
 900   * DTrace Machine State
 901   *
 902   * In the process of processing a fired probe, DTrace needs to track and/or
 903   * cache some per-CPU state associated with that particular firing.  This is
 904   * state that is always discarded after the probe firing has completed, and
 905   * much of it is not specific to any DTrace consumer, remaining valid across
 906   * all ECBs.  This state is tracked in the dtrace_mstate structure.
 907   */
/*
 * Bits for dtrace_mstate_t's dtms_present field, recording which of the
 * cached per-firing values in the structure below are valid.
 */
#define	DTRACE_MSTATE_ARGS		0x00000001
#define	DTRACE_MSTATE_PROBE		0x00000002
#define	DTRACE_MSTATE_EPID		0x00000004
#define	DTRACE_MSTATE_TIMESTAMP		0x00000008
#define	DTRACE_MSTATE_STACKDEPTH	0x00000010
#define	DTRACE_MSTATE_CALLER		0x00000020
#define	DTRACE_MSTATE_IPL		0x00000040
#define	DTRACE_MSTATE_FLTOFFS		0x00000080
#define	DTRACE_MSTATE_WALLTIMESTAMP	0x00000100
#define	DTRACE_MSTATE_USTACKDEPTH	0x00000200
#define	DTRACE_MSTATE_UCALLER		0x00000400
#define	DTRACE_MSTATE_MACHTIMESTAMP	0x00000800
#define	DTRACE_MSTATE_MACHCTIMESTAMP	0x00001000
 921  
/*
 * Per-CPU machine state tracked/cached while processing a single probe
 * firing and discarded when the firing completes (see "DTrace Machine
 * State" above).  dtms_present holds DTRACE_MSTATE_* bits indicating which
 * of the cached values are valid.
 */
typedef struct dtrace_mstate {
	uintptr_t dtms_scratch_base;		/* base of scratch space */
	uintptr_t dtms_scratch_ptr;		/* current scratch pointer */
	size_t dtms_scratch_size;		/* scratch size */
	uint32_t dtms_present;			/* variables that are present */
	uint64_t dtms_arg[5];			/* cached arguments */
	dtrace_epid_t dtms_epid;		/* current EPID */
	uint64_t dtms_timestamp;		/* cached timestamp */
	hrtime_t dtms_walltimestamp;		/* cached wall timestamp */
	uint64_t dtms_machtimestamp;		/* cached mach absolute timestamp */
	uint64_t dtms_machctimestamp;		/* cached mach continuous timestamp */
	int dtms_stackdepth;			/* cached stackdepth */
	int dtms_ustackdepth;			/* cached ustackdepth */
	struct dtrace_probe *dtms_probe;	/* current probe */
	uintptr_t dtms_caller;			/* cached caller */
	uint64_t dtms_ucaller;			/* cached user-level caller */
	int dtms_ipl;				/* cached interrupt pri lev */
	int dtms_fltoffs;			/* faulting DIFO offset */
	uintptr_t dtms_strtok;			/* saved strtok() pointer */
	uintptr_t dtms_strtok_limit;		/* upper bound of strtok ptr */
	uint32_t dtms_access;			/* memory access rights */
	dtrace_difo_t *dtms_difo;		/* current dif object */
} dtrace_mstate_t;
 945  
/*
 * Condition flag bits.  (NOTE(review):  the names suggest owner, user-mode
 * and zone-owner checks on the firing context; the consumers are not
 * visible in this header -- confirm semantics in dtrace.c.)
 */
#define	DTRACE_COND_OWNER	0x1
#define	DTRACE_COND_USERMODE	0x2
#define	DTRACE_COND_ZONEOWNER	0x4

#define	DTRACE_PROBEKEY_MAXDEPTH	8	/* max glob recursion depth */

/*
 * Access flag used by dtrace_mstate.dtms_access.
 */
#define	DTRACE_ACCESS_KERNEL	0x1		/* the priv to read kmem */
 956  
 957  
 958  /*
 959   * DTrace Activity
 960   *
 961   * Each DTrace consumer is in one of several states, which (for purposes of
 962   * avoiding yet-another overloading of the noun "state") we call the current
 963   * _activity_.  The activity transitions on dtrace_go() (from DTRACIOCGO), on
 964   * dtrace_stop() (from DTRACIOCSTOP) and on the exit() action.  Activities may
 965   * only transition in one direction; the activity transition diagram is a
 966   * directed acyclic graph.  The activity transition diagram is as follows:
 967   *
 968   *
 969   *
 970   * +----------+                   +--------+                   +--------+
 971   * | INACTIVE |------------------>| WARMUP |------------------>| ACTIVE |
 972   * +----------+   dtrace_go(),    +--------+   dtrace_go(),    +--------+
 973   *                before BEGIN        |        after BEGIN       |  |  |
 974   *                                    |                          |  |  |
 975   *                      exit() action |                          |  |  |
 976   *                     from BEGIN ECB |                          |  |  |
 977   *                                    |                          |  |  |
 978   *                                    v                          |  |  |
 979   *                               +----------+     exit() action  |  |  |
 980   * +-----------------------------| DRAINING |<-------------------+  |  |
 981   * |                             +----------+                       |  |
 982   * |                                  |                             |  |
 983   * |                   dtrace_stop(), |                             |  |
 984   * |                     before END   |                             |  |
 985   * |                                  |                             |  |
 986   * |                                  v                             |  |
 987   * | +---------+                 +----------+                       |  |
 988   * | | STOPPED |<----------------| COOLDOWN |<----------------------+  |
 989   * | +---------+  dtrace_stop(), +----------+     dtrace_stop(),       |
 990   * |                after END                       before END         |
 991   * |                                                                   |
 992   * |                              +--------+                           |
 993   * +----------------------------->| KILLED |<--------------------------+
 994   *       deadman timeout or       +--------+     deadman timeout or
 995   *        killed consumer                         killed consumer
 996   *
 997   * Note that once a DTrace consumer has stopped tracing, there is no way to
 998   * restart it; if a DTrace consumer wishes to restart tracing, it must reopen
 999   * the DTrace pseudodevice.
1000   */
/*
 * Consumer activity.  Transitions occur only along the edges of the
 * directed acyclic graph in the diagram above; there is no way back once
 * a consumer has stopped.
 */
typedef enum dtrace_activity {
	DTRACE_ACTIVITY_INACTIVE = 0,		/* not yet running */
	DTRACE_ACTIVITY_WARMUP,			/* while starting */
	DTRACE_ACTIVITY_ACTIVE,			/* running */
	DTRACE_ACTIVITY_DRAINING,		/* before stopping */
	DTRACE_ACTIVITY_COOLDOWN,		/* while stopping */
	DTRACE_ACTIVITY_STOPPED,		/* after stopping */
	DTRACE_ACTIVITY_KILLED			/* killed */
} dtrace_activity_t;
1010  
1011  
1012  /*
1013   * APPLE NOTE:  DTrace dof modes implementation
1014   *
1015   * DTrace has four "dof modes". They are:
1016   *
1017   * DTRACE_DOF_MODE_NEVER	Never load any dof, period.
1018   * DTRACE_DOF_MODE_LAZY_ON	Defer loading dof until later
1019   * DTRACE_DOF_MODE_LAZY_OFF	Load all deferred dof now, and any new dof 
1020   * DTRACE_DOF_MODE_NON_LAZY	Load all dof immediately.
1021   *
1022   * It is legal to transition between the two lazy modes. The NEVER and
1023   * NON_LAZY modes are permanent, and must not change once set.
1024   *
1025   * The current dof mode is kept in dtrace_dof_mode, which is protected by the
1026   * dtrace_dof_mode_lock. This is a RW lock, reads require shared access, writes
1027   * require exclusive access. Because NEVER and NON_LAZY are permanent states,
1028   * it is legal to test for those modes without holding the dof mode lock.
1029   *
1030   * Lock ordering is dof mode lock before any dtrace lock, and before the
1031   * process p_dtrace_sprlock. In general, other locks should not be held when
1032   * taking the dof mode lock. Acquiring the dof mode lock in exclusive mode
1033   * will block process fork, exec, and exit, so it should be held exclusive
1034   * for as short a time as possible.
1035   */
1036  
/* Dof modes, as described in the APPLE NOTE above.  NEVER and NON_LAZY are
 * permanent once set; only the two lazy modes may transition between
 * each other. */
#define DTRACE_DOF_MODE_NEVER 		0	/* never load any dof */
#define DTRACE_DOF_MODE_LAZY_ON		1	/* defer loading dof until later */
#define DTRACE_DOF_MODE_LAZY_OFF	2	/* load deferred dof now, and any new dof */
#define DTRACE_DOF_MODE_NON_LAZY	3	/* load all dof immediately */
1041  
1042  /*
1043   * dtrace kernel symbol modes are used to control when the kernel may dispose of
1044   * symbol information used by the fbt/sdt provider. The kernel itself, as well as
1045   * every kext, has symbol table/nlist info that has historically been preserved
1046   * for dtrace's use. This allowed dtrace to be lazy about allocating fbt/sdt probes,
1047   * at the expense of keeping the symbol info in the kernel permanently.
1048   *
1049   * Starting in 10.7+, fbt probes may be created from userspace, in the same
1050   * fashion as pid probes. The kernel allows dtrace "first right of refusal"
1051   * whenever symbol data becomes available (such as a kext load). If dtrace is
1052   * active, it will immediately read/copy the needed data, and then the kernel
1053   * may free it. If dtrace is not active, it returns immediately, having done
1054   * no work or allocations, and the symbol data is freed. Should dtrace need
1055   * this data later, it is expected that the userspace client will push the
1056   * data into the kernel via ioctl calls.
1057   *
1058   * The kernel symbol modes are used to control what dtrace does with symbol data:
1059   *
1060   * DTRACE_KERNEL_SYMBOLS_NEVER			Effectively disables fbt/sdt
1061   * DTRACE_KERNEL_SYMBOLS_FROM_KERNEL		Immediately read/copy symbol data
1062   * DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE		Wait for symbols from userspace
1063   * DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL	Immediately read/copy symbol data
1064   *
1065   * It is legal to transition between DTRACE_KERNEL_SYMBOLS_FROM_KERNEL and 
1066   * DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE. The DTRACE_KERNEL_SYMBOLS_NEVER and
1067   * DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL are permanent modes, intended to
1068   * disable fbt probes entirely, or prevent any symbols being loaded from
1069   * userspace.
1070  *
1071   * The kernel symbol mode is kept in dtrace_kernel_symbol_mode, which is protected
1072   * by the dtrace_lock.
1073   */
1074  
/* Kernel symbol modes, as described above.  NEVER and ALWAYS_FROM_KERNEL
 * are permanent; FROM_KERNEL and FROM_USERSPACE may transition between
 * each other. */
#define DTRACE_KERNEL_SYMBOLS_NEVER 			0	/* effectively disables fbt/sdt */
#define DTRACE_KERNEL_SYMBOLS_FROM_KERNEL		1	/* immediately read/copy symbol data */
#define DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE		2	/* wait for symbols from userspace */
#define DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL	3	/* immediately read/copy symbol data */
1079  	
1080  
1081  /*
1082   * DTrace Helper Implementation
1083   *
1084   * A description of the helper architecture may be found in <sys/dtrace.h>.
1085   * Each process contains a pointer to its helpers in its p_dtrace_helpers
1086   * member.  This is a pointer to a dtrace_helpers structure, which contains an
1087   * array of pointers to dtrace_helper structures, helper variable state (shared
1088   * among a process's helpers) and a generation count.  (The generation count is
1089   * used to provide an identifier when a helper is added so that it may be
1090   * subsequently removed.)  The dtrace_helper structure is self-explanatory,
1091   * containing pointers to the objects needed to execute the helper.  Note that
1092   * helpers are _duplicated_ across fork(2), and destroyed on exec(2).  No more
1093   * than dtrace_helpers_max are allowed per-process.
1094   */
/*
 * Helper action kinds.  ustack() is currently the only helper action kind;
 * DTRACE_NHELPER_ACTIONS sizes the per-kind action arrays.
 */
#define	DTRACE_HELPER_ACTION_USTACK	0	/* index of ustack() helper */
#define	DTRACE_NHELPER_ACTIONS		1	/* number of helper action kinds */
1097  
/*
 * A single helper action:  its generation (the identifier used when the
 * helper is subsequently removed -- see above), an optional predicate, the
 * array of dtha_nactions action DIFOs to execute, and linkage to the next
 * helper action.
 */
typedef struct dtrace_helper_action {
	int dtha_generation;			/* helper action generation */
	int dtha_nactions;			/* number of actions */
	dtrace_difo_t *dtha_predicate;		/* helper action predicate */
	dtrace_difo_t **dtha_actions;		/* array of actions */
	struct dtrace_helper_action *dtha_next;	/* next helper action */
} dtrace_helper_action_t;
1105  
/*
 * A helper provider:  a reference-counted wrapper around the DOF that
 * describes the provider and its probes, tagged with its generation.
 */
typedef struct dtrace_helper_provider {
	int dthp_generation;			/* helper provider generation */
	uint32_t dthp_ref;			/* reference count */
	dof_helper_t dthp_prov;			/* DOF w/ provider and probes */
} dtrace_helper_provider_t;
1111  
/*
 * Per-process helper state, pointed to by the process's p_dtrace_helpers
 * member (see "DTrace Helper Implementation" above):  the array of helper
 * actions, the variable state shared among the process's helpers, the
 * provider array and its counts, the generation count, the owning pid, a
 * deferred-list flag, and next/prev list linkage.
 */
typedef struct dtrace_helpers {
	dtrace_helper_action_t **dthps_actions;	/* array of helper actions */
	dtrace_vstate_t dthps_vstate;		/* helper action var. state */
	dtrace_helper_provider_t **dthps_provs;	/* array of providers */
	uint_t dthps_nprovs;			/* count of providers */
	uint_t dthps_maxprovs;			/* provider array size */
	int dthps_generation;			/* current generation */
	pid_t dthps_pid;			/* pid of associated proc */
	int dthps_deferred;			/* helper in deferred list */
	struct dtrace_helpers *dthps_next;	/* next pointer */
	struct dtrace_helpers *dthps_prev;	/* prev pointer */
} dtrace_helpers_t;
1124  
1125  /*
1126   * DTrace Helper Action Tracing
1127   *
1128   * Debugging helper actions can be arduous.  To ease the development and
1129   * debugging of helpers, DTrace contains a tracing-framework-within-a-tracing-
1130   * framework: helper tracing.  If dtrace_helptrace_enabled is non-zero (which
1131   * it is by default on DEBUG kernels), all helper activity will be traced to a
1132   * global, in-kernel ring buffer.  Each entry includes a pointer to the specific
1133   * helper, the location within the helper, and a trace of all local variables.
1134   * The ring buffer may be displayed in a human-readable format with the
1135   * ::dtrace_helptrace mdb(1) dcmd.
1136   */
/* NOTE(review): these appear to be sentinel dtht_where values -- confirm */
#define	DTRACE_HELPTRACE_NEXT	(-1)
#define	DTRACE_HELPTRACE_DONE	(-2)
#define	DTRACE_HELPTRACE_ERR	(-3)


/*
 * One entry in the helper-trace ring buffer described above.
 */
typedef struct dtrace_helptrace {
	dtrace_helper_action_t	*dtht_helper;	/* helper action */
	int dtht_where;				/* where in helper action */
	int dtht_nlocals;			/* number of locals */
	int dtht_fault;				/* type of fault (if any) */
	int dtht_fltoffs;			/* DIF offset */
	uint64_t dtht_illval;			/* faulting value */
	uint64_t dtht_locals[1];		/* local variables */
	/*
	 * dtht_locals is the pre-C99 variable-length trailing-array idiom;
	 * presumably dtht_nlocals entries are allocated past the end of the
	 * structure -- do not change to a flexible array member without
	 * auditing the allocation-size arithmetic in dtrace.c.
	 */
} dtrace_helptrace_t;
1151  
1152  /*
1153   * DTrace Credentials
1154   *
1155   * In probe context, we have limited flexibility to examine the credentials
1156   * of the DTrace consumer that created a particular enabling.  We use
1157   * the Least Privilege interfaces to cache the consumer's cred pointer and
1158   * some facts about that credential in a dtrace_cred_t structure. These
1159   * can limit the consumer's breadth of visibility and what actions the
1160   * consumer may take.
1161   */
/*
 * Visibility bits (stored in dcr_visible -- NOTE(review): inferred from the
 * dtrace_cred_t layout below; confirm against dtrace.c).
 */
#define	DTRACE_CRV_ALLPROC		0x01
#define	DTRACE_CRV_KERNEL		0x02
#define	DTRACE_CRV_ALLZONE		0x04

#define	DTRACE_CRV_ALL		(DTRACE_CRV_ALLPROC | DTRACE_CRV_KERNEL | \
	DTRACE_CRV_ALLZONE)

/*
 * Action bits (stored in dcr_action -- NOTE(review): inferred from the
 * dtrace_cred_t layout below; confirm against dtrace.c).
 */
#define	DTRACE_CRA_PROC				0x0001
#define	DTRACE_CRA_PROC_CONTROL			0x0002
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER	0x0004
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE	0x0008
#define	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG	0x0010
#define	DTRACE_CRA_KERNEL			0x0020
#define	DTRACE_CRA_KERNEL_DESTRUCTIVE		0x0040

#define	DTRACE_CRA_ALL		(DTRACE_CRA_PROC | \
	DTRACE_CRA_PROC_CONTROL | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE | \
	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG | \
	DTRACE_CRA_KERNEL | \
	DTRACE_CRA_KERNEL_DESTRUCTIVE)
1184  
/*
 * Cached consumer credential and the derived visibility/action facts
 * described in the comment above.
 */
typedef struct dtrace_cred {
	cred_t			*dcr_cred;	/* consumer's cred pointer */
	uint8_t			dcr_destructive; /* allowed destructive actions */
	uint8_t			dcr_visible;	/* DTRACE_CRV_* visibility bits */
	uint16_t		dcr_action;	/* DTRACE_CRA_* action bits */
} dtrace_cred_t;
1191  
/*
 * A format string registered with a consumer: a reference count followed
 * inline by the NUL-terminated string (flexible array member).
 */
typedef struct dtrace_format {
	uint64_t dtf_refcount;			/* number of references */
	char dtf_str[];				/* NUL-terminated format string */
} dtrace_format_t;

/*
 * Total allocation size for a dtrace_format_t holding fmt's string,
 * including the NUL terminator.  The parameter is parenthesized so the
 * macro expands correctly for non-trivial pointer expressions.
 */
#define DTRACE_FORMAT_SIZE(fmt) (strlen((fmt)->dtf_str) + 1 + sizeof(dtrace_format_t))
1198  
1199  /*
1200   * DTrace Consumer State
1201   *
1202   * Each DTrace consumer has an associated dtrace_state structure that contains
1203   * its in-kernel DTrace state -- including options, credentials, statistics and
1204   * pointers to ECBs, buffers, speculations and formats.  A dtrace_state
1205   * structure is also allocated for anonymous enablings.  When anonymous state
 * is grabbed, the grabbing consumer's dts_anon pointer is set to the grabbed
1207   * dtrace_state structure.
1208   */
/*
 * Per-consumer in-kernel state: options, credentials, statistics, and
 * pointers to ECBs, buffers, speculations and formats (see the comment
 * above for the overall role of this structure).
 */
struct dtrace_state {
	dev_t dts_dev;				/* device */
	int dts_necbs;				/* total number of ECBs */
	dtrace_ecb_t **dts_ecbs;		/* array of ECBs */
	dtrace_epid_t dts_epid;			/* next EPID to allocate */
	size_t dts_needed;			/* greatest needed space */
	struct dtrace_state *dts_anon;		/* anon. state, if grabbed */
	dtrace_activity_t dts_activity;		/* current activity */
	dtrace_vstate_t dts_vstate;		/* variable state */
	dtrace_buffer_t *dts_buffer;		/* principal buffer */
	dtrace_buffer_t *dts_aggbuffer;		/* aggregation buffer */
	dtrace_speculation_t *dts_speculations;	/* speculation array */
	int dts_nspeculations;			/* number of speculations */
	int dts_naggregations;			/* number of aggregations */
	dtrace_aggregation_t **dts_aggregations; /* aggregation array */
	vmem_t *dts_aggid_arena;		/* arena for aggregation IDs */
	uint64_t dts_errors;			/* total number of errors */
	uint32_t dts_speculations_busy;		/* number of spec. busy */
	uint32_t dts_speculations_unavail;	/* number of spec unavail */
	uint32_t dts_stkstroverflows;		/* stack string tab overflows */
	uint32_t dts_dblerrors;			/* errors in ERROR probes */
	uint32_t dts_reserve;			/* space reserved for END */
	hrtime_t dts_laststatus;		/* time of last status */
	cyclic_id_t dts_cleaner;		/* cleaning cyclic */
	cyclic_id_t dts_deadman;		/* deadman cyclic */
	hrtime_t dts_alive;			/* time last alive */
	char dts_speculates;			/* boolean: has speculations */
	char dts_destructive;			/* boolean: has dest. actions */
	int dts_nformats;			/* number of formats */
	dtrace_format_t **dts_formats;		/* format string array */
	dtrace_optval_t dts_options[DTRACEOPT_MAX]; /* options */
	dtrace_cred_t dts_cred;			/* credentials */
	size_t dts_nretained;			/* number of retained enabs */
	uint64_t dts_arg_error_illval;		/* NOTE(review): presumably the faulting value for argument-access errors (cf. dtrace_probe_error) -- confirm */
	uint32_t dts_buf_over_limit;		/* number of bufs over dtb_limit */
	uint64_t **dts_rstate;			/* per-CPU random state */
};
1246  
/*
 * A registered DTrace provider: its attributes, privileges, operations
 * vector and registration argument, linked into a list of all providers.
 */
struct dtrace_provider {
	dtrace_pattr_t dtpv_attr;		/* provider attributes */
	dtrace_ppriv_t dtpv_priv;		/* provider privileges */
	dtrace_pops_t dtpv_pops;		/* provider operations */
	char *dtpv_name;			/* provider name */
	void *dtpv_arg;				/* provider argument */
	uint_t dtpv_defunct;			/* boolean: defunct provider */
	struct dtrace_provider *dtpv_next;	/* next provider */
	uint64_t dtpv_probe_count;		/* number of associated probes */
	uint64_t dtpv_ecb_count;		/* number of associated enabled ECBs */
};
1258  
/*
 * A registered DTrace meta-provider: its operations vector, name, user
 * argument, and a count of providers associated with it.
 */
struct dtrace_meta {
	dtrace_mops_t dtm_mops;			/* meta provider operations */
	char *dtm_name;				/* meta provider name */
	void *dtm_arg;				/* meta provider user arg */
	uint64_t dtm_count;			/* number of associated providers */
};
1265  
1266  /*
1267   * DTrace Enablings
1268   *
1269   * A dtrace_enabling structure is used to track a collection of ECB
1270   * descriptions -- before they have been turned into actual ECBs.  This is
1271   * created as a result of DOF processing, and is generally used to generate
1272   * ECBs immediately thereafter.  However, enablings are also generally
1273   * retained should the probes they describe be created at a later time; as
1274   * each new module or provider registers with the framework, the retained
1275   * enablings are reevaluated, with any new match resulting in new ECBs.  To
1276   * prevent probes from being matched more than once, the enabling tracks the
1277   * last probe generation matched, and only matches probes from subsequent
1278   * generations.
1279   */
/*
 * A collection of ECB descriptions plus the matching bookkeeping described
 * in the comment above; enablings form a doubly-linked list via
 * dten_prev/dten_next.
 */
typedef struct dtrace_enabling {
	dtrace_ecbdesc_t **dten_desc;		/* all ECB descriptions */
	int dten_ndesc;				/* number of ECB descriptions */
	int dten_maxdesc;			/* size of ECB array */
	dtrace_vstate_t *dten_vstate;		/* associated variable state */
	dtrace_genid_t dten_probegen;		/* matched probe generation */
	dtrace_ecbdesc_t *dten_current;		/* current ECB description */
	int dten_error;				/* current error value */
	int dten_primed;			/* boolean: set if primed */
	struct dtrace_enabling *dten_prev;	/* previous enabling */
	struct dtrace_enabling *dten_next;	/* next enabling */
} dtrace_enabling_t;
1292  
1293  /*
1294   * DTrace Anonymous Enablings
1295   *
1296   * Anonymous enablings are DTrace enablings that are not associated with a
1297   * controlling process, but rather derive their enabling from DOF stored as
1298   * properties in the dtrace.conf file.  If there is an anonymous enabling, a
1299   * DTrace consumer state and enabling are created on attach.  The state may be
1300   * subsequently grabbed by the first consumer specifying the "grabanon"
1301   * option.  As long as an anonymous DTrace enabling exists, dtrace(7D) will
1302   * refuse to unload.
1303   */
/*
 * Anonymous-enabling state created on attach, as described in the comment
 * above.
 */
typedef struct dtrace_anon {
	dtrace_state_t *dta_state;		/* DTrace consumer state */
	dtrace_enabling_t *dta_enabling;	/* pointer to enabling */
	processorid_t dta_beganon;		/* which CPU BEGIN ran on */
} dtrace_anon_t;
1309  
1310  /*
1311   * DTrace Error Debugging
1312   */
#if DEBUG
#define	DTRACE_ERRDEBUG
#endif

#ifdef DTRACE_ERRDEBUG

/*
 * One slot of the error-debugging hash: an error message and the number of
 * times it has been seen.  Only compiled in on DEBUG kernels.
 */
typedef struct dtrace_errhash {
	const char	*dter_msg;	/* error message */
	int		dter_count;	/* number of times seen */
} dtrace_errhash_t;

#define	DTRACE_ERRHASHSZ	256	/* must be > number of err msgs */

#endif	/* DTRACE_ERRDEBUG */
1327  
/*
 * A reference-counted string on a doubly-linked list; the string data is
 * stored inline as a flexible array member.  The typedef is declared ahead
 * of the definition so the self-referential members can use it; the struct
 * itself is then defined without repeating the typedef name (redeclaring a
 * typedef is a constraint violation before C11 and redundant after).
 */
typedef struct dtrace_string dtrace_string_t;

struct dtrace_string {
	dtrace_string_t *dtst_next;		/* next string */
	dtrace_string_t *dtst_prev;		/* previous string */
	uint32_t dtst_refcount;			/* reference count */
	char dtst_str[];			/* NUL-terminated string data */
};
1336  
1337  /**
1338   * DTrace Matching pre-conditions
1339   *
 * Used when matching new probes, to skip the matching of enablings that
 * do not satisfy the condition tested by dmc_func.
1342   */
typedef struct dtrace_match_cond {
	int (*dmc_func)(dtrace_probedesc_t*, void*);	/* pre-condition callback */
	void *dmc_data;					/* opaque argument passed to dmc_func */
} dtrace_match_cond_t;
1347  
1348  
1349  /*
1350   * DTrace Toxic Ranges
1351   *
1352   * DTrace supports safe loads from probe context; if the address turns out to
1353   * be invalid, a bit will be set by the kernel indicating that DTrace
1354   * encountered a memory error, and DTrace will propagate the error to the user
1355   * accordingly.  However, there may exist some regions of memory in which an
1356   * arbitrary load can change system state, and from which it is impossible to
1357   * recover from such a load after it has been attempted.  Examples of this may
1358   * include memory in which programmable I/O registers are mapped (for which a
1359   * read may have some implications for the device) or (in the specific case of
1360   * UltraSPARC-I and -II) the virtual address hole.  The platform is required
1361   * to make DTrace aware of these toxic ranges; DTrace will then check that
1362   * target addresses are not in a toxic range before attempting to issue a
1363   * safe load.
1364   */
/*
 * One toxic address range [dtt_base, dtt_limit), as described above;
 * safe loads are refused for target addresses within such a range.
 */
typedef struct dtrace_toxrange {
	uintptr_t	dtt_base;		/* base of toxic range */
	uintptr_t	dtt_limit;		/* limit of toxic range */
} dtrace_toxrange_t;
1369  
/*
 * Architecture/platform-provided primitives used by the DTrace framework,
 * many of them callable from probe context.
 */
extern uint64_t dtrace_getarg(int, int, dtrace_mstate_t*, dtrace_vstate_t*);
extern int dtrace_getipl(void);
extern uintptr_t dtrace_caller(int);
extern uint32_t dtrace_cas32(uint32_t *, uint32_t, uint32_t);
extern void *dtrace_casptr(void *, void *, void *);
/*
 * Fault-safe copies between user and kernel address spaces.  NOTE(review):
 * the volatile uint16_t * parameter appears to receive fault flags rather
 * than the copy trapping -- confirm against the per-arch implementations.
 */
extern void dtrace_copyin(user_addr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyinstr(user_addr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyout(uintptr_t, user_addr_t, size_t, volatile uint16_t *);
extern void dtrace_copyoutstr(uintptr_t, user_addr_t, size_t, volatile uint16_t *);
extern void dtrace_getpcstack(pc_t *, int, int, uint32_t *);
extern uint64_t dtrace_load64(uintptr_t);
extern int dtrace_canload(uint64_t, size_t, dtrace_mstate_t*, dtrace_vstate_t*);

extern uint64_t dtrace_getreg(struct regs *, uint_t);
extern uint64_t dtrace_getvmreg(uint_t);
extern int dtrace_getstackdepth(int);
extern void dtrace_getupcstack(uint64_t *, int);
extern void dtrace_getufpstack(uint64_t *, uint64_t *, int);
extern int dtrace_getustackdepth(void);
/* Fault-safe fetches of user-space words of various widths. */
extern uintptr_t dtrace_fulword(void *);
extern uint8_t dtrace_fuword8(user_addr_t);
extern uint16_t dtrace_fuword16(user_addr_t);
extern uint32_t dtrace_fuword32(user_addr_t);
extern uint64_t dtrace_fuword64(user_addr_t);
extern int dtrace_proc_waitfor(dtrace_procdesc_t*);
extern void dtrace_probe_error(dtrace_state_t *, dtrace_epid_t, int, int,
    int, uint64_t);
extern int dtrace_assfail(const char *, const char *, int);
extern int dtrace_attached(void);
extern hrtime_t dtrace_gethrestime(void);

extern void dtrace_flush_caches(void);

extern void dtrace_copy(uintptr_t, uintptr_t, size_t);
extern void dtrace_copystr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);

extern void* dtrace_ptrauth_strip(void*, uint64_t);
extern int dtrace_is_valid_ptrauth_key(uint64_t);

extern uint64_t dtrace_physmem_read(uint64_t, size_t);
extern void dtrace_physmem_write(uint64_t, uint64_t, size_t);

/*
 * DTrace state handling: allocation, lookup and release of per-minor
 * consumer state.
 */
extern minor_t dtrace_state_reserve(void);
extern dtrace_state_t* dtrace_state_allocate(minor_t minor);
extern dtrace_state_t* dtrace_state_get(minor_t minor);
extern void dtrace_state_free(minor_t minor);

/*
 * DTrace restriction checks (platform security policy).
 */
extern void dtrace_restriction_policy_load(void);
extern boolean_t dtrace_is_restricted(void);
extern boolean_t dtrace_are_restrictions_relaxed(void);
extern boolean_t dtrace_fbt_probes_restricted(void);
extern boolean_t dtrace_sdt_probes_restricted(void);
extern boolean_t dtrace_can_attach_to_proc(proc_t);
1429  
1430  /*
1431   * DTrace Assertions
1432   *
1433   * DTrace calls ASSERT and VERIFY from probe context.  To assure that a failed
 * ASSERT or VERIFY does not induce a markedly more catastrophic failure (e.g.,
1435   * one from which a dump cannot be gleaned), DTrace must define its own ASSERT
1436   * and VERIFY macros to be ones that may safely be called from probe context.
1437   * This header file must thus be included by any DTrace component that calls
1438   * ASSERT and/or VERIFY from probe context, and _only_ by those components.
1439   * (The only exception to this is kernel debugging infrastructure at user-level
1440   * that doesn't depend on calling ASSERT.)
1441   */
#undef ASSERT
#undef VERIFY

/*
 * VERIFY is always compiled in; on failure it calls dtrace_assfail(),
 * which is safe to call from probe context.  EX is evaluated exactly once.
 */
#define	VERIFY(EX)	((void)((EX) || \
			dtrace_assfail(#EX, __FILE__, __LINE__)))

#if DEBUG
/* ASSERT behaves like VERIFY, but only on DEBUG kernels. */
#define	ASSERT(EX)	((void)((EX) || \
			dtrace_assfail(#EX, __FILE__, __LINE__)))
#else
/* On non-DEBUG kernels ASSERT compiles to nothing (argument unevaluated). */
#define	ASSERT(X)	((void)0)
#endif
1454  
1455  #ifdef	__cplusplus
1456  }
1457  #endif
1458  
1459  #endif /* _SYS_DTRACE_IMPL_H */
1460