/ duct-tape / xnu / osfmk / arm64 / exception_asm.h
exception_asm.h
  1  /*
  2   * Copyright (c) 2018 Apple Inc. All rights reserved.
  3   *
  4   * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  5   *
  6   * This file contains Original Code and/or Modifications of Original Code
  7   * as defined in and that are subject to the Apple Public Source License
  8   * Version 2.0 (the 'License'). You may not use this file except in
  9   * compliance with the License. The rights granted to you under the License
 10   * may not be used to create, or enable the creation or redistribution of,
 11   * unlawful or unlicensed copies of an Apple operating system, or to
 12   * circumvent, violate, or enable the circumvention or violation of, any
 13   * terms of an Apple operating system software license agreement.
 14   *
 15   * Please obtain a copy of the License at
 16   * http://www.opensource.apple.com/apsl/ and read it before using this file.
 17   *
 18   * The Original Code and all software distributed under the License are
 19   * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 20   * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 21   * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 22   * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 23   * Please see the License for the specific language governing rights and
 24   * limitations under the License.
 25   *
 26   * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 27   */
 28  
 29  #include <arm64/pac_asm.h>
 30  #include <pexpert/arm64/board_config.h>
 31  #include "assym.s"
 32  
 33  #if XNU_MONITOR
/* Exit path defines; for controlling PPL -> kernel transitions. */
#define PPL_EXIT_DISPATCH   0 /* This is a clean exit after a PPL request. */
#define PPL_EXIT_PANIC_CALL 1 /* The PPL has called panic. */
#define PPL_EXIT_BAD_CALL   2 /* The PPL request failed. */
#define PPL_EXIT_EXCEPTION  3 /* The PPL took an exception. */


/*
 * Aliases for the exception-state system registers as seen from each
 * execution mode.  KERNEL_MODE_* names map to the *_GL11 registers and
 * GUARDED_MODE_* names map to the *_EL1 registers.
 * NOTE(review): the mapping is taken at face value from this file --
 * presumably, while executing in the guarded (PPL) mode, the normal
 * kernel's exception state is accessed through the GL11 register bank;
 * confirm against the GXF/SPRR documentation before relying on this.
 */
#define KERNEL_MODE_ELR      ELR_GL11
#define KERNEL_MODE_FAR      FAR_GL11
#define KERNEL_MODE_ESR      ESR_GL11
#define KERNEL_MODE_SPSR     SPSR_GL11
#define KERNEL_MODE_ASPSR    ASPSR_GL11
#define KERNEL_MODE_VBAR     VBAR_GL11
#define KERNEL_MODE_TPIDR    TPIDR_GL11

#define GUARDED_MODE_ELR     ELR_EL1
#define GUARDED_MODE_FAR     FAR_EL1
#define GUARDED_MODE_ESR     ESR_EL1
#define GUARDED_MODE_SPSR    SPSR_EL1
#define GUARDED_MODE_ASPSR   ASPSR_EL1
#define GUARDED_MODE_VBAR    VBAR_EL1
#define GUARDED_MODE_TPIDR   TPIDR_EL1
 56  
/*
 * LOAD_PMAP_CPU_DATA
 *
 * Loads the PPL per-CPU data array entry for the current CPU.
 *   arg0 - Address of the PPL per-CPU data is returned through this
 *   arg1 - Scratch register
 *   arg2 - Scratch register
 *
 * The flat CPU number is computed from MPIDR_EL1 as
 * AFF0 (core number) + cluster_offsets[AFF1 (cluster ID)], then used to
 * index pmap_cpu_data_array.  All three registers are clobbered.
 */
.macro LOAD_PMAP_CPU_DATA
	/* Get the CPU ID. */
	mrs		$0, MPIDR_EL1
	ubfx	$1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH      // $1 = AFF1 field (cluster ID)
	adrp	$2, EXT(cluster_offsets)@page
	add		$2, $2, EXT(cluster_offsets)@pageoff
	ldr		$1, [$2, $1, lsl #3]                            // $1 = cluster_offsets[AFF1]; 8-byte table entries

	and		$0, $0, MPIDR_AFF0_MASK                         // $0 = AFF0 (core number within the cluster)
	add		$0, $0, $1                                      // $0 = flat CPU number

	/* Get the PPL CPU data array. */
	adrp	$1, EXT(pmap_cpu_data_array)@page
	add		$1, $1, EXT(pmap_cpu_data_array)@pageoff

	/*
	 * Sanity check the CPU ID (this is not a panic because this pertains to
	 * the hardware configuration; this should only fail if our
	 * understanding of the hardware is incorrect).
	 */
	cmp		$0, MAX_CPUS
	b.hs	.                                               // Out-of-range CPU number: hang here forever

	mov		$2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE
	/* Get the PPL per-CPU data. */
	madd	$0, $0, $2, $1                                  // $0 = &array[cpu] = array + cpu * entry_size
.endmacro
 93  
/*
 * GET_PMAP_CPU_DATA
 *
 * Retrieves the PPL per-CPU data for the current CPU.
 *   arg0 - Address of the PPL per-CPU data is returned through this
 *   arg1 - Scratch register
 *   arg2 - Scratch register
 *
 * Currently a thin alias for LOAD_PMAP_CPU_DATA; kept as a separate name
 * so call sites express intent ("get") independent of the implementation.
 */
.macro GET_PMAP_CPU_DATA
	LOAD_PMAP_CPU_DATA $0, $1, $2
.endmacro
106  
107  #endif /* XNU_MONITOR */
108  
/*
 * INIT_SAVED_STATE_FLAVORS
 *
 * Initializes the saved state flavors of a new saved state structure
 *  arg0 - saved state pointer
 *  arg1 - 32-bit scratch reg
 *  arg2 - 32-bit scratch reg
 */
.macro INIT_SAVED_STATE_FLAVORS
	mov		$1, ARM_SAVED_STATE64                                   // Set saved state to 64-bit flavor
	mov		$2, ARM_SAVED_STATE64_COUNT
	stp		$1, $2, [$0, SS_FLAVOR]                                 // Pair-store: assumes the count field directly follows SS_FLAVOR (layout from assym.s) -- confirm
	mov		$1, ARM_NEON_SAVED_STATE64                              // Set neon state to 64-bit flavor
	str		$1, [$0, NS_FLAVOR]
	mov		$1, ARM_NEON_SAVED_STATE64_COUNT
	str		$1, [$0, NS_COUNT]
.endmacro
126  
/*
 * SPILL_REGISTERS
 *
 * Spills the current set of registers (excluding x0, x1, sp) to the specified
 * save area.
 *
 * On CPUs with PAC, the kernel "A" keys are used to create a thread signature.
 * These keys are deliberately kept loaded into the CPU for later kernel use.
 *
 *   arg0 - KERNEL_MODE or HIBERNATE_MODE
 *   x0 - Address of the save area
 *
 * The mode argument is only consulted on HAS_APPLE_PAC builds, where
 * HIBERNATE_MODE skips thread-state signing.  x0 and x1 are preserved
 * (the caller is responsible for saving them); most other GPRs are
 * reused as scratch below after their values have been spilled.
 */
#define KERNEL_MODE 0
#define HIBERNATE_MODE 1

.macro SPILL_REGISTERS	mode
	stp		x2, x3, [x0, SS64_X2]                                   // Save remaining GPRs
	stp		x4, x5, [x0, SS64_X4]
	stp		x6, x7, [x0, SS64_X6]
	stp		x8, x9, [x0, SS64_X8]
	stp		x10, x11, [x0, SS64_X10]
	stp		x12, x13, [x0, SS64_X12]
	stp		x14, x15, [x0, SS64_X14]
	stp		x16, x17, [x0, SS64_X16]
	stp		x18, x19, [x0, SS64_X18]
	stp		x20, x21, [x0, SS64_X20]
	stp		x22, x23, [x0, SS64_X22]
	stp		x24, x25, [x0, SS64_X24]
	stp		x26, x27, [x0, SS64_X26]
	stp		x28, fp,  [x0, SS64_X28]
	str		lr, [x0, SS64_LR]

	/* Save arm_neon_saved_state64 */

	stp		q0, q1, [x0, NS64_Q0]
	stp		q2, q3, [x0, NS64_Q2]
	stp		q4, q5, [x0, NS64_Q4]
	stp		q6, q7, [x0, NS64_Q6]
	stp		q8, q9, [x0, NS64_Q8]
	stp		q10, q11, [x0, NS64_Q10]
	stp		q12, q13, [x0, NS64_Q12]
	stp		q14, q15, [x0, NS64_Q14]
	stp		q16, q17, [x0, NS64_Q16]
	stp		q18, q19, [x0, NS64_Q18]
	stp		q20, q21, [x0, NS64_Q20]
	stp		q22, q23, [x0, NS64_Q22]
	stp		q24, q25, [x0, NS64_Q24]
	stp		q26, q27, [x0, NS64_Q26]
	stp		q28, q29, [x0, NS64_Q28]
	stp		q30, q31, [x0, NS64_Q30]

	/* x22-x25 hold the exception state until it is stored to the PCB below. */
	mrs		x22, ELR_EL1                                                     // Get exception link register
	mrs		x23, SPSR_EL1                                                   // Load CPSR into var reg x23
	mrs		x24, FPSR
	mrs		x25, FPCR

#if defined(HAS_APPLE_PAC)
	.if \mode != HIBERNATE_MODE

	/* Save x1 and LR to preserve across call */
	mov		x21, x1
	mov		x20, lr

	/*
	 * Create thread state signature
	 *
	 * Arg0: The ARM context pointer
	 * Arg1: The PC value to sign
	 * Arg2: The CPSR value to sign
	 * Arg3: The LR value to sign
	 * Arg4: The X16 value to sign
	 * Arg5: The X17 value to sign
	 */
	mov		x1, x22
	mov		w2, w23
	mov		x3, x20
	mov		x4, x16
	mov		x5, x17
	bl		_ml_sign_thread_state                           // x0 is used unreloaded below, so this call must preserve it -- special register contract, not AAPCS64
	mov		lr, x20                                         // Restore LR and x1 clobbered by the call sequence
	mov		x1, x21
	.endif
#endif /* defined(HAS_APPLE_PAC) */

	str		x22, [x0, SS64_PC]                                               // Save ELR to PCB
	str		w23, [x0, SS64_CPSR]                                    // Save CPSR to PCB
	str		w24, [x0, NS64_FPSR]
	str		w25, [x0, NS64_FPCR]

	/* x20/x21 are free again after the PAC sequence above. */
	mrs		x20, FAR_EL1
	mrs		x21, ESR_EL1

	str		x20, [x0, SS64_FAR]
	str		w21, [x0, SS64_ESR]
.endmacro
222  
/*
 * DEADLOOP
 *
 * Parks the current CPU in an infinite branch-to-self loop.
 */
.macro DEADLOOP
	b	.
.endmacro
226  
// SP0 is expected to already be selected
// On entry x1 must point at the current thread structure (TH_KSTACKPTR
// offset is read from it); x1 is clobbered.
.macro SWITCH_TO_KERN_STACK
	ldr		x1, [x1, TH_KSTACKPTR]	// Load the top of the kernel stack to x1
	mov		sp, x1			// Set the stack pointer to the kernel stack
.endmacro
232  
// SP0 is expected to already be selected
// Walks TPIDR_EL1 (current thread) -> per-CPU data -> interrupt stack
// pointer, then switches sp onto it; x1 is clobbered.
.macro SWITCH_TO_INT_STACK
	mrs		x1, TPIDR_EL1			// x1 = current thread
	ldr		x1, [x1, ACT_CPUDATAP]		// x1 = thread's per-CPU data
	ldr		x1, [x1, CPU_ISTACKPTR]		// x1 = top of interrupt stack
	mov		sp, x1			// Set the stack pointer to the interrupt stack
.endmacro
240  
/*
 * REENABLE_DAIF
 *
 * Restores the DAIF bits to their original state (well, the AIF bits at least).
 *   arg0 - DAIF bits (read from the DAIF interface) to restore
 *
 * In DAIF a SET bit means the corresponding exception type is MASKED, so
 * this macro re-enables (DAIFClr) exactly the groups whose bits were clear
 * in the saved value.  IRQ and FIQ are deliberately treated as a unit: if
 * either was masked, neither is re-enabled here.  The D (debug) bit is
 * never touched.
 */
.macro REENABLE_DAIF
	/* If none of A/I/F were masked, re-enable all three (case 3). */
	tst		$0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF)
	b.eq		3f

	/* If I and F were both unmasked (A must be masked here), re-enable I/F (case 2). */
	tst		$0, #(DAIF_IRQF | DAIF_FIQF)
	b.eq		2f

	/* If A was unmasked (I or F must be masked here), re-enable A only (case 1). */
	tst		$0, #(DAIF_ASYNCF)
	b.eq		1f

	/* Everything was masked: enable nothing. */
	b		4f

	/* A enable. */
1:
	msr		DAIFClr, #(DAIFSC_ASYNCF)
	b		4f

	/* IF enable. */
2:
	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
	b		4f

	/* AIF enable. */
3:
	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)

	/* Done! */
4:
.endmacro
280