// duct-tape / xnu / osfmk / arm64 / cswitch.s
  1  /*
  2   * Copyright (c) 2007 Apple Inc. All rights reserved.
  3   *
  4   * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  5   *
  6   * This file contains Original Code and/or Modifications of Original Code
  7   * as defined in and that are subject to the Apple Public Source License
  8   * Version 2.0 (the 'License'). You may not use this file except in
  9   * compliance with the License. The rights granted to you under the License
 10   * may not be used to create, or enable the creation or redistribution of,
 11   * unlawful or unlicensed copies of an Apple operating system, or to
 12   * circumvent, violate, or enable the circumvention or violation of, any
 13   * terms of an Apple operating system software license agreement.
 14   *
 15   * Please obtain a copy of the License at
 16   * http://www.opensource.apple.com/apsl/ and read it before using this file.
 17   *
 18   * The Original Code and all software distributed under the License are
 19   * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 20   * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 21   * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 22   * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 23   * Please see the License for the specific language governing rights and
 24   * limitations under the License.
 25   *
 26   * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 27   */
 28  #include <machine/asm.h>
 29  #include <arm64/machine_machdep.h>
 30  #include <arm64/machine_routines_asm.h>
 31  #include <arm64/pac_asm.h>
 32  #include <arm64/proc_reg.h>
 33  #include "assym.s"
 34  
 35  /*
 36   * save_general_registers
 37   *
 38   * Saves variable registers to kernel PCB.
 39   *   arg0 - thread_kernel_state pointer
 40   *   arg1 - Scratch register
 41   */
 42  
 43  .macro	save_general_registers
 44  /* AAPCS-64 Page 14
 45   *
 46   * A subroutine invocation must preserve the contents of the registers r19-r29
 47   * and SP. We also save IP0 and IP1, as machine_idle uses IP0 for saving the LR.
 48   */
 49  	stp		x16, x17, [$0, SS64_KERNEL_X16]
 50  	stp		x19, x20, [$0, SS64_KERNEL_X19]
 51  	stp		x21, x22, [$0, SS64_KERNEL_X21]
 52  	stp		x23, x24, [$0, SS64_KERNEL_X23]
 53  	stp		x25, x26, [$0, SS64_KERNEL_X25]
 54  	stp		x27, x28, [$0, SS64_KERNEL_X27]
 55  	stp		fp, lr, [$0, SS64_KERNEL_FP]
 56  	str		xzr, [$0, SS64_KERNEL_PC]
 57  	MOV32	w$1, PSR64_KERNEL_POISON
 58  	str		w$1, [$0, SS64_KERNEL_CPSR]	
 59  #ifdef HAS_APPLE_PAC
 60  	stp		x0, x1, [sp, #-16]!
 61  	stp		x2, x3, [sp, #-16]!
 62  	stp		x4, x5, [sp, #-16]!
 63  
 64  	/*
 65  	 * Arg0: The ARM context pointer
 66  	 * Arg1: PC value to sign
 67  	 * Arg2: CPSR value to sign
 68  	 * Arg3: LR to sign
 69  	 */
 70  	mov		x0, $0
 71  	mov		x1, #0
 72  	mov		w2, w$1
 73  	mov		x3, lr
 74  	mov		x4, x16
 75  	mov		x5, x17
 76  	bl		EXT(ml_sign_kernel_thread_state)
 77  
 78  	ldp		x4, x5, [sp], #16
 79  	ldp		x2, x3, [sp], #16
 80  	ldp		x0, x1, [sp], #16
 81  	ldp		fp, lr, [$0, SS64_KERNEL_FP]
 82  #endif /* defined(HAS_APPLE_PAC) */
 83  	mov		x$1, sp
 84  	str		x$1, [$0, SS64_KERNEL_SP]
 85  
 86  /* AAPCS-64 Page 14
 87   *
 88   * Registers d8-d15 (s8-s15) must be preserved by a callee across subroutine
 89   * calls; the remaining registers (v0-v7, v16-v31) do not need to be preserved
 90   * (or should be preserved by the caller).
 91   */
 92  	str		d8,	[$0, NS64_KERNEL_D8]
 93  	str		d9,	[$0, NS64_KERNEL_D9]
 94  	str		d10,[$0, NS64_KERNEL_D10]
 95  	str		d11,[$0, NS64_KERNEL_D11]
 96  	str		d12,[$0, NS64_KERNEL_D12]
 97  	str		d13,[$0, NS64_KERNEL_D13]
 98  	str		d14,[$0, NS64_KERNEL_D14]
 99  	str		d15,[$0, NS64_KERNEL_D15]
100  
101  	mrs		x$1, FPCR
102  	str		w$1, [$0, NS64_KERNEL_FPCR]
103  .endmacro
104  
/*
 * load_general_registers
 *
 * Loads variable registers from kernel PCB.
 *   arg0 - thread_kernel_state pointer
 *   arg1 - Scratch register number (expanded as both w$1 and x$1)
 *
 * NOTE(review): the PAC authentication sequence was previously emitted
 * unconditionally, although save_general_registers guards its matching
 * signing code with #ifdef HAS_APPLE_PAC. Guards restored here so a
 * non-PAC configuration does not reference an undefined macro and still
 * restores x16/x17 and lr directly from the PCB.
 */
.macro	load_general_registers
#ifdef HAS_APPLE_PAC
	/*
	 * Stash x0-x2 in callee-saved registers (they are reloaded from the
	 * PCB below anyway) while the signed thread state is authenticated.
	 * AUTH_KERNEL_THREAD_STATE_IN_X0 loads + auths x16, x17, and lr.
	 */
	mov		x20, x0
	mov		x21, x1
	mov		x22, x2

	mov		x0, $0
	AUTH_KERNEL_THREAD_STATE_IN_X0	x23, x24, x25, x26, x27

	mov		x0, x20
	mov		x1, x21
	mov		x2, x22
#endif /* defined(HAS_APPLE_PAC) */

	ldr		w$1, [$0, NS64_KERNEL_FPCR]
	mrs		x19, FPCR
	CMSR FPCR, x19, x$1, 1						// Write FPCR only if it differs from the saved value
1:

#ifdef HAS_APPLE_PAC
	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
#else
	ldp		x16, x17, [$0, SS64_KERNEL_X16]
#endif
	ldp		x19, x20, [$0, SS64_KERNEL_X19]
	ldp		x21, x22, [$0, SS64_KERNEL_X21]
	ldp		x23, x24, [$0, SS64_KERNEL_X23]
	ldp		x25, x26, [$0, SS64_KERNEL_X25]
	ldp		x27, x28, [$0, SS64_KERNEL_X27]
#ifdef HAS_APPLE_PAC
	ldr		fp, [$0, SS64_KERNEL_FP]
	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
#else
	ldp		fp, lr, [$0, SS64_KERNEL_FP]
#endif
	ldr		x$1, [$0, SS64_KERNEL_SP]
	mov		sp, x$1

	ldr		d8,	[$0, NS64_KERNEL_D8]
	ldr		d9,	[$0, NS64_KERNEL_D9]
	ldr		d10,[$0, NS64_KERNEL_D10]
	ldr		d11,[$0, NS64_KERNEL_D11]
	ldr		d12,[$0, NS64_KERNEL_D12]
	ldr		d13,[$0, NS64_KERNEL_D13]
	ldr		d14,[$0, NS64_KERNEL_D14]
	ldr		d15,[$0, NS64_KERNEL_D15]
.endmacro
149  
150  
/*
 * set_thread_registers
 *
 * Updates thread registers during context switch
 *  arg0 - New thread pointer
 *  arg1 - Scratch register
 *  arg2 - Scratch register
 */
.macro	set_thread_registers
	msr		TPIDR_EL1, $0						// Write new thread pointer to TPIDR_EL1
	ldr		$1, [$0, ACT_CPUDATAP]				// Publish the new thread in this CPU's cpu_data
	str		$0, [$1, CPU_ACTIVE_THREAD]
	ldr		$1, [$0, TH_CTH_SELF]				// Get cthread pointer
	mrs		$2, TPIDRRO_EL0						// Extract cpu number from TPIDRRO_EL0
	and		$2, $2, #(MACHDEP_CPUNUM_MASK)
	orr		$2, $1, $2							// Save new cthread/cpu to TPIDRRO_EL0
	msr		TPIDRRO_EL0, $2
	msr		TPIDR_EL0, xzr						// Clear the user-writable thread register
#if DEBUG || DEVELOPMENT
	ldr		$1, [$0, TH_THREAD_ID]				// Save the bottom 32-bits of the thread ID into
	msr		CONTEXTIDR_EL1, $1					// CONTEXTIDR_EL1 (top 32-bits are RES0).
#endif /* DEBUG || DEVELOPMENT */
.endmacro
174  
/* Per-process ROP/JOP keys are reprogrammed on context switch only when both
 * PAC and paravirtualized PAC are configured. */
#define CSWITCH_ROP_KEYS	(HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)
#define CSWITCH_JOP_KEYS	(HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)

/*
 * set_process_dependent_keys_and_sync_context
 *
 * Updates process dependent keys and issues explicit context sync during context switch if necessary
 *  Per CPU Data rop_key is initialized in arm_init() for bootstrap processor
 *  and in cpu_data_init for slave processors
 *
 *  thread - New thread pointer
 *  new_key - Scratch register: New Thread Key
 *  tmp_key - Scratch register: Current CPU Key
 *  cpudatap - Scratch register: Current CPU Data pointer
 *  wsync - Half-width scratch register: CPU sync required flag
 *
 *  to save on ISBs, for ARMv8.5 we use the CPU_SYNC_ON_CSWITCH field, cached in wsync, for pre-ARMv8.5,
 *  we just use wsync to keep track of needing an ISB
 *
 *  NOTE: the macro deliberately defines "1:" twice when HAS_PARAVIRTUALIZED_PAC
 *  is set; GAS "1f" binds to the NEAREST following "1:", so the cbz below then
 *  lands just before the flag-clearing strb instead of after it.
 */
.macro set_process_dependent_keys_and_sync_context	thread, new_key, tmp_key, cpudatap, wsync


#if defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC)
	ldr		\cpudatap, [\thread, ACT_CPUDATAP]
#endif /* defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC) */

#if defined(__ARM_ARCH_8_5__)
	ldrb	\wsync, [\cpudatap, CPU_SYNC_ON_CSWITCH]	// Cached "sync needed" flag set elsewhere
#else /* defined(__ARM_ARCH_8_5__) */
	mov		\wsync, #0
#endif


#if CSWITCH_ROP_KEYS
	ldr		\new_key, [\thread, TH_ROP_PID]
	REPROGRAM_ROP_KEYS	Lskip_rop_keys_\@, \new_key, \cpudatap, \tmp_key
#if HAS_PARAVIRTUALIZED_PAC
	/* xnu hypervisor guarantees context synchronization during guest re-entry */
	mov		\wsync, #0
#else
	mov		\wsync, #1
#endif
Lskip_rop_keys_\@:
#endif /* CSWITCH_ROP_KEYS */

#if CSWITCH_JOP_KEYS
	ldr		\new_key, [\thread, TH_JOP_PID]
	REPROGRAM_JOP_KEYS	Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
#if HAS_PARAVIRTUALIZED_PAC
	mov		\wsync, #0
#else
	mov		\wsync, #1
#endif
Lskip_jop_keys_\@:
#endif /* CSWITCH_JOP_KEYS */

	cbz		\wsync, 1f							// No sync needed: skip the isb (see NOTE above)
	isb 	sy

#if HAS_PARAVIRTUALIZED_PAC
1:	/* guests need to clear the sync flag even after skipping the isb, in case they synced via hvc instead */
#endif
#if defined(__ARM_ARCH_8_5__)
	strb	wzr, [\cpudatap, CPU_SYNC_ON_CSWITCH]
#endif
1:
.endmacro
242  
/*
 * void     machine_load_context(thread_t        thread)
 *
 * Load the context for the first thread to run on a
 * cpu, and go.
 */
	.text
	.align 2
	.globl	EXT(machine_load_context)

LEXT(machine_load_context)
	set_thread_registers 	x0, x1, x2
	ldr		x1, [x0, TH_KSTACKPTR]				// Get top of kernel stack
	load_general_registers 	x1, 2
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	mov		x0, #0								// Clear argument to thread_continue
	ret											// Jump through the restored LR
260  
/*
 *  typedef void (*thread_continue_t)(void *param, wait_result_t)
 *
 *	void Call_continuation( thread_continue_t continuation,
 *	            			void *param,
 *				            wait_result_t wresult,
 *                          bool enable interrupts)
 *
 *	Resets the stack to the kernel stack top, optionally re-enables
 *	interrupts, invokes the continuation, and terminates the thread if
 *	the continuation ever returns.
 */
	.text
	.align	5
	.globl	EXT(Call_continuation)

LEXT(Call_continuation)
	mrs		x4, TPIDR_EL1						// Get the current thread pointer

	/* ARM64_TODO arm loads the kstack top instead of arg4. What should we use? */
	ldr		x5, [x4, TH_KSTACKPTR]				// Get the top of the kernel stack
	mov		sp, x5								// Set stack pointer
	mov		fp, #0								// Clear the frame pointer

	set_process_dependent_keys_and_sync_context	x4, x5, x6, x7, w20

	mov x20, x0  //continuation
	mov x21, x1  //continuation parameter
	mov x22, x2  //wait result

	cbz x3, 1f									// Leave interrupts disabled if arg3 is false
	mov x0, #1
	bl EXT(ml_set_interrupts_enabled)
1:

	mov		x0, x21								// Set the first parameter
	mov		x1, x22								// Set the wait result arg
#ifdef HAS_APPLE_PAC
	mov		x21, THREAD_CONTINUE_T_DISC			// Discriminator for thread_continue_t pointers
	blraa	x20, x21							// Branch to the continuation, authenticating the pointer
#else
	blr		x20									// Branch to the continuation
#endif
	mrs		x0, TPIDR_EL1						// Get the current thread pointer
	b		EXT(thread_terminate)				// Kill the thread if the continuation returns
302  
303  
/*
 *	thread_t Switch_context(thread_t	old,
 * 				void		(*cont)(void),
 *				thread_t	new)
 *
 *	Returns the old thread: x0 is not written between entry and ret, so
 *	it is still "old" when execution resumes in the new thread's context.
 */
	.text
	.align 5
	.globl	EXT(Switch_context)

LEXT(Switch_context)
	cbnz	x1, Lswitch_threads					// Skip saving old state if blocking on continuation
	ldr		x3, [x0, TH_KSTACKPTR]				// Get the old kernel stack top
	save_general_registers	x3, 4
Lswitch_threads:
	set_thread_registers	x2, x3, x4
	ldr		x3, [x2, TH_KSTACKPTR]
	load_general_registers	x3, 4
	set_process_dependent_keys_and_sync_context	x2, x3, x4, x5, w6
	ret
323  
/*
 *	thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor)
 *
 *	Saves the current thread's state, masks interrupts, and runs the
 *	shutdown path on this CPU's interrupt stack (x0/x1 pass through
 *	unchanged to cpu_doshutdown).
 */
	.text
	.align 5
	.globl	EXT(Shutdown_context)

LEXT(Shutdown_context)
	mrs		x10, TPIDR_EL1							// Get thread pointer
	ldr		x11, [x10, TH_KSTACKPTR]				// Get the top of the kernel stack
	save_general_registers	x11, 12
	msr		DAIFSet, #(DAIFSC_FIQF | DAIFSC_IRQF)	// Disable interrupts
	ldr		x11, [x10, ACT_CPUDATAP]				// Get current cpu
	ldr		x12, [x11, CPU_ISTACKPTR]				// Switch to interrupt stack
	mov		sp, x12
	b		EXT(cpu_doshutdown)
341  
/*
 *	thread_t Idle_context(void)
 *
 *	Saves the current thread's state and enters the idle loop on this
 *	CPU's interrupt stack.
 */
	.text
	.align 5
	.globl	EXT(Idle_context)

LEXT(Idle_context)
	mrs		x0, TPIDR_EL1						// Get thread pointer
	ldr		x1, [x0, TH_KSTACKPTR]				// Get the top of the kernel stack
	save_general_registers	x1, 2
	ldr		x1, [x0, ACT_CPUDATAP]				// Get current cpu
	ldr		x2, [x1, CPU_ISTACKPTR]				// Switch to interrupt stack
	mov		sp, x2
	b		EXT(cpu_idle)
358  
/*
 *	thread_t Idle_load_context(void)
 *
 *	Restores the current thread's saved register state when leaving idle
 *	and returns into it. (Header previously mis-titled "Idle_context".)
 */
	.text
	.align 5
	.globl	EXT(Idle_load_context)

LEXT(Idle_load_context)
	mrs		x0, TPIDR_EL1						// Get thread pointer
	ldr		x1, [x0, TH_KSTACKPTR]				// Get the top of the kernel stack
	load_general_registers	x1, 2
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	ret
373  
	.align	2
	.globl	EXT(machine_set_current_thread)
/*
 * void machine_set_current_thread(thread_t thread)
 *
 * Publishes x0 as the active thread on this CPU (TPIDR_EL1,
 * cpu_data->cpu_active_thread, TPIDRRO_EL0) without switching context.
 */
LEXT(machine_set_current_thread)
	set_thread_registers x0, x1, x2
	ret
379  
380  
381  /* vim: set ts=4: */