/* src/include/cpu/x86/mp.h */
  1  /* SPDX-License-Identifier: GPL-2.0-only */
  2  
  3  #ifndef _X86_MP_H_
  4  #define _X86_MP_H_
  5  
  6  #include <cpu/x86/smm.h>
  7  #include <types.h>
  8  
  9  #define CACHELINE_SIZE 64
 10  
 11  struct cpu_info;
 12  struct bus;
 13  
 14  static inline void mfence(void)
 15  {
 16  	/* mfence came with the introduction of SSE2. */
 17  	if (CONFIG(SSE2))
 18  		__asm__ __volatile__("mfence\t\n": : :"memory");
 19  	else
 20  		__asm__ __volatile__("lock; addl $0,0(%%esp)": : : "memory");
 21  }
 22  
 23  /* The sequence of the callbacks are in calling order. */
/*
 * Platform-provided callbacks driving MP (multiprocessor) initialization.
 * The members are listed in calling order; see the sequence comment above
 * mp_init_with_smm() for the full flow.
 */
struct mp_ops {
	/*
	 * Optionally provide a callback prior to kicking off MP
	 * startup. This callback is done prior to loading the SIPI
	 * vector but after gathering the MP state information. Please
	 * see the sequence below.
	 */
	void (*pre_mp_init)(void);
	/*
	 * Return the number of logical x86 execution contexts that
	 * need to be brought out of SIPI state as well as have SMM
	 * handlers installed.
	 */
	int (*get_cpu_count)(void);
	/*
	 * Optionally fill in permanent SMM region and save state size. If
	 * this callback is not present no SMM handlers will be installed.
	 * The perm_smsize is the size available to house the permanent SMM
	 * handler.
	 */
	void (*get_smm_info)(uintptr_t *perm_smbase, size_t *perm_smsize,
				size_t *smm_save_state_size);
	/*
	 * Optionally fill in pointer to microcode and indicate if the APs
	 * can load the microcode in parallel.
	 */
	void (*get_microcode_info)(const void **microcode, int *parallel);
	/*
	 * Optionally provide a callback prior to the APs starting SMM
	 * relocation or CPU driver initialization. However, note that
	 * this callback is called after SMM handlers have been loaded.
	 */
	void (*pre_mp_smm_init)(void);
	/*
	 * Optional function to use to trigger SMM to perform relocation. If
	 * not provided, smm_initiate_relocation() is used.
	 * This function is called on each CPU.
	 * On platforms that select CONFIG(X86_SMM_SKIP_RELOCATION_HANDLER) to
	 * not relocate in SMM, this function can be used to relocate CPUs.
	 */
	void (*per_cpu_smm_trigger)(void);
	/*
	 * This function is called while each CPU is in the SMM relocation
	 * handler. Its primary purpose is to adjust the SMBASE for the
	 * permanent handler. The parameters passed are the current cpu
	 * running the relocation handler, current SMBASE of relocation handler,
	 * and the pre-calculated staggered CPU SMBASE address of the permanent
	 * SMM handler.
	 * This function is only called with !CONFIG(X86_SMM_SKIP_RELOCATION_HANDLER) set.
	 */
	void (*relocation_handler)(int cpu, uintptr_t curr_smbase,
		uintptr_t staggered_smbase);
	/*
	 * Optionally provide a callback that is called after the APs
	 * and the BSP have gone through the initialization sequence.
	 */
	void (*post_mp_init)(void);
};
 82  
/*
 * The mp_ops argument is used to drive the multiprocessor initialization.
 * Unless otherwise stated each callback is called on the BSP only. The
 * sequence of operations is the following (steps 5 and 8 are internal to
 * the MP init code, not mp_ops callbacks):
 * 1. pre_mp_init()
 * 2. get_cpu_count()
 * 3. get_smm_info()
 * 4. get_microcode_info()
 * 5. adjust_cpu_apic_entry() for each number of get_cpu_count()
 * 6. pre_mp_smm_init()
 * 7. per_cpu_smm_trigger() in parallel for all cpus which calls
 *    relocation_handler() in SMM.
 * 8. mp_initialize_cpu() for each cpu
 * 9. post_mp_init()
 */
enum cb_err mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops);
 99  
enum {
	/* Pass as logical_cpu_num to run the function on all cores (both BSP and APs). */
	MP_RUN_ON_ALL_CPUS,
	/* Any other value selects a single AP by its logical CPU number. */
};
105  
/*
 * After APs are up and PARALLEL_MP_AP_WORK is enabled one can issue work
 * to all the APs to perform. Currently the BSP is the only CPU that is allowed
 * to issue work. i.e. the APs should not call any of these functions.
 *
 * Pass expire_us <= 0 for an infinite timeout.
 * Pass logical_cpu_num = MP_RUN_ON_ALL_CPUS to execute the function on all
 * cores (BSP + APs), otherwise the value selects a single AP by its logical
 * CPU number.
 */
enum cb_err mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num,
		long expire_us);
117  
/*
 * Runs func on all APs excluding BSP, with a provision to run calls in parallel
 * (run_parallel = true) or serially per AP. Pass expire_us <= 0 for an
 * infinite timeout.
 */
enum cb_err mp_run_on_all_aps(void (*func)(void *), void *arg, long expire_us,
			      bool run_parallel);
124  
/* Like mp_run_on_aps() but also runs func on the BSP. */
enum cb_err mp_run_on_all_cpus(void (*func)(void *), void *arg);
127  
/* Like mp_run_on_all_cpus() but additionally waits until all APs have
   finished executing the function call. The time limit on a function call
   is 1 second per AP. */
enum cb_err mp_run_on_all_cpus_synchronously(void (*func)(void *), void *arg);
131  
/*
 * Park all APs to prepare for OS boot. This is handled automatically
 * by the coreboot infrastructure.
 */
enum cb_err mp_park_aps(void);
137  
/*
 * SMM helpers to use with initializing CPUs.
 */

/* Send SMI to self without any serialization. */
void smm_initiate_relocation_parallel(void);
/* Send SMI to self with single execution (serialized across CPUs). */
void smm_initiate_relocation(void);
146  
147  #endif /* _X86_MP_H_ */