/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/const.h>

/* Values for secondary_data.status */
#define CPU_STUCK_REASON_SHIFT      (8)
#define CPU_BOOT_STATUS_MASK        ((UL(1) << CPU_STUCK_REASON_SHIFT) - 1)

#define CPU_MMU_OFF         (-1)
#define CPU_BOOT_SUCCESS        (0)
/* The cpu invoked ops->cpu_die, synchronise it with cpu_kill */
#define CPU_KILL_ME         (1)
/* The cpu couldn't die gracefully and is looping in the kernel */
#define CPU_STUCK_IN_KERNEL     (2)
/* Fatal system error detected by secondary CPU, crash the system */
#define CPU_PANIC_KERNEL        (3)

#define CPU_STUCK_REASON_52_BIT_VA  (UL(1) << CPU_STUCK_REASON_SHIFT)
#define CPU_STUCK_REASON_NO_GRAN    (UL(2) << CPU_STUCK_REASON_SHIFT)

#ifndef __ASSEMBLY__

#include <asm/percpu.h>

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

/*
 * We don't use this_cpu_read(cpu_number) as that has implicit writes to
 * preempt_count and associated (compiler) barriers whose expense we'd like
 * to avoid. If we're preemptible, the value can be stale at use anyway.
 * And we can't use this_cpu_ptr() either, as that winds up recursing back
 * here under CONFIG_DEBUG_PREEMPT=y.
 */
#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))

/*
 * Logical CPU mapping.
 */
extern u64 __cpu_logical_map[NR_CPUS];
extern u64 cpu_logical_map(unsigned int cpu);

static inline void set_cpu_logical_map(unsigned int cpu, u64 hwid)
{
    __cpu_logical_map[cpu] = hwid;
}
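
/*
 * Illustrative sketch, not part of the original header: on arm64 the "hwid"
 * recorded here is the CPU's MPIDR affinity value, filled in by the CPU
 * enumeration code (DT/ACPI) before secondaries are brought up. A
 * hypothetical caller might look a logical CPU up by hardware ID like so:
 *
 *	for_each_possible_cpu(cpu)
 *		if (cpu_logical_map(cpu) == hwid)
 *			return cpu;
 */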

struct seq_file;

/*
 * Discover the set of possible CPUs and determine their
 * SMP operations.
 */
extern void smp_init_cpus(void);

/*
 * Register IPI interrupts with the arch SMP code
 */
extern void set_smp_ipi_range(int ipi_base, int nr_ipi);

/*
 * Called from the secondary holding pen, this is the secondary CPU entry point.
 */
asmlinkage void secondary_start_kernel(void);

/*
 * Initial data for bringing up a secondary CPU.
 * @task   - Initial task_struct pointer for the secondary CPU (used to
 *           locate its initial stack).
 * @status - Result passed back from the secondary CPU to
 *           indicate success or failure.
 */
struct secondary_data {
    struct task_struct *task;
    long status;
};

extern struct secondary_data secondary_data;
extern long __early_cpu_boot_status;
extern void secondary_entry(void);

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
#else
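/*
 * Note (not part of the original header): with the ACPI parking protocol
 * compiled out there must be no callers of this stub; BUILD_BUG() turns any
 * call that survives dead-code elimination into a build failure.
 */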
static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
    BUILD_BUG();
}
#endif

extern int __cpu_disable(void);

extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void);
extern void cpu_die_early(void);

static inline void cpu_park_loop(void)
{
    for (;;) {
        wfe();
        wfi();
    }
}

static inline void update_cpu_boot_status(int val)
{
    WRITE_ONCE(secondary_data.status, val);
    /* Ensure the visibility of the status update */
    dsb(ishst);
}
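
/*
 * Illustrative sketch, not part of the original header: the low byte of the
 * status word (CPU_BOOT_STATUS_MASK) carries one of the CPU_* boot codes
 * defined above, while the bits at and above CPU_STUCK_REASON_SHIFT can
 * encode why a stuck CPU failed to come online. A secondary that cannot run
 * a kernel built for 52-bit VAs might, for example, report:
 *
 *	update_cpu_boot_status(CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA);
 *	cpu_park_loop();
 *
 * and the boot CPU could then split the result with:
 *
 *	long status = READ_ONCE(secondary_data.status);
 *	long code   = status & CPU_BOOT_STATUS_MASK;
 *	long reason = status & ~CPU_BOOT_STATUS_MASK;
 */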

/*
 * The calling secondary CPU has detected a serious configuration mismatch,
 * which calls for a kernel panic. Update the boot status and park the calling
 * CPU.
 */
static inline void cpu_panic_kernel(void)
{
    update_cpu_boot_status(CPU_PANIC_KERNEL);
    cpu_park_loop();
}

/*
 * If a secondary CPU enters the kernel but fails to come online
 * (e.g. due to mismatched features) and cannot exit the kernel,
 * we increment cpus_stuck_in_kernel and leave the CPU in a
 * quiescent loop within the kernel text. The memory containing
 * this loop must not be re-used for anything else as the 'stuck'
 * core is executing it.
 *
 * This function is used to inhibit features like kexec and hibernate.
 */
bool cpus_are_stuck_in_kernel(void);
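
/*
 * Illustrative sketch, not part of the original header: a typical caller
 * (e.g. the kexec or hibernate paths) is expected to refuse to proceed while
 * a CPU is parked in kernel text, along the lines of:
 *
 *	if (cpus_are_stuck_in_kernel()) {
 *		pr_err("CPUs are stuck in the kernel, refusing to proceed\n");
 *		return -EBUSY;
 *	}
 */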

extern void crash_smp_send_stop(void);
extern bool smp_crash_stop_failed(void);
extern void panic_smp_self_stop(void);

#endif /* ifndef __ASSEMBLY__ */

#endif /* ifndef __ASM_SMP_H */