![]() |
|
|||
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pgtable.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/cpuidle.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
#include <asm/mte.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

/*
 * This is allocated by cpu_suspend_init(), and used to store a pointer to
 * the 'struct sleep_stack_data' that contains a particular CPU's state.
 * Sized by mpidr_hash_size() — presumably one slot per MPIDR hash bucket;
 * the indexing itself happens outside this file.
 */
unsigned long *sleep_save_stash;

/*
 * This hook is provided so that cpu_suspend code can restore HW
 * breakpoints as early as possible in the resume path, before reenabling
 * debug exceptions. Code cannot be run from a CPU PM notifier since by the
 * time the notifier runs debug exceptions might have been enabled already,
 * with HW breakpoints registers content still in an unknown state.
 */
static int (*hw_breakpoint_restore)(unsigned int);

/*
 * cpu_suspend_set_dbg_restorer - register the hook used to restore HW
 * breakpoint state on the resume path.
 *
 * @hw_bp_restore: callback; it is invoked with the resuming CPU's number.
 *
 * May be called at most once (init time); a second registration attempt
 * triggers a WARN and is ignored, keeping the first hook in place.
 */
void __init cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
{
	/* Prevent multiple restore hook initializations */
	if (WARN_ON(hw_breakpoint_restore))
		return;
	hw_breakpoint_restore = hw_bp_restore;
}

/*
 * __cpu_suspend_exit - re-establish kernel state after waking from a
 * power-down. Runs very early on the resume path (idmap still live in
 * TTBR0_EL1), hence 'notrace': the tracing infrastructure cannot be
 * assumed functional yet.
 */
void notrace __cpu_suspend_exit(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * We are resuming from reset with the idmap active in TTBR0_EL1.
	 * We must uninstall the idmap and restore the expected MMU
	 * state before we can possibly return to userspace.
	 */
	cpu_uninstall_idmap();

	/* Restore CnP bit in TTBR1_EL1 */
	if (system_supports_cnp())
		cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);

	/*
	 * PSTATE was not saved over suspend/resume, re-enable any detected
	 * features that might not have been set correctly.
	 */
	__uaccess_enable_hw_pan();

	/*
	 * Restore HW breakpoint registers to sane values
	 * before debug exceptions are possibly reenabled
	 * by cpu_suspend()'s local_daif_restore() call.
	 */
	if (hw_breakpoint_restore)
		hw_breakpoint_restore(cpu);

	/*
	 * On resume, firmware implementing dynamic mitigation will
	 * have turned the mitigation on. If the user has forcefully
	 * disabled it, make sure their wishes are obeyed.
	 */
	spectre_v4_enable_mitigation(NULL);

	/* Restore additional feature-specific configuration */
	ptrauth_suspend_exit();
}

/*
 * cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer; expected to power the CPU down and
 *     therefore not return on success
 *
 * Returns 0 when the CPU suspended and came back through cpu_resume(),
 * or a negative error code when the finisher returned without actually
 * suspending (its non-zero return value, or -EOPNOTSUPP if it returned 0).
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;
	struct arm_cpuidle_irq_context context;

	/* Report any MTE async fault before going to suspend */
	mte_suspend_enter();

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	flags = local_daif_save();

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers) hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * Switch to using DAIF.IF instead of PMR in order to reliably
	 * resume if we're using pseudo-NMIs.
	 */
	arm_cpuidle_save_irq_context(&context);

	/*
	 * __cpu_suspend_enter() behaves setjmp-like: a non-zero return is
	 * the suspend-entry path (run the finisher); a zero return means
	 * execution came back via cpu_resume() and the exit fixups must run.
	 */
	if (__cpu_suspend_enter(&state)) {
		/* Call the suspend finisher */
		ret = fn(arg);

		/*
		 * Never gets here, unless the suspend finisher fails.
		 * Successful cpu_suspend() should return from cpu_resume(),
		 * returning through this code path is considered an error
		 * If the return value is set to 0 force ret = -EOPNOTSUPP
		 * to make sure a proper error condition is propagated
		 */
		if (!ret)
			ret = -EOPNOTSUPP;
	} else {
		/* Resume path: RCU still thinks we're idle, hence NONIDLE */
		RCU_NONIDLE(__cpu_suspend_exit());
	}

	arm_cpuidle_restore_irq_context(&context);

	unpause_graph_tracing();

	/*
	 * Restore pstate flags. OS lock and mdscr have been already
	 * restored, so from this point onwards, debugging is fully
	 * reenabled if it was enabled when core started shutdown.
	 */
	local_daif_restore(flags);

	return ret;
}

/*
 * cpu_suspend_init - allocate sleep_save_stash, the array holding the
 * per-CPU context pointers used over suspend/resume.
 *
 * One slot per mpidr_hash_size() entry, zero-initialized via kcalloc().
 * Returns 0 on success, -ENOMEM (with a WARN) if allocation fails.
 */
static int __init cpu_suspend_init(void)
{
	/* ctx_ptr is an array of physical addresses */
	sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
				   GFP_KERNEL);

	if (WARN_ON(!sleep_save_stash))
		return -ENOMEM;

	return 0;
}
early_initcall(cpu_suspend_init);
[ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
This page was automatically generated by the 2.1.0 LXR engine. The LXR team |
![]() ![]() |