// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

#ifdef CONFIG_X86_32
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
#endif

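/*
 * noinstr: this runs on instrumentation-free entry/exception paths.  It is
 * pure address arithmetic: cpu N's area lives at
 * CPU_ENTRY_AREA_PER_CPU + N * CPU_ENTRY_AREA_SIZE, i.e. the per-CPU entry
 * areas are packed back to back in the CPU_ENTRY_AREA_PER_CPU region.
 */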
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
        unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
        BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

        return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
        unsigned long va = (unsigned long) cea_vaddr;
        pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

        /*
         * The cpu_entry_area is shared between the user and kernel
         * page tables.  All of its ptes can safely be global.
         * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
         * non-present ptes, so be careful not to set it in that
         * case to avoid confusion.
         */
        if (boot_cpu_has(X86_FEATURE_PGE) &&
            (pgprot_val(flags) & _PAGE_PRESENT))
                pte = pte_set_flags(pte, _PAGE_GLOBAL);

        set_pte_vaddr(va, pte);
}

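/*
 * Map @pages pages of the per-CPU object @ptr into the cpu_entry_area
 * starting at @cea_vaddr, one PTE at a time, with protections @prot.
 */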
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
        for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
                cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

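/*
 * Map the Intel per-CPU debug store into the cpu_entry_area.  The
 * cpu_entry_area is visible with both the kernel and the user (PTI) page
 * tables, so records written by the hardware (BTS/PEBS) stay reachable
 * no matter which CR3 is live.
 */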
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
        unsigned int npages;
        void *cea;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return;

        cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
        npages = sizeof(struct debug_store) / PAGE_SIZE;
        BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
        cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
                             PAGE_KERNEL);

        cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
        /*
         * Force the population of PMDs for not yet allocated per cpu
         * memory like debug store buffers.
         */
        npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
        for (; npages; npages--, cea += PAGE_SIZE)
                cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

#define cea_map_stack(name) do { \
        npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
        cea_map_percpu_pages(cea->estacks.name## _stack, \
                        estacks->name## _stack, npages, PAGE_KERNEL); \
        } while (0)
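/*
 * For example, cea_map_stack(DF) token-pastes into (modulo whitespace):
 *
 *	npages = sizeof(estacks->DF_stack) / PAGE_SIZE;
 *	cea_map_percpu_pages(cea->estacks.DF_stack,
 *			     estacks->DF_stack, npages, PAGE_KERNEL);
 */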

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
        struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
        unsigned int npages;

        BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

        per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

        /*
         * The exception stack mappings in the per-CPU area are protected
         * by guard pages, so each stack must be mapped separately.
         */
        cea_map_stack(DF);
        cea_map_stack(NMI);
        cea_map_stack(DB);
        cea_map_stack(MCE);

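        /* The #VC handler stacks are only needed by SEV-ES guests. */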
        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
                if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
                        cea_map_stack(VC);
                        cea_map_stack(VC2);
                }
        }
}
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

        cea_map_percpu_pages(&cea->doublefault_stack,
                             &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Setup the fixmap mappings only once per cpu */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
        /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
        pgprot_t gdt_prot = PAGE_KERNEL_RO;
        pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
        /*
         * On 32-bit systems, the GDT cannot be read-only because
         * our double fault handler uses a task gate, and entering through
         * a task gate needs to change an available TSS to busy.  If the
         * GDT is read-only, that will triple fault.  The TSS cannot be
         * read-only because the CPU writes to it on task switches.
         *
         * On Xen PV, the GDT must be read-only because the hypervisor
         * requires it.
         */
        pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
                PAGE_KERNEL_RO : PAGE_KERNEL;
        pgprot_t tss_prot = PAGE_KERNEL;
#endif

        cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

        cea_map_percpu_pages(&cea->entry_stack_page,
                             per_cpu_ptr(&entry_stack_storage, cpu), 1,
                             PAGE_KERNEL);

        /*
         * The Intel SDM says (Volume 3, 7.2.1):
         *
         *  Avoid placing a page boundary in the part of the TSS that the
         *  processor reads during a task switch (the first 104 bytes). The
         *  processor may not correctly perform address translations if a
         *  boundary occurs in this area. During a task switch, the processor
         *  reads and writes into the first 104 bytes of each TSS (using
         *  contiguous physical addresses beginning with the physical address
         *  of the first byte of the TSS). So, after TSS access begins, if
         *  part of the 104 bytes is not physically contiguous, the processor
         *  will access incorrect information without generating a page-fault
         *  exception.
         *
         * There are also a lot of errata involving the TSS spanning a page
         * boundary.  Assert that we're not doing that.
         */
        BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
                      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
        BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);

        /*
         * VMX changes the host TR limit to 0x67 after a VM exit.  This is
         * okay, because 0x67 covers the size of struct x86_hw_tss (0x68
         * bytes, i.e. limit = size - 1), provided the hardware TSS sits at
         * offset 0 of tss_struct.  Assert both layout assumptions.
         */
        BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
        BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);

        cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
                             sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
        per_cpu(cpu_entry_area, cpu) = cea;
#endif

        percpu_setup_exception_stacks(cpu);

        percpu_setup_debug_store(cpu);
}

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
        unsigned long start, end;

        /* The +1 is for the readonly IDT: */
        BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
        BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
        BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

        start = CPU_ENTRY_AREA_BASE;
        end = start + CPU_ENTRY_AREA_MAP_SIZE;

        /* Careful here: start + PMD_SIZE might wrap around */
        for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
                populate_extra_pte(start);
#endif
}

void __init setup_cpu_entry_areas(void)
{
        unsigned int cpu;

        setup_cpu_entry_area_ptes();

        for_each_possible_cpu(cpu)
                setup_cpu_entry_area(cpu);

        /*
         * This is the last essential update to swapper_pgdir which needs
         * to be synchronized to initial_page_table on 32-bit.
         */
        sync_initial_page_table();
}