0001
0002
0003 #ifndef _ASM_X86_CPU_ENTRY_AREA_H
0004 #define _ASM_X86_CPU_ENTRY_AREA_H
0005
0006 #include <linux/percpu-defs.h>
0007 #include <asm/processor.h>
0008 #include <asm/intel_ds.h>
0009 #include <asm/pgtable_areas.h>
0010
0011 #ifdef CONFIG_X86_64
0012
/*
 * Size of the optional VC (VMM Communication exception) stacks.  Only
 * CONFIG_AMD_MEM_ENCRYPT kernels need them; otherwise the size is 0 so
 * the VC/VC2 members in ESTACKS_MEMBERS() below occupy no space.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define VC_EXCEPTION_STKSZ EXCEPTION_STKSZ
#else
#define VC_EXCEPTION_STKSZ 0
#endif
0018
0019
/*
 * Member layout of the IST exception stacks, each preceded by a guard
 * area of @guardsize bytes.  Instantiated twice:
 *
 *  - struct exception_stacks:	   guardsize == 0, i.e. the contiguous
 *    backing store with no holes between the stacks.
 *  - struct cea_exception_stacks: guardsize == PAGE_SIZE, the layout
 *    used in the cpu_entry_area mapping where each stack is preceded
 *    by a page-sized guard area.
 *
 * The VC and VC2 stacks are sized by @optional_stack_size so they
 * collapse to zero bytes when VC_EXCEPTION_STKSZ is 0 (see above).
 */
#define ESTACKS_MEMBERS(guardsize, optional_stack_size) \
char DF_stack_guard[guardsize]; \
char DF_stack[EXCEPTION_STKSZ]; \
char NMI_stack_guard[guardsize]; \
char NMI_stack[EXCEPTION_STKSZ]; \
char DB_stack_guard[guardsize]; \
char DB_stack[EXCEPTION_STKSZ]; \
char MCE_stack_guard[guardsize]; \
char MCE_stack[EXCEPTION_STKSZ]; \
char VC_stack_guard[guardsize]; \
char VC_stack[optional_stack_size]; \
char VC2_stack_guard[guardsize]; \
char VC2_stack[optional_stack_size]; \
char IST_top_guard[guardsize]; \
0034
0035
/*
 * Backing store for the IST exception stacks: the stacks are packed
 * back to back with zero-sized guard members between them.
 */
struct exception_stacks {
ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ)
};
0039
0040
/*
 * Layout of the exception stacks as seen in the cpu_entry_area: every
 * stack is preceded by a PAGE_SIZE guard area.  The VC stacks are
 * always reserved at full EXCEPTION_STKSZ here, so this layout does not
 * vary with CONFIG_AMD_MEM_ENCRYPT (only the backing store does).
 */
struct cea_exception_stacks {
ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};
0044
0045
0046
0047
/*
 * Index of each IST exception stack.  The order must match the member
 * order of struct cea_exception_stacks above.
 */
enum exception_stack_ordering {
ESTACK_DF,
ESTACK_NMI,
ESTACK_DB,
ESTACK_MCE,
ESTACK_VC,
ESTACK_VC2,
N_EXCEPTION_STACKS
};
0057
/* Size in bytes of exception stack @st within cea_exception_stacks. */
#define CEA_ESTACK_SIZE(st) \
sizeof(((struct cea_exception_stacks *)0)->st## _stack)

/* Lowest address (bottom) of stack @st inside the area at @ceastp. */
#define CEA_ESTACK_BOT(ceastp, st) \
((unsigned long)&(ceastp)->st## _stack)

/* One past the highest address (top) of stack @st — stacks grow down. */
#define CEA_ESTACK_TOP(ceastp, st) \
(CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

/* Byte offset of stack @st from the start of cea_exception_stacks. */
#define CEA_ESTACK_OFFS(st) \
offsetof(struct cea_exception_stacks, st## _stack)

/* Number of pages occupied by the whole cea_exception_stacks layout. */
#define CEA_ESTACK_PAGES \
(sizeof(struct cea_exception_stacks) / PAGE_SIZE)
0072
0073 #endif
0074
0075 #ifdef CONFIG_X86_32
/*
 * 32-bit only: one page holding a stack that fills the space down to a
 * hardware TSS at the end of the page.
 * NOTE(review): presumably used by the #DF task-gate handler, which
 * needs its own TSS and stack — confirm against the 32-bit doublefault
 * setup code.
 */
struct doublefault_stack {
unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
struct x86_hw_tss tss;
} __aligned(PAGE_SIZE);
0080 #endif
0081
0082
0083
0084
0085
0086
0087
0088
0089
/*
 * Per-CPU entry area: the data the hardware and the low-level entry
 * code need on kernel entry/exit, laid out page by page.  A CPU's area
 * is looked up with get_cpu_entry_area(cpu); individual pages are
 * mapped into it with cea_set_pte().
 */
struct cpu_entry_area {
/* The GDT occupies its own page. */
char gdt[PAGE_SIZE];

/*
 * The stack used for kernel entry; on 32-bit it is preceded by a
 * page-sized guard area.
 */
#ifdef CONFIG_X86_32
char guard_entry_stack[PAGE_SIZE];
#endif
struct entry_stack_page entry_stack_page;

/* 32-bit only: double-fault stack + TSS, with its own guard area. */
#ifdef CONFIG_X86_32
char guard_doublefault_stack[PAGE_SIZE];
struct doublefault_stack doublefault_stack;
#endif

/* Hardware TSS for this CPU. */
struct tss_struct tss;

/* 64-bit only: the IST exception stacks, guard pages included. */
#ifdef CONFIG_X86_64
struct cea_exception_stacks estacks;
#endif

/*
 * Per-CPU debug store control area and the buffers it points to.
 * NOTE(review): presumably consumed by the Intel PEBS/BTS (perf)
 * machinery — confirm against the debug_store users.
 */
struct debug_store cpu_debug_store;

struct debug_store_buffers cpu_debug_buffers;
};
0131
/* Size of one CPU's entry area and of the per-CPU array of them. */
#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)

/*
 * Total virtual space reserved: the array plus one extra page.
 * NOTE(review): the extra page presumably holds the read-only IDT
 * mapping at the start of the region — confirm in the setup code.
 */
#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
0137
/* Per-CPU pointers to this CPU's entry area and its exception stacks. */
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

/* Set up the entry areas for all CPUs. */
extern void setup_cpu_entry_areas(void);
/* Map physical page @pa at @cea_vaddr inside an entry area with @flags. */
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

/* Return the entry area of @cpu. */
extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
0145
0146 static __always_inline struct entry_stack *cpu_entry_stack(int cpu)
0147 {
0148 return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
0149 }
0150
/* Top (initial stack pointer) of this CPU's IST stack @name. */
#define __this_cpu_ist_top_va(name) \
CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)

/* Bottom (lowest address) of this CPU's IST stack @name. */
#define __this_cpu_ist_bottom_va(name) \
CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
0156
0157 #endif