// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

#ifdef CONFIG_X86_32
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
#endif

/* This is called from entry code, so it must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
    unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
    BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

    return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
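
/*
 * Editorial sketch (not in the original file): a caller that needs its
 * own CPU's entry area typically does something like
 *
 *     struct cpu_entry_area *cea = get_cpu_entry_area(smp_processor_id());
 *
 * The lookup is pure arithmetic on a fixed virtual address range
 * (CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE), with no table
 * walk and no locking, which is what makes it usable from noinstr
 * entry code.
 */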

void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
    unsigned long va = (unsigned long) cea_vaddr;
    pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

    /*
     * The cpu_entry_area is shared between the user and kernel
     * page tables.  All of its ptes can safely be global.
     * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
     * non-present PTEs, so be careful not to set it in that
     * case to avoid confusion.
     */
    if (boot_cpu_has(X86_FEATURE_PGE) &&
        (pgprot_val(flags) & _PAGE_PRESENT))
        pte = pte_set_flags(pte, _PAGE_GLOBAL);

    set_pte_vaddr(va, pte);
}
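
/*
 * Editorial note: cea_set_pte() is the single-page primitive underneath
 * every mapping helper in this file. A minimal sketch of mapping one
 * read-only page at an entry-area address (assuming pa holds a valid
 * physical address) would be:
 *
 *     cea_set_pte(cea_vaddr, pa, PAGE_KERNEL_RO);
 *
 * The _PAGE_GLOBAL logic above then decides whether the resulting PTE
 * survives TLB flushes caused by CR3 switches.
 */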

static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
    for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
        cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
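
/*
 * Editorial sketch: for pages == 2 the loop above is equivalent to
 *
 *     cea_set_pte(cea_vaddr,             per_cpu_ptr_to_phys(ptr),             prot);
 *     cea_set_pte(cea_vaddr + PAGE_SIZE, per_cpu_ptr_to_phys(ptr + PAGE_SIZE), prot);
 *
 * i.e. it creates an alias mapping of an already-allocated per-CPU
 * object at a fixed entry-area virtual address, page by page.
 */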

static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
    unsigned int npages;
    void *cea;

    if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
        return;

    cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
    npages = sizeof(struct debug_store) / PAGE_SIZE;
    BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
    cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
                 PAGE_KERNEL);

    cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
    /*
     * Force the population of PMDs for not-yet-allocated per-CPU
     * memory, like the debug store buffers.
     */
    npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
    for (; npages; npages--, cea += PAGE_SIZE)
        cea_set_pte(cea, 0, PAGE_NONE);
#endif
}
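
/*
 * Editorial note: the PAGE_NONE loop above installs non-present PTEs
 * only to force allocation of the intermediate paging structures; the
 * actual debug-store buffer pages are expected to be mapped later, once
 * the PEBS/BTS buffers themselves are allocated.
 */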

#ifdef CONFIG_X86_64

#define cea_map_stack(name) do {                                    \
    npages = sizeof(estacks->name##_stack) / PAGE_SIZE;             \
    cea_map_percpu_pages(cea->estacks.name##_stack,                 \
            estacks->name##_stack, npages, PAGE_KERNEL);            \
    } while (0)
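
/*
 * For illustration (editorial, not in the original source),
 * cea_map_stack(DF) expands to:
 *
 *     npages = sizeof(estacks->DF_stack) / PAGE_SIZE;
 *     cea_map_percpu_pages(cea->estacks.DF_stack,
 *             estacks->DF_stack, npages, PAGE_KERNEL);
 *
 * i.e. it maps one named IST stack from per-CPU storage into the slot
 * of the same name in the cpu_entry_area.
 */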

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
    struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
    struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
    unsigned int npages;

    BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

    per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

    /*
     * The exception stack mappings in the per-CPU area are protected
     * by guard pages, so each stack must be mapped separately. DB2 is
     * not mapped; it just exists to catch triple nesting of #DB.
     */
    cea_map_stack(DF);
    cea_map_stack(NMI);
    cea_map_stack(DB);
    cea_map_stack(MCE);

    if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
        if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
            cea_map_stack(VC);
            cea_map_stack(VC2);
        }
    }
}
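
/*
 * Editorial note: in cea->estacks each mapped stack is separated from
 * its neighbours by unmapped guard pages, so an IST stack overflow
 * faults on a guard page instead of silently corrupting the adjacent
 * stack.
 */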
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
    struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

    cea_map_percpu_pages(&cea->doublefault_stack,
                 &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Set up the fixmap mappings, only once per processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
    struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
    /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
    pgprot_t gdt_prot = PAGE_KERNEL_RO;
    pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
    /*
     * On native 32-bit systems, the GDT cannot be read-only because
     * our double fault handler uses a task gate, and entering through
     * a task gate needs to change an available TSS to busy.  If the
     * GDT is read-only, that will triple fault.  The TSS cannot be
     * read-only because the CPU writes to it on task switches.
     *
     * On Xen PV, the GDT must be read-only because the hypervisor
     * requires it.
     */
    pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
        PAGE_KERNEL_RO : PAGE_KERNEL;
    pgprot_t tss_prot = PAGE_KERNEL;
#endif

    cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

    cea_map_percpu_pages(&cea->entry_stack_page,
                 per_cpu_ptr(&entry_stack_storage, cpu), 1,
                 PAGE_KERNEL);

    /*
     * The Intel SDM says (Volume 3, 7.2.1):
     *
     *  Avoid placing a page boundary in the part of the TSS that the
     *  processor reads during a task switch (the first 104 bytes). The
     *  processor may not correctly perform address translations if a
     *  boundary occurs in this area. During a task switch, the processor
     *  reads and writes into the first 104 bytes of each TSS (using
     *  contiguous physical addresses beginning with the physical address
     *  of the first byte of the TSS). So, after TSS access begins, if
     *  part of the 104 bytes is not physically contiguous, the processor
     *  will access incorrect information without generating a page-fault
     *  exception.
     *
     * There are also a lot of errata involving the TSS spanning a page
     * boundary.  Assert that we're not doing that.
     */
    BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
              offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
    BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
    /*
     * VMX changes the host TR limit to 0x67 after a VM exit. This is
     * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
     * that this is correct.
     */
    BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
    BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);

    cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
                 sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
    per_cpu(cpu_entry_area, cpu) = cea;
#endif

    percpu_setup_exception_stacks(cpu);

    percpu_setup_debug_store(cpu);
}
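
/*
 * Usage sketch (editorial): once setup_cpu_entry_area(cpu) has run,
 * cpu_init() can point the hardware at the alias mappings, e.g.:
 *
 *     set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 *     load_fixmap_gdt(cpu);
 *
 * so the descriptors the CPU touches live at the fixed entry-area
 * addresses rather than in the direct map.
 */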

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
    unsigned long start, end;

    /* The +1 is for the readonly IDT: */
    BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
    BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
    BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

    start = CPU_ENTRY_AREA_BASE;
    end = start + CPU_ENTRY_AREA_MAP_SIZE;

    /* Careful here: start + PMD_SIZE might wrap around */
    for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
        populate_extra_pte(start);
#endif
}

void __init setup_cpu_entry_areas(void)
{
    unsigned int cpu;

    setup_cpu_entry_area_ptes();

    for_each_possible_cpu(cpu)
        setup_cpu_entry_area(cpu);

    /*
     * This is the last essential update to swapper_pg_dir which needs
     * to be synchronized to initial_page_table on 32-bit.
     */
    sync_initial_page_table();
}
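
/*
 * Editorial note: setup_cpu_entry_areas() runs early in boot, from
 * trap_init() and before the IST entries are set up, so every possible
 * CPU has its entry area mapped before exception handling relies on it.
 */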