0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright 2002 Andi Kleen, SuSE Labs.
0004  * Thanks to Ben LaHaise for precious feedback.
0005  */
0006 #include <linux/highmem.h>
0007 #include <linux/memblock.h>
0008 #include <linux/sched.h>
0009 #include <linux/mm.h>
0010 #include <linux/interrupt.h>
0011 #include <linux/seq_file.h>
0012 #include <linux/debugfs.h>
0013 #include <linux/pfn.h>
0014 #include <linux/percpu.h>
0015 #include <linux/gfp.h>
0016 #include <linux/pci.h>
0017 #include <linux/vmalloc.h>
0018 #include <linux/libnvdimm.h>
0019 #include <linux/vmstat.h>
0020 #include <linux/kernel.h>
0021 #include <linux/cc_platform.h>
0022 #include <linux/set_memory.h>
0023 
0024 #include <asm/e820/api.h>
0025 #include <asm/processor.h>
0026 #include <asm/tlbflush.h>
0027 #include <asm/sections.h>
0028 #include <asm/setup.h>
0029 #include <linux/uaccess.h>
0030 #include <asm/pgalloc.h>
0031 #include <asm/proto.h>
0032 #include <asm/memtype.h>
0033 #include <asm/hyperv-tlfs.h>
0034 #include <asm/mshyperv.h>
0035 
0036 #include "../mm_internal.h"
0037 
0038 /*
0039  * The current flushing context - we pass it instead of 5 arguments:
0040  */
0041 struct cpa_data {
0042     unsigned long   *vaddr;
0043     pgd_t       *pgd;
0044     pgprot_t    mask_set;
0045     pgprot_t    mask_clr;
0046     unsigned long   numpages;
0047     unsigned long   curpage;
0048     unsigned long   pfn;
0049     unsigned int    flags;
0050     unsigned int    force_split     : 1,
0051             force_static_prot   : 1,
0052             force_flush_all     : 1;
0053     struct page **pages;
0054 };
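
/*
 * Illustrative sketch, not part of the upstream file: this is roughly how
 * the flushing context is populated for a plain "set _PAGE_RW on one
 * range" request, mirroring what change_page_attr_set_clr() does further
 * down. The address and page count are hypothetical.
 */
static __maybe_unused void cpa_data_example(unsigned long addr, int numpages)
{
    struct cpa_data cpa;

    memset(&cpa, 0, sizeof(cpa));
    cpa.vaddr    = &addr;                 /* one linear range, not an array */
    cpa.numpages = numpages;
    cpa.mask_set = __pgprot(_PAGE_RW);    /* bits to set */
    cpa.mask_clr = __pgprot(0);           /* bits to clear */
    cpa.flags    = 0;                     /* no CPA_ARRAY / CPA_PAGES_ARRAY */
    /* __change_page_attr_set_clr(&cpa, 1) would then walk the range. */
}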
0055 
0056 enum cpa_warn {
0057     CPA_CONFLICT,
0058     CPA_PROTECT,
0059     CPA_DETECT,
0060 };
0061 
0062 static const int cpa_warn_level = CPA_PROTECT;
0063 
0064 /*
0065  * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
0066  * using cpa_lock, so that no other CPU with stale large TLB entries can
0067  * change the page attributes in parallel while another CPU is splitting
0068  * a large page entry and changing the attribute.
0069  */
0070 static DEFINE_SPINLOCK(cpa_lock);
0071 
0072 #define CPA_FLUSHTLB 1
0073 #define CPA_ARRAY 2
0074 #define CPA_PAGES_ARRAY 4
0075 #define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */
0076 
0077 static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
0078 {
0079     return __pgprot(cachemode2protval(pcm));
0080 }
0081 
0082 #ifdef CONFIG_PROC_FS
0083 static unsigned long direct_pages_count[PG_LEVEL_NUM];
0084 
0085 void update_page_count(int level, unsigned long pages)
0086 {
0087     /* Protect against CPA */
0088     spin_lock(&pgd_lock);
0089     direct_pages_count[level] += pages;
0090     spin_unlock(&pgd_lock);
0091 }
0092 
0093 static void split_page_count(int level)
0094 {
0095     if (direct_pages_count[level] == 0)
0096         return;
0097 
0098     direct_pages_count[level]--;
0099     if (system_state == SYSTEM_RUNNING) {
0100         if (level == PG_LEVEL_2M)
0101             count_vm_event(DIRECT_MAP_LEVEL2_SPLIT);
0102         else if (level == PG_LEVEL_1G)
0103             count_vm_event(DIRECT_MAP_LEVEL3_SPLIT);
0104     }
0105     direct_pages_count[level - 1] += PTRS_PER_PTE;
0106 }
0107 
0108 void arch_report_meminfo(struct seq_file *m)
0109 {
0110     seq_printf(m, "DirectMap4k:    %8lu kB\n",
0111             direct_pages_count[PG_LEVEL_4K] << 2);
0112 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
0113     seq_printf(m, "DirectMap2M:    %8lu kB\n",
0114             direct_pages_count[PG_LEVEL_2M] << 11);
0115 #else
0116     seq_printf(m, "DirectMap4M:    %8lu kB\n",
0117             direct_pages_count[PG_LEVEL_2M] << 12);
0118 #endif
0119     if (direct_gbpages)
0120         seq_printf(m, "DirectMap1G:    %8lu kB\n",
0121             direct_pages_count[PG_LEVEL_1G] << 20);
0122 }
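
/*
 * Illustrative helper, not part of the upstream file: the shift counts
 * above only convert a page count into kB, i.e. 4K -> count << 2,
 * 2M -> count << 11, 4M -> count << 12 and 1G -> count << 20. The same
 * numbers fall out of the generic page_level_shift() helper:
 */
static __maybe_unused unsigned long direct_map_kb(enum pg_level level)
{
    /* Amount of memory (in kB) mapped at @level in the direct map. */
    return direct_pages_count[level] << (page_level_shift(level) - 10);
}
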
0123 #else
0124 static inline void split_page_count(int level) { }
0125 #endif
0126 
0127 #ifdef CONFIG_X86_CPA_STATISTICS
0128 
0129 static unsigned long cpa_1g_checked;
0130 static unsigned long cpa_1g_sameprot;
0131 static unsigned long cpa_1g_preserved;
0132 static unsigned long cpa_2m_checked;
0133 static unsigned long cpa_2m_sameprot;
0134 static unsigned long cpa_2m_preserved;
0135 static unsigned long cpa_4k_install;
0136 
0137 static inline void cpa_inc_1g_checked(void)
0138 {
0139     cpa_1g_checked++;
0140 }
0141 
0142 static inline void cpa_inc_2m_checked(void)
0143 {
0144     cpa_2m_checked++;
0145 }
0146 
0147 static inline void cpa_inc_4k_install(void)
0148 {
0149     data_race(cpa_4k_install++);
0150 }
0151 
0152 static inline void cpa_inc_lp_sameprot(int level)
0153 {
0154     if (level == PG_LEVEL_1G)
0155         cpa_1g_sameprot++;
0156     else
0157         cpa_2m_sameprot++;
0158 }
0159 
0160 static inline void cpa_inc_lp_preserved(int level)
0161 {
0162     if (level == PG_LEVEL_1G)
0163         cpa_1g_preserved++;
0164     else
0165         cpa_2m_preserved++;
0166 }
0167 
0168 static int cpastats_show(struct seq_file *m, void *p)
0169 {
0170     seq_printf(m, "1G pages checked:     %16lu\n", cpa_1g_checked);
0171     seq_printf(m, "1G pages sameprot:    %16lu\n", cpa_1g_sameprot);
0172     seq_printf(m, "1G pages preserved:   %16lu\n", cpa_1g_preserved);
0173     seq_printf(m, "2M pages checked:     %16lu\n", cpa_2m_checked);
0174     seq_printf(m, "2M pages sameprot:    %16lu\n", cpa_2m_sameprot);
0175     seq_printf(m, "2M pages preserved:   %16lu\n", cpa_2m_preserved);
0176     seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
0177     return 0;
0178 }
0179 
0180 static int cpastats_open(struct inode *inode, struct file *file)
0181 {
0182     return single_open(file, cpastats_show, NULL);
0183 }
0184 
0185 static const struct file_operations cpastats_fops = {
0186     .open       = cpastats_open,
0187     .read       = seq_read,
0188     .llseek     = seq_lseek,
0189     .release    = single_release,
0190 };
0191 
0192 static int __init cpa_stats_init(void)
0193 {
0194     debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
0195                 &cpastats_fops);
0196     return 0;
0197 }
0198 late_initcall(cpa_stats_init);
0199 #else
0200 static inline void cpa_inc_1g_checked(void) { }
0201 static inline void cpa_inc_2m_checked(void) { }
0202 static inline void cpa_inc_4k_install(void) { }
0203 static inline void cpa_inc_lp_sameprot(int level) { }
0204 static inline void cpa_inc_lp_preserved(int level) { }
0205 #endif
0206 
0207 
0208 static inline int
0209 within(unsigned long addr, unsigned long start, unsigned long end)
0210 {
0211     return addr >= start && addr < end;
0212 }
0213 
0214 static inline int
0215 within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
0216 {
0217     return addr >= start && addr <= end;
0218 }
0219 
0220 #ifdef CONFIG_X86_64
0221 
0222 static inline unsigned long highmap_start_pfn(void)
0223 {
0224     return __pa_symbol(_text) >> PAGE_SHIFT;
0225 }
0226 
0227 static inline unsigned long highmap_end_pfn(void)
0228 {
0229     /* Do not reference physical address outside the kernel. */
0230     return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
0231 }
0232 
0233 static bool __cpa_pfn_in_highmap(unsigned long pfn)
0234 {
0235     /*
0236      * Kernel text has an alias mapping at a high address, known
0237      * here as "highmap".
0238      */
0239     return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
0240 }
0241 
0242 #else
0243 
0244 static bool __cpa_pfn_in_highmap(unsigned long pfn)
0245 {
0246     /* There is no highmap on 32-bit */
0247     return false;
0248 }
0249 
0250 #endif
0251 
0252 /*
0253  * See set_mce_nospec().
0254  *
0255  * Machine check recovery code needs to change cache mode of poisoned pages to
0256  * UC to avoid speculative access logging another error. But passing the
0257  * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
0258  * speculative access. So we cheat and flip the top bit of the address. This
0259  * works fine for the code that updates the page tables. But at the end of the
0260  * process we need to flush the TLB and cache and the non-canonical address
0261  * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
0262  *
0263  * But in the common case we already have a canonical address. This code
0264  * will fix the top bit if needed and is a no-op otherwise.
0265  */
0266 static inline unsigned long fix_addr(unsigned long addr)
0267 {
0268 #ifdef CONFIG_X86_64
0269     return (long)(addr << 1) >> 1;
0270 #else
0271     return addr;
0272 #endif
0273 }
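
#ifdef CONFIG_X86_64
/*
 * Illustrative self-check, not part of the upstream file. Assuming a
 * 4-level-paging direct map base of 0xffff888000000000: shifting left by
 * one discards bit 63 and the arithmetic shift right sign-extends bit 62,
 * so the address with the flipped top bit and the already-canonical
 * address both collapse to the canonical form.
 */
static __maybe_unused void fix_addr_example(void)
{
    unsigned long canon   = 0xffff888000000000UL;
    unsigned long flipped = canon & ~(1UL << 63);

    WARN_ON(fix_addr(flipped) != canon);  /* top bit restored */
    WARN_ON(fix_addr(canon) != canon);    /* no-op for a canonical address */
}
#endif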
0274 
0275 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
0276 {
0277     if (cpa->flags & CPA_PAGES_ARRAY) {
0278         struct page *page = cpa->pages[idx];
0279 
0280         if (unlikely(PageHighMem(page)))
0281             return 0;
0282 
0283         return (unsigned long)page_address(page);
0284     }
0285 
0286     if (cpa->flags & CPA_ARRAY)
0287         return cpa->vaddr[idx];
0288 
0289     return *cpa->vaddr + idx * PAGE_SIZE;
0290 }
0291 
0292 /*
0293  * Flushing functions
0294  */
0295 
0296 static void clflush_cache_range_opt(void *vaddr, unsigned int size)
0297 {
0298     const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
0299     void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
0300     void *vend = vaddr + size;
0301 
0302     if (p >= vend)
0303         return;
0304 
0305     for (; p < vend; p += clflush_size)
0306         clflushopt(p);
0307 }
0308 
0309 /**
0310  * clflush_cache_range - flush a cache range with clflush
0311  * @vaddr:  virtual start address
0312  * @size:   number of bytes to flush
0313  *
0314  * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
0315  * SFENCE to avoid ordering issues.
0316  */
0317 void clflush_cache_range(void *vaddr, unsigned int size)
0318 {
0319     mb();
0320     clflush_cache_range_opt(vaddr, size);
0321     mb();
0322 }
0323 EXPORT_SYMBOL_GPL(clflush_cache_range);
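
/*
 * Illustrative usage sketch, not from the upstream file: flush a freshly
 * written buffer so that a non-coherent observer (e.g. persistent memory)
 * sees the data. The helper above takes care of cache-line alignment and
 * of fencing the unordered CLFLUSHOPTs. Buffer and length are hypothetical.
 */
static __maybe_unused void flush_descriptor_example(void *desc, unsigned int len)
{
    memset(desc, 0, len);           /* publish the new descriptor contents */
    clflush_cache_range(desc, len); /* fenced flush of the whole range */
}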
0324 
0325 #ifdef CONFIG_ARCH_HAS_PMEM_API
0326 void arch_invalidate_pmem(void *addr, size_t size)
0327 {
0328     clflush_cache_range(addr, size);
0329 }
0330 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
0331 #endif
0332 
0333 static void __cpa_flush_all(void *arg)
0334 {
0335     unsigned long cache = (unsigned long)arg;
0336 
0337     /*
0338      * Flush all to work around Errata in early athlons regarding
0339      * large page flushing.
0340      */
0341     __flush_tlb_all();
0342 
0343     if (cache && boot_cpu_data.x86 >= 4)
0344         wbinvd();
0345 }
0346 
0347 static void cpa_flush_all(unsigned long cache)
0348 {
0349     BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
0350 
0351     on_each_cpu(__cpa_flush_all, (void *) cache, 1);
0352 }
0353 
0354 static void __cpa_flush_tlb(void *data)
0355 {
0356     struct cpa_data *cpa = data;
0357     unsigned int i;
0358 
0359     for (i = 0; i < cpa->numpages; i++)
0360         flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
0361 }
0362 
0363 static void cpa_flush(struct cpa_data *data, int cache)
0364 {
0365     struct cpa_data *cpa = data;
0366     unsigned int i;
0367 
0368     BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
0369 
0370     if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
0371         cpa_flush_all(cache);
0372         return;
0373     }
0374 
0375     if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
0376         flush_tlb_all();
0377     else
0378         on_each_cpu(__cpa_flush_tlb, cpa, 1);
0379 
0380     if (!cache)
0381         return;
0382 
0383     mb();
0384     for (i = 0; i < cpa->numpages; i++) {
0385         unsigned long addr = __cpa_addr(cpa, i);
0386         unsigned int level;
0387 
0388         pte_t *pte = lookup_address(addr, &level);
0389 
0390         /*
0391          * Only flush present addresses:
0392          */
0393         if (pte && (pte_val(*pte) & _PAGE_PRESENT))
0394             clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
0395     }
0396     mb();
0397 }
0398 
0399 static bool overlaps(unsigned long r1_start, unsigned long r1_end,
0400              unsigned long r2_start, unsigned long r2_end)
0401 {
0402     return (r1_start <= r2_end && r1_end >= r2_start) ||
0403         (r2_start <= r1_end && r2_end >= r1_start);
0404 }
0405 
0406 #ifdef CONFIG_PCI_BIOS
0407 /*
0408  * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
0409  * based config access (CONFIG_PCI_GOBIOS) support.
0410  */
0411 #define BIOS_PFN    PFN_DOWN(BIOS_BEGIN)
0412 #define BIOS_PFN_END    PFN_DOWN(BIOS_END - 1)
0413 
0414 static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
0415 {
0416     if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
0417         return _PAGE_NX;
0418     return 0;
0419 }
0420 #else
0421 static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
0422 {
0423     return 0;
0424 }
0425 #endif
0426 
0427 /*
0428  * The .rodata section needs to be read-only. Using the pfn catches all
0429  * aliases.  This also includes __ro_after_init, so do not enforce until
0430  * kernel_set_to_readonly is true.
0431  */
0432 static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
0433 {
0434     unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));
0435 
0436     /*
0437      * Note: __end_rodata is page aligned and not inclusive, so
0438      * subtract 1 to get the last enforced PFN in the rodata area.
0439      */
0440     epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;
0441 
0442     if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
0443         return _PAGE_RW;
0444     return 0;
0445 }
0446 
0447 /*
0448  * Protect kernel text against becoming non executable by forbidding
0449  * _PAGE_NX.  This protects only the high kernel mapping (_text -> _etext)
0450  * out of which the kernel actually executes.  Do not protect the low
0451  * mapping.
0452  *
0453  * This does not cover __inittext since that is gone after boot.
0454  */
0455 static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
0456 {
0457     unsigned long t_end = (unsigned long)_etext - 1;
0458     unsigned long t_start = (unsigned long)_text;
0459 
0460     if (overlaps(start, end, t_start, t_end))
0461         return _PAGE_NX;
0462     return 0;
0463 }
0464 
0465 #if defined(CONFIG_X86_64)
0466 /*
0467  * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
0468  * the kernel text mappings for the large-page-aligned text and rodata
0469  * sections will always be read-only. The kernel identity mappings covering
0470  * the holes caused by this alignment can be anything the user asks for.
0471  *
0472  * This will preserve the large page mappings for kernel text/data at no
0473  * extra cost.
0474  */
0475 static pgprotval_t protect_kernel_text_ro(unsigned long start,
0476                       unsigned long end)
0477 {
0478     unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
0479     unsigned long t_start = (unsigned long)_text;
0480     unsigned int level;
0481 
0482     if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
0483         return 0;
0484     /*
0485      * Don't enforce the !RW mapping for the kernel text mapping, if
0486      * the current mapping is already using small page mapping.  No
0487      * need to work hard to preserve large page mappings in this case.
0488      *
0489      * This also fixes the Linux Xen paravirt guest boot failure caused
0490      * by unexpected read-only mappings for kernel identity
0491      * mappings. In this paravirt guest case, the kernel text mapping
0492      * and the kernel identity mapping share the same page-table pages,
0493      * so the protections for kernel text and identity mappings have to
0494      * be the same.
0495      */
0496     if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
0497         return _PAGE_RW;
0498     return 0;
0499 }
0500 #else
0501 static pgprotval_t protect_kernel_text_ro(unsigned long start,
0502                       unsigned long end)
0503 {
0504     return 0;
0505 }
0506 #endif
0507 
0508 static inline bool conflicts(pgprot_t prot, pgprotval_t val)
0509 {
0510     return (pgprot_val(prot) & ~val) != pgprot_val(prot);
0511 }
0512 
0513 static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
0514                   unsigned long start, unsigned long end,
0515                   unsigned long pfn, const char *txt)
0516 {
0517     static const char *lvltxt[] = {
0518         [CPA_CONFLICT]  = "conflict",
0519         [CPA_PROTECT]   = "protect",
0520         [CPA_DETECT]    = "detect",
0521     };
0522 
0523     if (warnlvl > cpa_warn_level || !conflicts(prot, val))
0524         return;
0525 
0526     pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
0527         lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
0528         (unsigned long long)val);
0529 }
0530 
0531 /*
0532  * Certain areas of memory on x86 require very specific protection flags,
0533  * for example the BIOS area or kernel text. Callers don't always get this
0534  * right (again, ioremap() on BIOS memory is not uncommon) so this function
0535  * checks and fixes these known static required protection bits.
0536  */
0537 static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
0538                       unsigned long pfn, unsigned long npg,
0539                       unsigned long lpsize, int warnlvl)
0540 {
0541     pgprotval_t forbidden, res;
0542     unsigned long end;
0543 
0544     /*
0545      * There is no point in checking RW/NX conflicts when the requested
0546      * mapping is setting the page !PRESENT.
0547      */
0548     if (!(pgprot_val(prot) & _PAGE_PRESENT))
0549         return prot;
0550 
0551     /* Operate on the virtual address */
0552     end = start + npg * PAGE_SIZE - 1;
0553 
0554     res = protect_kernel_text(start, end);
0555     check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
0556     forbidden = res;
0557 
0558     /*
0559      * Special case to preserve a large page. If the change spans the
0560      * full large page mapping then there is no point in splitting it
0561      * up. Happens with ftrace and is going to be removed once ftrace
0562      * is switched to text_poke().
0563      */
0564     if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
0565         res = protect_kernel_text_ro(start, end);
0566         check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
0567         forbidden |= res;
0568     }
0569 
0570     /* Check the PFN directly */
0571     res = protect_pci_bios(pfn, pfn + npg - 1);
0572     check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
0573     forbidden |= res;
0574 
0575     res = protect_rodata(pfn, pfn + npg - 1);
0576     check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
0577     forbidden |= res;
0578 
0579     return __pgprot(pgprot_val(prot) & ~forbidden);
0580 }
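
/*
 * Illustrative sketch, not part of the upstream file: once
 * kernel_set_to_readonly is set, a hypothetical request to make the first
 * .rodata page writable gets the RW bit stripped again by the checks above.
 */
static __maybe_unused void static_protections_example(void)
{
    unsigned long addr = (unsigned long)__start_rodata;
    unsigned long pfn  = PFN_DOWN(__pa_symbol(__start_rodata));
    pgprot_t req = __pgprot(_PAGE_PRESENT | _PAGE_RW);
    pgprot_t res;

    /* lpsize = 0 enforces the protections, as the 4k install path does. */
    res = static_protections(req, addr, pfn, 1, 0, CPA_PROTECT);

    if (kernel_set_to_readonly)
        WARN_ON(pgprot_val(res) & _PAGE_RW); /* RW is forbidden over rodata */
}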
0581 
0582 /*
0583  * Lookup the page table entry for a virtual address in a specific pgd.
0584  * Return a pointer to the entry and the level of the mapping.
0585  */
0586 pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
0587                  unsigned int *level)
0588 {
0589     p4d_t *p4d;
0590     pud_t *pud;
0591     pmd_t *pmd;
0592 
0593     *level = PG_LEVEL_NONE;
0594 
0595     if (pgd_none(*pgd))
0596         return NULL;
0597 
0598     p4d = p4d_offset(pgd, address);
0599     if (p4d_none(*p4d))
0600         return NULL;
0601 
0602     *level = PG_LEVEL_512G;
0603     if (p4d_large(*p4d) || !p4d_present(*p4d))
0604         return (pte_t *)p4d;
0605 
0606     pud = pud_offset(p4d, address);
0607     if (pud_none(*pud))
0608         return NULL;
0609 
0610     *level = PG_LEVEL_1G;
0611     if (pud_large(*pud) || !pud_present(*pud))
0612         return (pte_t *)pud;
0613 
0614     pmd = pmd_offset(pud, address);
0615     if (pmd_none(*pmd))
0616         return NULL;
0617 
0618     *level = PG_LEVEL_2M;
0619     if (pmd_large(*pmd) || !pmd_present(*pmd))
0620         return (pte_t *)pmd;
0621 
0622     *level = PG_LEVEL_4K;
0623 
0624     return pte_offset_kernel(pmd, address);
0625 }
0626 
0627 /*
0628  * Lookup the page table entry for a virtual address. Return a pointer
0629  * to the entry and the level of the mapping.
0630  *
0631  * Note: We return pud and pmd either when the entry is marked large
0632  * or when the present bit is not set. Otherwise we would return a
0633  * pointer to a nonexisting mapping.
0634  */
0635 pte_t *lookup_address(unsigned long address, unsigned int *level)
0636 {
0637     return lookup_address_in_pgd(pgd_offset_k(address), address, level);
0638 }
0639 EXPORT_SYMBOL_GPL(lookup_address);
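
/*
 * Illustrative usage sketch, not from the upstream file: report how a given
 * kernel virtual address is currently mapped. Note that for 2M/1G mappings
 * the returned pointer is really a pmd/pud in disguise, and that a non-NULL
 * pointer may still refer to a not-present entry.
 */
static __maybe_unused void dump_mapping_level(unsigned long addr)
{
    unsigned int level;
    pte_t *pte = lookup_address(addr, &level);

    if (!pte || !(pte_val(*pte) & _PAGE_PRESENT))
        pr_info("0x%lx: not mapped\n", addr);
    else
        pr_info("0x%lx: mapped, level %d\n", addr, level);
}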
0640 
0641 static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
0642                   unsigned int *level)
0643 {
0644     if (cpa->pgd)
0645         return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
0646                            address, level);
0647 
0648     return lookup_address(address, level);
0649 }
0650 
0651 /*
0652  * Lookup the PMD entry for a virtual address. Return a pointer to the entry
0653  * or NULL if not present.
0654  */
0655 pmd_t *lookup_pmd_address(unsigned long address)
0656 {
0657     pgd_t *pgd;
0658     p4d_t *p4d;
0659     pud_t *pud;
0660 
0661     pgd = pgd_offset_k(address);
0662     if (pgd_none(*pgd))
0663         return NULL;
0664 
0665     p4d = p4d_offset(pgd, address);
0666     if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
0667         return NULL;
0668 
0669     pud = pud_offset(p4d, address);
0670     if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
0671         return NULL;
0672 
0673     return pmd_offset(pud, address);
0674 }
0675 
0676 /*
0677  * This is necessary because __pa() does not work on some
0678  * kinds of memory, like vmalloc() or the alloc_remap()
0679  * areas on 32-bit NUMA systems.  The percpu areas can
0680  * end up in this kind of memory, for instance.
0681  *
0682  * This could be optimized, but it is only intended to be
0683  * used at initialization time, and keeping it
0684  * unoptimized should increase the testing coverage for
0685  * the more obscure platforms.
0686  */
0687 phys_addr_t slow_virt_to_phys(void *__virt_addr)
0688 {
0689     unsigned long virt_addr = (unsigned long)__virt_addr;
0690     phys_addr_t phys_addr;
0691     unsigned long offset;
0692     enum pg_level level;
0693     pte_t *pte;
0694 
0695     pte = lookup_address(virt_addr, &level);
0696     BUG_ON(!pte);
0697 
0698     /*
0699      * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
0700      * before being left-shifted PAGE_SHIFT bits -- this trick is to
0701      * make 32-PAE kernel work correctly.
0702      */
0703     switch (level) {
0704     case PG_LEVEL_1G:
0705         phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
0706         offset = virt_addr & ~PUD_PAGE_MASK;
0707         break;
0708     case PG_LEVEL_2M:
0709         phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
0710         offset = virt_addr & ~PMD_PAGE_MASK;
0711         break;
0712     default:
0713         phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
0714         offset = virt_addr & ~PAGE_MASK;
0715     }
0716 
0717     return (phys_addr_t)(phys_addr | offset);
0718 }
0719 EXPORT_SYMBOL_GPL(slow_virt_to_phys);
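
/*
 * Worked example for the cast above (illustrative): on a 32-bit PAE kernel
 * an unsigned long is 32 bits wide while physical addresses can exceed 4GB.
 * For a hypothetical pfn of 0x120000 (the page at physical 4.5GB):
 *
 *   (unsigned long)0x120000 << PAGE_SHIFT -> 0x20000000   (truncated, wrong)
 *   (phys_addr_t)0x120000 << PAGE_SHIFT   -> 0x120000000  (correct)
 */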
0720 
0721 /*
0722  * Set the new pmd in all the pgds we know about:
0723  */
0724 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
0725 {
0726     /* change init_mm */
0727     set_pte_atomic(kpte, pte);
0728 #ifdef CONFIG_X86_32
0729     if (!SHARED_KERNEL_PMD) {
0730         struct page *page;
0731 
0732         list_for_each_entry(page, &pgd_list, lru) {
0733             pgd_t *pgd;
0734             p4d_t *p4d;
0735             pud_t *pud;
0736             pmd_t *pmd;
0737 
0738             pgd = (pgd_t *)page_address(page) + pgd_index(address);
0739             p4d = p4d_offset(pgd, address);
0740             pud = pud_offset(p4d, address);
0741             pmd = pmd_offset(pud, address);
0742             set_pte_atomic((pte_t *)pmd, pte);
0743         }
0744     }
0745 #endif
0746 }
0747 
0748 static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
0749 {
0750     /*
0751      * _PAGE_GLOBAL means "global page" for present PTEs.
0752      * But, it is also used to indicate _PAGE_PROTNONE
0753      * for non-present PTEs.
0754      *
0755      * This ensures that a _PAGE_GLOBAL PTE going from
0756      * present to non-present is not confused as
0757      * _PAGE_PROTNONE.
0758      */
0759     if (!(pgprot_val(prot) & _PAGE_PRESENT))
0760         pgprot_val(prot) &= ~_PAGE_GLOBAL;
0761 
0762     return prot;
0763 }
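
/*
 * Illustrative example, not part of the upstream file: _PAGE_PROTNONE and
 * _PAGE_GLOBAL share the same bit position, so a global PTE that merely
 * lost _PAGE_PRESENT would otherwise be misread as PROT_NONE:
 *
 *   pgprot_t p = __pgprot(_PAGE_GLOBAL);     // !PRESENT, GLOBAL set
 *   p = pgprot_clear_protnone_bits(p);       // GLOBAL dropped as well
 *   WARN_ON(pgprot_val(p) & _PAGE_GLOBAL);   // does not fire
 */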
0764 
0765 static int __should_split_large_page(pte_t *kpte, unsigned long address,
0766                      struct cpa_data *cpa)
0767 {
0768     unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
0769     pgprot_t old_prot, new_prot, req_prot, chk_prot;
0770     pte_t new_pte, *tmp;
0771     enum pg_level level;
0772 
0773     /*
0774      * Check for races, another CPU might have split this page
0775      * up already:
0776      */
0777     tmp = _lookup_address_cpa(cpa, address, &level);
0778     if (tmp != kpte)
0779         return 1;
0780 
0781     switch (level) {
0782     case PG_LEVEL_2M:
0783         old_prot = pmd_pgprot(*(pmd_t *)kpte);
0784         old_pfn = pmd_pfn(*(pmd_t *)kpte);
0785         cpa_inc_2m_checked();
0786         break;
0787     case PG_LEVEL_1G:
0788         old_prot = pud_pgprot(*(pud_t *)kpte);
0789         old_pfn = pud_pfn(*(pud_t *)kpte);
0790         cpa_inc_1g_checked();
0791         break;
0792     default:
0793         return -EINVAL;
0794     }
0795 
0796     psize = page_level_size(level);
0797     pmask = page_level_mask(level);
0798 
0799     /*
0800      * Calculate the number of pages, which fit into this large
0801      * page starting at address:
0802      */
0803     lpaddr = (address + psize) & pmask;
0804     numpages = (lpaddr - address) >> PAGE_SHIFT;
0805     if (numpages < cpa->numpages)
0806         cpa->numpages = numpages;
0807 
0808     /*
0809      * We are safe now. Check whether the new pgprot is the same:
0810      * Convert protection attributes to 4k-format, as cpa->mask* are set
0811      * up accordingly.
0812      */
0813 
0814     /* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
0815     req_prot = pgprot_large_2_4k(old_prot);
0816 
0817     pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
0818     pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
0819 
0820     /*
0821      * req_prot is in format of 4k pages. It must be converted to large
0822      * page format: the caching mode includes the PAT bit located at
0823      * different bit positions in the two formats.
0824      */
0825     req_prot = pgprot_4k_2_large(req_prot);
0826     req_prot = pgprot_clear_protnone_bits(req_prot);
0827     if (pgprot_val(req_prot) & _PAGE_PRESENT)
0828         pgprot_val(req_prot) |= _PAGE_PSE;
0829 
0830     /*
0831      * old_pfn points to the large page base pfn. So we need to add the
0832      * offset of the virtual address:
0833      */
0834     pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
0835     cpa->pfn = pfn;
0836 
0837     /*
0838      * Calculate the large page base address and the number of 4K pages
0839      * in the large page
0840      */
0841     lpaddr = address & pmask;
0842     numpages = psize >> PAGE_SHIFT;
0843 
0844     /*
0845      * Sanity check that the existing mapping is correct versus the static
0846      * protections. static_protections() guards against !PRESENT, so no
0847      * extra conditional required here.
0848      */
0849     chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
0850                       psize, CPA_CONFLICT);
0851 
0852     if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
0853         /*
0854          * Split the large page and tell the split code to
0855          * enforce static protections.
0856          */
0857         cpa->force_static_prot = 1;
0858         return 1;
0859     }
0860 
0861     /*
0862      * Optimization: If the requested pgprot is the same as the current
0863      * pgprot, then the large page can be preserved and no updates are
0864      * required independent of alignment and length of the requested
0865      * range. The above already established that the current pgprot is
0866      * correct, which in consequence makes the requested pgprot correct
0867      * as well if it is the same. The static protection scan below will
0868      * not come to a different conclusion.
0869      */
0870     if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
0871         cpa_inc_lp_sameprot(level);
0872         return 0;
0873     }
0874 
0875     /*
0876      * If the requested range does not cover the full page, split it up
0877      */
0878     if (address != lpaddr || cpa->numpages != numpages)
0879         return 1;
0880 
0881     /*
0882      * Check whether the requested pgprot is conflicting with a static
0883      * protection requirement in the large page.
0884      */
0885     new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
0886                       psize, CPA_DETECT);
0887 
0888     /*
0889      * If there is a conflict, split the large page.
0890      *
0891      * There used to be a 4k wise evaluation trying really hard to
0892      * preserve the large pages, but experimentation has shown, that this
0893      * does not help at all. There might be corner cases which would
0894      * preserve one large page occasionally, but it's really not worth the
0895      * extra code and cycles for the common case.
0896      */
0897     if (pgprot_val(req_prot) != pgprot_val(new_prot))
0898         return 1;
0899 
0900     /* All checks passed. Update the large page mapping. */
0901     new_pte = pfn_pte(old_pfn, new_prot);
0902     __set_pmd_pte(kpte, address, new_pte);
0903     cpa->flags |= CPA_FLUSHTLB;
0904     cpa_inc_lp_preserved(level);
0905     return 0;
0906 }
0907 
0908 static int should_split_large_page(pte_t *kpte, unsigned long address,
0909                    struct cpa_data *cpa)
0910 {
0911     int do_split;
0912 
0913     if (cpa->force_split)
0914         return 1;
0915 
0916     spin_lock(&pgd_lock);
0917     do_split = __should_split_large_page(kpte, address, cpa);
0918     spin_unlock(&pgd_lock);
0919 
0920     return do_split;
0921 }
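
/*
 * Worked example for the clamping in __should_split_large_page()
 * (illustrative): a request for 4096 pages that starts 4K into a 2M
 * mapping at 0xffff888040000000 is first limited to what still fits in
 * that large page:
 *
 *   address  = 0xffff888040001000
 *   lpaddr   = (address + 2M) & ~(2M - 1)        = 0xffff888040200000
 *   numpages = (lpaddr - address) >> PAGE_SHIFT  = 511
 *
 * cpa->numpages is therefore reduced from 4096 to 511 and the caller in
 * __change_page_attr_set_clr() loops over the remainder.
 */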
0922 
0923 static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
0924               pgprot_t ref_prot, unsigned long address,
0925               unsigned long size)
0926 {
0927     unsigned int npg = PFN_DOWN(size);
0928     pgprot_t prot;
0929 
0930     /*
0931      * If should_split_large_page() discovered an inconsistent mapping,
0932      * remove the invalid protection in the split mapping.
0933      */
0934     if (!cpa->force_static_prot)
0935         goto set;
0936 
0937     /* Hand in lpsize = 0 to enforce the protection mechanism */
0938     prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
0939 
0940     if (pgprot_val(prot) == pgprot_val(ref_prot))
0941         goto set;
0942 
0943     /*
0944      * If this is splitting a PMD, fix it up. PUD splits cannot be
0945      * fixed trivially as that would require to rescan the newly
0946      * installed PMD mappings after returning from split_large_page()
0947      * so an eventual further split can allocate the necessary PTE
0948      * pages. Warn for now and revisit it in case this actually
0949      * happens.
0950      */
0951     if (size == PAGE_SIZE)
0952         ref_prot = prot;
0953     else
0954         pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
0955 set:
0956     set_pte(pte, pfn_pte(pfn, ref_prot));
0957 }
0958 
0959 static int
0960 __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
0961            struct page *base)
0962 {
0963     unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
0964     pte_t *pbase = (pte_t *)page_address(base);
0965     unsigned int i, level;
0966     pgprot_t ref_prot;
0967     pte_t *tmp;
0968 
0969     spin_lock(&pgd_lock);
0970     /*
0971      * Check for races, another CPU might have split this page
0972      * up for us already:
0973      */
0974     tmp = _lookup_address_cpa(cpa, address, &level);
0975     if (tmp != kpte) {
0976         spin_unlock(&pgd_lock);
0977         return 1;
0978     }
0979 
0980     paravirt_alloc_pte(&init_mm, page_to_pfn(base));
0981 
0982     switch (level) {
0983     case PG_LEVEL_2M:
0984         ref_prot = pmd_pgprot(*(pmd_t *)kpte);
0985         /*
0986          * Clear PSE (aka _PAGE_PAT) and move
0987          * PAT bit to correct position.
0988          */
0989         ref_prot = pgprot_large_2_4k(ref_prot);
0990         ref_pfn = pmd_pfn(*(pmd_t *)kpte);
0991         lpaddr = address & PMD_MASK;
0992         lpinc = PAGE_SIZE;
0993         break;
0994 
0995     case PG_LEVEL_1G:
0996         ref_prot = pud_pgprot(*(pud_t *)kpte);
0997         ref_pfn = pud_pfn(*(pud_t *)kpte);
0998         pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
0999         lpaddr = address & PUD_MASK;
1000         lpinc = PMD_SIZE;
1001         /*
1002          * Clear the PSE flags if the PRESENT flag is not set
1003          * otherwise pmd_present/pmd_huge will return true
1004          * even on a non present pmd.
1005          */
1006         if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
1007             pgprot_val(ref_prot) &= ~_PAGE_PSE;
1008         break;
1009 
1010     default:
1011         spin_unlock(&pgd_lock);
1012         return 1;
1013     }
1014 
1015     ref_prot = pgprot_clear_protnone_bits(ref_prot);
1016 
1017     /*
1018      * Get the target pfn from the original entry:
1019      */
1020     pfn = ref_pfn;
1021     for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
1022         split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
1023 
1024     if (virt_addr_valid(address)) {
1025         unsigned long pfn = PFN_DOWN(__pa(address));
1026 
1027         if (pfn_range_is_mapped(pfn, pfn + 1))
1028             split_page_count(level);
1029     }
1030 
1031     /*
1032      * Install the new, split up pagetable.
1033      *
1034      * We use the standard kernel pagetable protections for the new
1035      * pagetable protections, the actual ptes set above control the
1036      * primary protection behavior:
1037      */
1038     __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
1039 
1040     /*
1041      * Do a global flush tlb after splitting the large page
1042      * and before we do the actual change page attribute in the PTE.
1043      *
1044      * Without this, we violate the TLB application note, that says:
1045      * "The TLBs may contain both ordinary and large-page
1046      *  translations for a 4-KByte range of linear addresses. This
1047      *  may occur if software modifies the paging structures so that
1048      *  the page size used for the address range changes. If the two
1049      *  translations differ with respect to page frame or attributes
1050      *  (e.g., permissions), processor behavior is undefined and may
1051      *  be implementation-specific."
1052      *
1053      * We do this global tlb flush inside the cpa_lock, so that no
1054      * other cpu with stale tlb entries can change, in parallel, the
1055      * page attributes of an address that also falls into the just
1056      * split large page entry.
1057      */
1058     flush_tlb_all();
1059     spin_unlock(&pgd_lock);
1060 
1061     return 0;
1062 }
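
/*
 * Worked example for the split loop above (illustrative): the split always
 * installs PTRS_PER_PTE == 512 new entries covering the old mapping.
 *
 *   2M split: pfninc = 1,   lpinc = 4K -> 512 PTE entries, 1 pfn apart
 *   1G split: pfninc = 512, lpinc = 2M -> 512 PMD-sized entries, 2M apart
 *             (pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT = 0x200)
 */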
1063 
1064 static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
1065                 unsigned long address)
1066 {
1067     struct page *base;
1068 
1069     if (!debug_pagealloc_enabled())
1070         spin_unlock(&cpa_lock);
1071     base = alloc_pages(GFP_KERNEL, 0);
1072     if (!debug_pagealloc_enabled())
1073         spin_lock(&cpa_lock);
1074     if (!base)
1075         return -ENOMEM;
1076 
1077     if (__split_large_page(cpa, kpte, address, base))
1078         __free_page(base);
1079 
1080     return 0;
1081 }
1082 
1083 static bool try_to_free_pte_page(pte_t *pte)
1084 {
1085     int i;
1086 
1087     for (i = 0; i < PTRS_PER_PTE; i++)
1088         if (!pte_none(pte[i]))
1089             return false;
1090 
1091     free_page((unsigned long)pte);
1092     return true;
1093 }
1094 
1095 static bool try_to_free_pmd_page(pmd_t *pmd)
1096 {
1097     int i;
1098 
1099     for (i = 0; i < PTRS_PER_PMD; i++)
1100         if (!pmd_none(pmd[i]))
1101             return false;
1102 
1103     free_page((unsigned long)pmd);
1104     return true;
1105 }
1106 
1107 static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
1108 {
1109     pte_t *pte = pte_offset_kernel(pmd, start);
1110 
1111     while (start < end) {
1112         set_pte(pte, __pte(0));
1113 
1114         start += PAGE_SIZE;
1115         pte++;
1116     }
1117 
1118     if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
1119         pmd_clear(pmd);
1120         return true;
1121     }
1122     return false;
1123 }
1124 
1125 static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
1126                   unsigned long start, unsigned long end)
1127 {
1128     if (unmap_pte_range(pmd, start, end))
1129         if (try_to_free_pmd_page(pud_pgtable(*pud)))
1130             pud_clear(pud);
1131 }
1132 
1133 static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
1134 {
1135     pmd_t *pmd = pmd_offset(pud, start);
1136 
1137     /*
1138      * Not on a 2MB page boundary?
1139      */
1140     if (start & (PMD_SIZE - 1)) {
1141         unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
1142         unsigned long pre_end = min_t(unsigned long, end, next_page);
1143 
1144         __unmap_pmd_range(pud, pmd, start, pre_end);
1145 
1146         start = pre_end;
1147         pmd++;
1148     }
1149 
1150     /*
1151      * Try to unmap in 2M chunks.
1152      */
1153     while (end - start >= PMD_SIZE) {
1154         if (pmd_large(*pmd))
1155             pmd_clear(pmd);
1156         else
1157             __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
1158 
1159         start += PMD_SIZE;
1160         pmd++;
1161     }
1162 
1163     /*
1164      * 4K leftovers?
1165      */
1166     if (start < end)
1167         return __unmap_pmd_range(pud, pmd, start, end);
1168 
1169     /*
1170      * Try again to free the PMD page if haven't succeeded above.
1171      */
1172     if (!pud_none(*pud))
1173         if (try_to_free_pmd_page(pud_pgtable(*pud)))
1174             pud_clear(pud);
1175 }
1176 
1177 static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
1178 {
1179     pud_t *pud = pud_offset(p4d, start);
1180 
1181     /*
1182      * Not on a GB page boundary?
1183      */
1184     if (start & (PUD_SIZE - 1)) {
1185         unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
1186         unsigned long pre_end   = min_t(unsigned long, end, next_page);
1187 
1188         unmap_pmd_range(pud, start, pre_end);
1189 
1190         start = pre_end;
1191         pud++;
1192     }
1193 
1194     /*
1195      * Try to unmap in 1G chunks?
1196      */
1197     while (end - start >= PUD_SIZE) {
1198 
1199         if (pud_large(*pud))
1200             pud_clear(pud);
1201         else
1202             unmap_pmd_range(pud, start, start + PUD_SIZE);
1203 
1204         start += PUD_SIZE;
1205         pud++;
1206     }
1207 
1208     /*
1209      * 2M leftovers?
1210      */
1211     if (start < end)
1212         unmap_pmd_range(pud, start, end);
1213 
1214     /*
1215      * No need to try to free the PUD page because we'll free it in
1216      * populate_pgd's error path
1217      */
1218 }
1219 
1220 static int alloc_pte_page(pmd_t *pmd)
1221 {
1222     pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
1223     if (!pte)
1224         return -1;
1225 
1226     set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
1227     return 0;
1228 }
1229 
1230 static int alloc_pmd_page(pud_t *pud)
1231 {
1232     pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
1233     if (!pmd)
1234         return -1;
1235 
1236     set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
1237     return 0;
1238 }
1239 
1240 static void populate_pte(struct cpa_data *cpa,
1241              unsigned long start, unsigned long end,
1242              unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
1243 {
1244     pte_t *pte;
1245 
1246     pte = pte_offset_kernel(pmd, start);
1247 
1248     pgprot = pgprot_clear_protnone_bits(pgprot);
1249 
1250     while (num_pages-- && start < end) {
1251         set_pte(pte, pfn_pte(cpa->pfn, pgprot));
1252 
1253         start    += PAGE_SIZE;
1254         cpa->pfn++;
1255         pte++;
1256     }
1257 }
1258 
1259 static long populate_pmd(struct cpa_data *cpa,
1260              unsigned long start, unsigned long end,
1261              unsigned num_pages, pud_t *pud, pgprot_t pgprot)
1262 {
1263     long cur_pages = 0;
1264     pmd_t *pmd;
1265     pgprot_t pmd_pgprot;
1266 
1267     /*
1268      * Not on a 2M boundary?
1269      */
1270     if (start & (PMD_SIZE - 1)) {
1271         unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
1272         unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
1273 
1274         pre_end   = min_t(unsigned long, pre_end, next_page);
1275         cur_pages = (pre_end - start) >> PAGE_SHIFT;
1276         cur_pages = min_t(unsigned int, num_pages, cur_pages);
1277 
1278         /*
1279          * Need a PTE page?
1280          */
1281         pmd = pmd_offset(pud, start);
1282         if (pmd_none(*pmd))
1283             if (alloc_pte_page(pmd))
1284                 return -1;
1285 
1286         populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
1287 
1288         start = pre_end;
1289     }
1290 
1291     /*
1292      * We mapped them all?
1293      */
1294     if (num_pages == cur_pages)
1295         return cur_pages;
1296 
1297     pmd_pgprot = pgprot_4k_2_large(pgprot);
1298 
1299     while (end - start >= PMD_SIZE) {
1300 
1301         /*
1302          * We cannot use a 1G page so allocate a PMD page if needed.
1303          */
1304         if (pud_none(*pud))
1305             if (alloc_pmd_page(pud))
1306                 return -1;
1307 
1308         pmd = pmd_offset(pud, start);
1309 
1310         set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
1311                     canon_pgprot(pmd_pgprot))));
1312 
1313         start     += PMD_SIZE;
1314         cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
1315         cur_pages += PMD_SIZE >> PAGE_SHIFT;
1316     }
1317 
1318     /*
1319      * Map trailing 4K pages.
1320      */
1321     if (start < end) {
1322         pmd = pmd_offset(pud, start);
1323         if (pmd_none(*pmd))
1324             if (alloc_pte_page(pmd))
1325                 return -1;
1326 
1327         populate_pte(cpa, start, end, num_pages - cur_pages,
1328                  pmd, pgprot);
1329     }
1330     return num_pages;
1331 }
1332 
1333 static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
1334             pgprot_t pgprot)
1335 {
1336     pud_t *pud;
1337     unsigned long end;
1338     long cur_pages = 0;
1339     pgprot_t pud_pgprot;
1340 
1341     end = start + (cpa->numpages << PAGE_SHIFT);
1342 
1343     /*
1344      * Not on a Gb page boundary? => map everything up to it with
1345      * smaller pages.
1346      */
1347     if (start & (PUD_SIZE - 1)) {
1348         unsigned long pre_end;
1349         unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
1350 
1351         pre_end   = min_t(unsigned long, end, next_page);
1352         cur_pages = (pre_end - start) >> PAGE_SHIFT;
1353         cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
1354 
1355         pud = pud_offset(p4d, start);
1356 
1357         /*
1358          * Need a PMD page?
1359          */
1360         if (pud_none(*pud))
1361             if (alloc_pmd_page(pud))
1362                 return -1;
1363 
1364         cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
1365                      pud, pgprot);
1366         if (cur_pages < 0)
1367             return cur_pages;
1368 
1369         start = pre_end;
1370     }
1371 
1372     /* We mapped them all? */
1373     if (cpa->numpages == cur_pages)
1374         return cur_pages;
1375 
1376     pud = pud_offset(p4d, start);
1377     pud_pgprot = pgprot_4k_2_large(pgprot);
1378 
1379     /*
1380      * Map everything starting from the Gb boundary, possibly with 1G pages
1381      */
1382     while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
1383         set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
1384                    canon_pgprot(pud_pgprot))));
1385 
1386         start     += PUD_SIZE;
1387         cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
1388         cur_pages += PUD_SIZE >> PAGE_SHIFT;
1389         pud++;
1390     }
1391 
1392     /* Map trailing leftover */
1393     if (start < end) {
1394         long tmp;
1395 
1396         pud = pud_offset(p4d, start);
1397         if (pud_none(*pud))
1398             if (alloc_pmd_page(pud))
1399                 return -1;
1400 
1401         tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
1402                    pud, pgprot);
1403         if (tmp < 0)
1404             return cur_pages;
1405 
1406         cur_pages += tmp;
1407     }
1408     return cur_pages;
1409 }
1410 
1411 /*
1412  * Restrictions for kernel page table do not necessarily apply when mapping in
1413  * an alternate PGD.
1414  */
1415 static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
1416 {
1417     pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
1418     pud_t *pud = NULL;  /* shut up gcc */
1419     p4d_t *p4d;
1420     pgd_t *pgd_entry;
1421     long ret;
1422 
1423     pgd_entry = cpa->pgd + pgd_index(addr);
1424 
1425     if (pgd_none(*pgd_entry)) {
1426         p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
1427         if (!p4d)
1428             return -1;
1429 
1430         set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
1431     }
1432 
1433     /*
1434      * Allocate a PUD page and hand it down for mapping.
1435      */
1436     p4d = p4d_offset(pgd_entry, addr);
1437     if (p4d_none(*p4d)) {
1438         pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
1439         if (!pud)
1440             return -1;
1441 
1442         set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
1443     }
1444 
1445     pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
1446     pgprot_val(pgprot) |=  pgprot_val(cpa->mask_set);
1447 
1448     ret = populate_pud(cpa, addr, p4d, pgprot);
1449     if (ret < 0) {
1450         /*
1451          * Leave the PUD page in place in case some other CPU or thread
1452          * already found it, but remove any useless entries we just
1453          * added to it.
1454          */
1455         unmap_pud_range(p4d, addr,
1456                 addr + (cpa->numpages << PAGE_SHIFT));
1457         return ret;
1458     }
1459 
1460     cpa->numpages = ret;
1461     return 0;
1462 }
1463 
1464 static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
1465                    int primary)
1466 {
1467     if (cpa->pgd) {
1468         /*
1469          * Right now, we only execute this code path when mapping
1470          * the EFI virtual memory map regions, no other users
1471          * provide a ->pgd value. This may change in the future.
1472          */
1473         return populate_pgd(cpa, vaddr);
1474     }
1475 
1476     /*
1477      * Ignore all non primary paths.
1478      */
1479     if (!primary) {
1480         cpa->numpages = 1;
1481         return 0;
1482     }
1483 
1484     /*
1485      * Ignore the NULL PTE for kernel identity mapping, as it is expected
1486      * to have holes.
1487      * Also set numpages to '1' indicating that we processed cpa req for
1488      * one virtual address page and its pfn. TBD: numpages can be set based
1489      * on the initial value and the level returned by lookup_address().
1490      */
1491     if (within(vaddr, PAGE_OFFSET,
1492            PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
1493         cpa->numpages = 1;
1494         cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
1495         return 0;
1496 
1497     } else if (__cpa_pfn_in_highmap(cpa->pfn)) {
1498         /* Faults in the highmap are OK, so do not warn: */
1499         return -EFAULT;
1500     } else {
1501         WARN(1, KERN_WARNING "CPA: called for zero pte. "
1502             "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
1503             *cpa->vaddr);
1504 
1505         return -EFAULT;
1506     }
1507 }
1508 
1509 static int __change_page_attr(struct cpa_data *cpa, int primary)
1510 {
1511     unsigned long address;
1512     int do_split, err;
1513     unsigned int level;
1514     pte_t *kpte, old_pte;
1515 
1516     address = __cpa_addr(cpa, cpa->curpage);
1517 repeat:
1518     kpte = _lookup_address_cpa(cpa, address, &level);
1519     if (!kpte)
1520         return __cpa_process_fault(cpa, address, primary);
1521 
1522     old_pte = *kpte;
1523     if (pte_none(old_pte))
1524         return __cpa_process_fault(cpa, address, primary);
1525 
1526     if (level == PG_LEVEL_4K) {
1527         pte_t new_pte;
1528         pgprot_t new_prot = pte_pgprot(old_pte);
1529         unsigned long pfn = pte_pfn(old_pte);
1530 
1531         pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
1532         pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
1533 
1534         cpa_inc_4k_install();
1535         /* Hand in lpsize = 0 to enforce the protection mechanism */
1536         new_prot = static_protections(new_prot, address, pfn, 1, 0,
1537                           CPA_PROTECT);
1538 
1539         new_prot = pgprot_clear_protnone_bits(new_prot);
1540 
1541         /*
1542          * We need to keep the pfn from the existing PTE,
1543          * after all we're only going to change its attributes,
1544          * not the memory it points to
1545          */
1546         new_pte = pfn_pte(pfn, new_prot);
1547         cpa->pfn = pfn;
1548         /*
1549          * Do we really change anything ?
1550          */
1551         if (pte_val(old_pte) != pte_val(new_pte)) {
1552             set_pte_atomic(kpte, new_pte);
1553             cpa->flags |= CPA_FLUSHTLB;
1554         }
1555         cpa->numpages = 1;
1556         return 0;
1557     }
1558 
1559     /*
1560      * Check, whether we can keep the large page intact
1561      * and just change the pte:
1562      */
1563     do_split = should_split_large_page(kpte, address, cpa);
1564     /*
1565      * When the range fits into the existing large page,
1566      * return. cpa->numpages and the CPA_FLUSHTLB flag have been
1567      * updated in __should_split_large_page():
1568      */
1569     if (do_split <= 0)
1570         return do_split;
1571 
1572     /*
1573      * We have to split the large page:
1574      */
1575     err = split_large_page(cpa, kpte, address);
1576     if (!err)
1577         goto repeat;
1578 
1579     return err;
1580 }
1581 
1582 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
1583 
1584 static int cpa_process_alias(struct cpa_data *cpa)
1585 {
1586     struct cpa_data alias_cpa;
1587     unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
1588     unsigned long vaddr;
1589     int ret;
1590 
1591     if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
1592         return 0;
1593 
1594     /*
1595      * No need to redo, when the primary call touched the direct
1596      * mapping already:
1597      */
1598     vaddr = __cpa_addr(cpa, cpa->curpage);
1599     if (!(within(vaddr, PAGE_OFFSET,
1600             PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
1601 
1602         alias_cpa = *cpa;
1603         alias_cpa.vaddr = &laddr;
1604         alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1605         alias_cpa.curpage = 0;
1606 
1607         cpa->force_flush_all = 1;
1608 
1609         ret = __change_page_attr_set_clr(&alias_cpa, 0);
1610         if (ret)
1611             return ret;
1612     }
1613 
1614 #ifdef CONFIG_X86_64
1615     /*
1616      * If the primary call didn't touch the high mapping already
1617      * and the physical address is inside the kernel map, we need
1618      * to touch the high mapped kernel as well:
1619      */
1620     if (!within(vaddr, (unsigned long)_text, _brk_end) &&
1621         __cpa_pfn_in_highmap(cpa->pfn)) {
1622         unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
1623                            __START_KERNEL_map - phys_base;
1624         alias_cpa = *cpa;
1625         alias_cpa.vaddr = &temp_cpa_vaddr;
1626         alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1627         alias_cpa.curpage = 0;
1628 
1629         cpa->force_flush_all = 1;
1630         /*
1631          * The high mapping range is imprecise, so ignore the
1632          * return value.
1633          */
1634         __change_page_attr_set_clr(&alias_cpa, 0);
1635     }
1636 #endif
1637 
1638     return 0;
1639 }
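
/*
 * Illustrative example, not part of the upstream file: a caller that makes
 * a vmalloc'ed buffer read-only only passes the vmalloc address in, yet the
 * same physical page is still reachable through the 1:1 direct mapping.
 * The alias processing above also fixes up that mapping (and, if the pfn
 * happens to lie inside the kernel image, the high kernel mapping):
 *
 *   buf = vmalloc(PAGE_SIZE);
 *   set_memory_ro((unsigned long)buf, 1);
 *   // cpa_process_alias() clears _PAGE_RW in the direct mapping of the
 *   // page backing 'buf' as well.
 */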
1640 
1641 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
1642 {
1643     unsigned long numpages = cpa->numpages;
1644     unsigned long rempages = numpages;
1645     int ret = 0;
1646 
1647     while (rempages) {
1648         /*
1649          * Store the remaining nr of pages for the large page
1650          * preservation check.
1651          */
1652         cpa->numpages = rempages;
1653         /* for array changes, we can't use large page */
1654         if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
1655             cpa->numpages = 1;
1656 
1657         if (!debug_pagealloc_enabled())
1658             spin_lock(&cpa_lock);
1659         ret = __change_page_attr(cpa, checkalias);
1660         if (!debug_pagealloc_enabled())
1661             spin_unlock(&cpa_lock);
1662         if (ret)
1663             goto out;
1664 
1665         if (checkalias) {
1666             ret = cpa_process_alias(cpa);
1667             if (ret)
1668                 goto out;
1669         }
1670 
1671         /*
1672          * Adjust the number of pages with the result of the
1673          * CPA operation. Either a large page has been
1674          * preserved or a single page update happened.
1675          */
1676         BUG_ON(cpa->numpages > rempages || !cpa->numpages);
1677         rempages -= cpa->numpages;
1678         cpa->curpage += cpa->numpages;
1679     }
1680 
1681 out:
1682     /* Restore the original numpages */
1683     cpa->numpages = numpages;
1684     return ret;
1685 }
1686 
1687 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1688                     pgprot_t mask_set, pgprot_t mask_clr,
1689                     int force_split, int in_flag,
1690                     struct page **pages)
1691 {
1692     struct cpa_data cpa;
1693     int ret, cache, checkalias;
1694 
1695     memset(&cpa, 0, sizeof(cpa));
1696 
1697     /*
1698      * Check, if we are requested to set a not supported
1699      * feature.  Clearing non-supported features is OK.
1700      */
1701     mask_set = canon_pgprot(mask_set);
1702 
1703     if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
1704         return 0;
1705 
1706     /* Ensure we are PAGE_SIZE aligned */
1707     if (in_flag & CPA_ARRAY) {
1708         int i;
1709         for (i = 0; i < numpages; i++) {
1710             if (addr[i] & ~PAGE_MASK) {
1711                 addr[i] &= PAGE_MASK;
1712                 WARN_ON_ONCE(1);
1713             }
1714         }
1715     } else if (!(in_flag & CPA_PAGES_ARRAY)) {
1716         /*
1717          * in_flag of CPA_PAGES_ARRAY implies it is aligned.
1718          * No need to check in that case
1719          */
1720         if (*addr & ~PAGE_MASK) {
1721             *addr &= PAGE_MASK;
1722             /*
1723              * People should not be passing in unaligned addresses:
1724              */
1725             WARN_ON_ONCE(1);
1726         }
1727     }
1728 
1729     /* Must avoid aliasing mappings in the highmem code */
1730     kmap_flush_unused();
1731 
1732     vm_unmap_aliases();
1733 
1734     cpa.vaddr = addr;
1735     cpa.pages = pages;
1736     cpa.numpages = numpages;
1737     cpa.mask_set = mask_set;
1738     cpa.mask_clr = mask_clr;
1739     cpa.flags = 0;
1740     cpa.curpage = 0;
1741     cpa.force_split = force_split;
1742 
1743     if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
1744         cpa.flags |= in_flag;
1745 
1746     /* No alias checking for _NX bit modifications */
1747     checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
1748     /* Has caller explicitly disabled alias checking? */
1749     if (in_flag & CPA_NO_CHECK_ALIAS)
1750         checkalias = 0;
1751 
1752     ret = __change_page_attr_set_clr(&cpa, checkalias);
1753 
1754     /*
1755      * Check whether we really changed something:
1756      */
1757     if (!(cpa.flags & CPA_FLUSHTLB))
1758         goto out;
1759 
1760     /*
1761      * No need to flush when we did not set any of the caching
1762      * attributes:
1763      */
1764     cache = !!pgprot2cachemode(mask_set);
1765 
1766     /*
1767      * On error, flush everything to be sure.
1768      */
1769     if (ret) {
1770         cpa_flush_all(cache);
1771         goto out;
1772     }
1773 
1774     cpa_flush(&cpa, cache);
1775 out:
1776     return ret;
1777 }
1778 
1779 static inline int change_page_attr_set(unsigned long *addr, int numpages,
1780                        pgprot_t mask, int array)
1781 {
1782     return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
1783         (array ? CPA_ARRAY : 0), NULL);
1784 }
1785 
1786 static inline int change_page_attr_clear(unsigned long *addr, int numpages,
1787                      pgprot_t mask, int array)
1788 {
1789     return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
1790         (array ? CPA_ARRAY : 0), NULL);
1791 }
1792 
1793 static inline int cpa_set_pages_array(struct page **pages, int numpages,
1794                        pgprot_t mask)
1795 {
1796     return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
1797         CPA_PAGES_ARRAY, pages);
1798 }
1799 
1800 static inline int cpa_clear_pages_array(struct page **pages, int numpages,
1801                      pgprot_t mask)
1802 {
1803     return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
1804         CPA_PAGES_ARRAY, pages);
1805 }
1806 
1807 /*
1808  * __set_memory_prot is an internal helper for callers that have been passed
1809  * a pgprot_t value from upper layers and for which a reservation has
1810  * already been taken. If you want to set a specific page protection,
1811  * use the set_memory_xx() functions.
1812  */
1813 int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot)
1814 {
1815     return change_page_attr_set_clr(&addr, numpages, prot,
1816                     __pgprot(~pgprot_val(prot)), 0, 0,
1817                     NULL);
1818 }
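/*
 * Minimal usage sketch for the helper above. This is illustrative only:
 * 'vaddr' and 'nr' are placeholders, not symbols from this file, and the
 * caller is assumed to have done its own memtype reservation already.
 *
 *	pgprot_t prot = PAGE_KERNEL_NOCACHE;
 *	int ret;
 *
 *	ret = __set_memory_prot(vaddr, nr, prot);
 *
 * Every bit set in 'prot' is requested via mask_set and every bit not in
 * 'prot' is requested to be cleared via mask_clr, which is exactly the
 * pair built in the function above.
 */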
1819 
1820 int _set_memory_uc(unsigned long addr, int numpages)
1821 {
1822     /*
1823      * For now this is UC MINUS; see the comments in ioremap().
1824      * If you really need strong UC, use ioremap_uc(), but note
1825      * that you cannot override IO areas with set_memory_*() as
1826      * these helpers cannot work with IO memory.
1827      */
1828     return change_page_attr_set(&addr, numpages,
1829                     cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1830                     0);
1831 }
1832 
1833 int set_memory_uc(unsigned long addr, int numpages)
1834 {
1835     int ret;
1836 
1837     /*
1838      * For now this is UC MINUS; see the comments in ioremap().
1839      */
1840     ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1841                   _PAGE_CACHE_MODE_UC_MINUS, NULL);
1842     if (ret)
1843         goto out_err;
1844 
1845     ret = _set_memory_uc(addr, numpages);
1846     if (ret)
1847         goto out_free;
1848 
1849     return 0;
1850 
1851 out_free:
1852     memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1853 out_err:
1854     return ret;
1855 }
1856 EXPORT_SYMBOL(set_memory_uc);
1857 
1858 int _set_memory_wc(unsigned long addr, int numpages)
1859 {
1860     int ret;
1861 
1862     ret = change_page_attr_set(&addr, numpages,
1863                    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1864                    0);
1865     if (!ret) {
1866         ret = change_page_attr_set_clr(&addr, numpages,
1867                            cachemode2pgprot(_PAGE_CACHE_MODE_WC),
1868                            __pgprot(_PAGE_CACHE_MASK),
1869                            0, 0, NULL);
1870     }
1871     return ret;
1872 }
1873 
1874 int set_memory_wc(unsigned long addr, int numpages)
1875 {
1876     int ret;
1877 
1878     ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1879         _PAGE_CACHE_MODE_WC, NULL);
1880     if (ret)
1881         return ret;
1882 
1883     ret = _set_memory_wc(addr, numpages);
1884     if (ret)
1885         memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1886 
1887     return ret;
1888 }
1889 EXPORT_SYMBOL(set_memory_wc);
1890 
1891 int _set_memory_wt(unsigned long addr, int numpages)
1892 {
1893     return change_page_attr_set(&addr, numpages,
1894                     cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
1895 }
1896 
1897 int _set_memory_wb(unsigned long addr, int numpages)
1898 {
1899     /* WB cache mode is hard wired to all cache attribute bits being 0 */
1900     return change_page_attr_clear(&addr, numpages,
1901                       __pgprot(_PAGE_CACHE_MASK), 0);
1902 }
1903 
1904 int set_memory_wb(unsigned long addr, int numpages)
1905 {
1906     int ret;
1907 
1908     ret = _set_memory_wb(addr, numpages);
1909     if (ret)
1910         return ret;
1911 
1912     memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1913     return 0;
1914 }
1915 EXPORT_SYMBOL(set_memory_wb);
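/*
 * Typical lifecycle sketch for the cacheability helpers (illustrative;
 * 'buf' and 'nr' are placeholders for a page-aligned, page-backed buffer):
 * the memtype reservation and release are handled inside the helpers, so a
 * caller only has to pair the conversion with a switch back to WB before
 * reusing or freeing the range.
 *
 *	ret = set_memory_wc((unsigned long)buf, nr);
 *	if (ret)
 *		return ret;
 *	...				use the write-combined buffer
 *	set_memory_wb((unsigned long)buf, nr);
 *
 * The same pattern applies to set_memory_uc()/set_memory_wb().
 */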
1916 
1917 /* Prevent speculative access to a page by marking it not-present */
1918 #ifdef CONFIG_X86_64
1919 int set_mce_nospec(unsigned long pfn)
1920 {
1921     unsigned long decoy_addr;
1922     int rc;
1923 
1924     /* SGX pages are not in the 1:1 map */
1925     if (arch_is_platform_page(pfn << PAGE_SHIFT))
1926         return 0;
1927     /*
1928      * We would like to just call:
1929      *      set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
1930      * but doing that would radically increase the odds of a
1931      * speculative access to the poison page because we'd have
1932      * the virtual address of the kernel 1:1 mapping sitting
1933      * around in registers.
1934      * Instead we get tricky.  We create a non-canonical address
1935      * that looks just like the one we want, but has bit 63 flipped.
1936      * This relies on set_memory_XX() properly sanitizing any __pa()
1937      * results with __PHYSICAL_MASK or PTE_PFN_MASK.
1938      */
1939     decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
1940 
1941     rc = set_memory_np(decoy_addr, 1);
1942     if (rc)
1943         pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
1944     return rc;
1945 }
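/*
 * Worked example of the decoy address above, assuming the default 4-level
 * PAGE_OFFSET of 0xffff888000000000 (with KASLR or 5-level paging the base
 * differs, but the idea is the same): for pfn 0x12345 the 1:1 address would
 * be 0xffff888012345000; flipping bit 63 yields the non-canonical value
 * 0x7fff888012345000, which is what gets handed to set_memory_np(). The
 * page-table walk still finds the right PTE because the physical address is
 * recovered by masking with __PHYSICAL_MASK/PTE_PFN_MASK, while the usable
 * 1:1 virtual address never sits in a register where it could feed a
 * speculative access.
 */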
1946 
1947 static int set_memory_present(unsigned long *addr, int numpages)
1948 {
1949     return change_page_attr_set(addr, numpages, __pgprot(_PAGE_PRESENT), 0);
1950 }
1951 
1952 /* Restore full speculative operation to the pfn. */
1953 int clear_mce_nospec(unsigned long pfn)
1954 {
1955     unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);
1956 
1957     return set_memory_present(&addr, 1);
1958 }
1959 EXPORT_SYMBOL_GPL(clear_mce_nospec);
1960 #endif /* CONFIG_X86_64 */
1961 
1962 int set_memory_x(unsigned long addr, int numpages)
1963 {
1964     if (!(__supported_pte_mask & _PAGE_NX))
1965         return 0;
1966 
1967     return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
1968 }
1969 
1970 int set_memory_nx(unsigned long addr, int numpages)
1971 {
1972     if (!(__supported_pte_mask & _PAGE_NX))
1973         return 0;
1974 
1975     return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
1976 }
1977 
1978 int set_memory_ro(unsigned long addr, int numpages)
1979 {
1980     return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
1981 }
1982 
1983 int set_memory_rw(unsigned long addr, int numpages)
1984 {
1985     return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
1986 }
1987 
1988 int set_memory_np(unsigned long addr, int numpages)
1989 {
1990     return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
1991 }
1992 
1993 int set_memory_np_noalias(unsigned long addr, int numpages)
1994 {
1995     int cpa_flags = CPA_NO_CHECK_ALIAS;
1996 
1997     return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
1998                     __pgprot(_PAGE_PRESENT), 0,
1999                     cpa_flags, NULL);
2000 }
2001 
2002 int set_memory_4k(unsigned long addr, int numpages)
2003 {
2004     return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
2005                     __pgprot(0), 1, 0, NULL);
2006 }
2007 
2008 int set_memory_nonglobal(unsigned long addr, int numpages)
2009 {
2010     return change_page_attr_clear(&addr, numpages,
2011                       __pgprot(_PAGE_GLOBAL), 0);
2012 }
2013 
2014 int set_memory_global(unsigned long addr, int numpages)
2015 {
2016     return change_page_attr_set(&addr, numpages,
2017                     __pgprot(_PAGE_GLOBAL), 0);
2018 }
2019 
2020 /*
2021  * __set_memory_enc_pgtable() is used for hypervisors that are informed
2022  * about the "encryption" status via page tables.
2023  */
2024 static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
2025 {
2026     pgprot_t empty = __pgprot(0);
2027     struct cpa_data cpa;
2028     int ret;
2029 
2030     /* Should not be working on unaligned addresses */
2031     if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
2032         addr &= PAGE_MASK;
2033 
2034     memset(&cpa, 0, sizeof(cpa));
2035     cpa.vaddr = &addr;
2036     cpa.numpages = numpages;
2037     cpa.mask_set = enc ? pgprot_encrypted(empty) : pgprot_decrypted(empty);
2038     cpa.mask_clr = enc ? pgprot_decrypted(empty) : pgprot_encrypted(empty);
2039     cpa.pgd = init_mm.pgd;
2040 
2041     /* Must avoid aliasing mappings in the highmem code */
2042     kmap_flush_unused();
2043     vm_unmap_aliases();
2044 
2045     /* Flush the caches as needed before changing the encryption attribute. */
2046     if (x86_platform.guest.enc_tlb_flush_required(enc))
2047         cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
2048 
2049     /* Notify the hypervisor that we are about to set/clear the encryption attribute. */
2050     x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
2051 
2052     ret = __change_page_attr_set_clr(&cpa, 1);
2053 
2054     /*
2055      * After changing the encryption attribute, we need to flush TLBs again
2056      * in case any speculative TLB caching occurred (but no need to flush
2057      * caches again).  We could just use cpa_flush_all(), but in case TLB
2058      * flushing gets optimized in the cpa_flush() path use the same logic
2059      * as above.
2060      */
2061     cpa_flush(&cpa, 0);
2062 
2063     /* Notify the hypervisor that we have successfully set/cleared the encryption attribute. */
2064     if (!ret) {
2065         if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
2066             ret = -EIO;
2067     }
2068 
2069     return ret;
2070 }
2071 
2072 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
2073 {
2074     if (hv_is_isolation_supported())
2075         return hv_set_mem_host_visibility(addr, numpages, !enc);
2076 
2077     if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
2078         return __set_memory_enc_pgtable(addr, numpages, enc);
2079 
2080     return 0;
2081 }
2082 
2083 int set_memory_encrypted(unsigned long addr, int numpages)
2084 {
2085     return __set_memory_enc_dec(addr, numpages, true);
2086 }
2087 EXPORT_SYMBOL_GPL(set_memory_encrypted);
2088 
2089 int set_memory_decrypted(unsigned long addr, int numpages)
2090 {
2091     return __set_memory_enc_dec(addr, numpages, false);
2092 }
2093 EXPORT_SYMBOL_GPL(set_memory_decrypted);
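/*
 * Usage sketch (illustrative; 'buf' and 'nr' are placeholders): a guest
 * driver that wants to share a buffer with the host/hypervisor typically
 * decrypts it for the lifetime of the sharing and re-encrypts it before
 * the memory goes back to the page allocator.
 *
 *	ret = set_memory_decrypted((unsigned long)buf, nr);
 *	if (ret)
 *		return ret;
 *	...				buffer is visible to the host
 *	WARN_ON_ONCE(set_memory_encrypted((unsigned long)buf, nr));
 */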
2094 
2095 int set_pages_uc(struct page *page, int numpages)
2096 {
2097     unsigned long addr = (unsigned long)page_address(page);
2098 
2099     return set_memory_uc(addr, numpages);
2100 }
2101 EXPORT_SYMBOL(set_pages_uc);
2102 
2103 static int _set_pages_array(struct page **pages, int numpages,
2104         enum page_cache_mode new_type)
2105 {
2106     unsigned long start;
2107     unsigned long end;
2108     enum page_cache_mode set_type;
2109     int i;
2110     int free_idx;
2111     int ret;
2112 
2113     for (i = 0; i < numpages; i++) {
2114         if (PageHighMem(pages[i]))
2115             continue;
2116         start = page_to_pfn(pages[i]) << PAGE_SHIFT;
2117         end = start + PAGE_SIZE;
2118         if (memtype_reserve(start, end, new_type, NULL))
2119             goto err_out;
2120     }
2121 
2122     /* If WC, set to UC- first and then WC */
2123     set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
2124                 _PAGE_CACHE_MODE_UC_MINUS : new_type;
2125 
2126     ret = cpa_set_pages_array(pages, numpages,
2127                   cachemode2pgprot(set_type));
2128     if (!ret && new_type == _PAGE_CACHE_MODE_WC)
2129         ret = change_page_attr_set_clr(NULL, numpages,
2130                            cachemode2pgprot(
2131                         _PAGE_CACHE_MODE_WC),
2132                            __pgprot(_PAGE_CACHE_MASK),
2133                            0, CPA_PAGES_ARRAY, pages);
2134     if (ret)
2135         goto err_out;
2136     return 0; /* Success */
2137 err_out:
2138     free_idx = i;
2139     for (i = 0; i < free_idx; i++) {
2140         if (PageHighMem(pages[i]))
2141             continue;
2142         start = page_to_pfn(pages[i]) << PAGE_SHIFT;
2143         end = start + PAGE_SIZE;
2144         memtype_free(start, end);
2145     }
2146     return -EINVAL;
2147 }
2148 
2149 int set_pages_array_uc(struct page **pages, int numpages)
2150 {
2151     return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
2152 }
2153 EXPORT_SYMBOL(set_pages_array_uc);
2154 
2155 int set_pages_array_wc(struct page **pages, int numpages)
2156 {
2157     return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
2158 }
2159 EXPORT_SYMBOL(set_pages_array_wc);
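/*
 * Sketch of the array variants (illustrative; 'pages' and 'nr' are
 * placeholders): a caller holding an array of struct page pointers, e.g. a
 * graphics driver managing scattered allocations, converts and restores the
 * whole set in one CPA pass instead of looping over set_memory_*().
 *
 *	ret = set_pages_array_wc(pages, nr);
 *	...
 *	set_pages_array_wb(pages, nr);
 *
 * Highmem pages are skipped for the memtype bookkeeping above because they
 * are not part of the kernel direct mapping.
 */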
2160 
2161 int set_pages_wb(struct page *page, int numpages)
2162 {
2163     unsigned long addr = (unsigned long)page_address(page);
2164 
2165     return set_memory_wb(addr, numpages);
2166 }
2167 EXPORT_SYMBOL(set_pages_wb);
2168 
2169 int set_pages_array_wb(struct page **pages, int numpages)
2170 {
2171     int retval;
2172     unsigned long start;
2173     unsigned long end;
2174     int i;
2175 
2176     /* WB cache mode is hard wired to all cache attribute bits being 0 */
2177     retval = cpa_clear_pages_array(pages, numpages,
2178             __pgprot(_PAGE_CACHE_MASK));
2179     if (retval)
2180         return retval;
2181 
2182     for (i = 0; i < numpages; i++) {
2183         if (PageHighMem(pages[i]))
2184             continue;
2185         start = page_to_pfn(pages[i]) << PAGE_SHIFT;
2186         end = start + PAGE_SIZE;
2187         memtype_free(start, end);
2188     }
2189 
2190     return 0;
2191 }
2192 EXPORT_SYMBOL(set_pages_array_wb);
2193 
2194 int set_pages_ro(struct page *page, int numpages)
2195 {
2196     unsigned long addr = (unsigned long)page_address(page);
2197 
2198     return set_memory_ro(addr, numpages);
2199 }
2200 
2201 int set_pages_rw(struct page *page, int numpages)
2202 {
2203     unsigned long addr = (unsigned long)page_address(page);
2204 
2205     return set_memory_rw(addr, numpages);
2206 }
2207 
2208 static int __set_pages_p(struct page *page, int numpages)
2209 {
2210     unsigned long tempaddr = (unsigned long) page_address(page);
2211     struct cpa_data cpa = { .vaddr = &tempaddr,
2212                 .pgd = NULL,
2213                 .numpages = numpages,
2214                 .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2215                 .mask_clr = __pgprot(0),
2216                 .flags = 0};
2217 
2218     /*
2219      * No alias checking needed for setting the present flag. Otherwise,
2220      * we may need to break large pages for 64-bit kernel text
2221      * mappings (this adds complexity, especially if we want to do
2222      * this from atomic context). Let's keep it simple!
2223      */
2224     return __change_page_attr_set_clr(&cpa, 0);
2225 }
2226 
2227 static int __set_pages_np(struct page *page, int numpages)
2228 {
2229     unsigned long tempaddr = (unsigned long) page_address(page);
2230     struct cpa_data cpa = { .vaddr = &tempaddr,
2231                 .pgd = NULL,
2232                 .numpages = numpages,
2233                 .mask_set = __pgprot(0),
2234                 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2235                 .flags = 0};
2236 
2237     /*
2238      * No alias checking needed for setting the not-present flag. Otherwise,
2239      * we may need to break large pages for 64-bit kernel text
2240      * mappings (this adds complexity, especially if we want to do
2241      * this from atomic context). Let's keep it simple!
2242      */
2243     return __change_page_attr_set_clr(&cpa, 0);
2244 }
2245 
2246 int set_direct_map_invalid_noflush(struct page *page)
2247 {
2248     return __set_pages_np(page, 1);
2249 }
2250 
2251 int set_direct_map_default_noflush(struct page *page)
2252 {
2253     return __set_pages_p(page, 1);
2254 }
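/*
 * Note on the _noflush suffix: the two helpers above only rewrite the
 * direct-map PTEs; no TLB flush is performed here, so stale translations
 * may survive until the caller flushes. Callers are expected to batch
 * their own flush (compare __kernel_map_pages() below, which does a local
 * __flush_tlb_all() itself).
 */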
2255 
2256 #ifdef CONFIG_DEBUG_PAGEALLOC
2257 void __kernel_map_pages(struct page *page, int numpages, int enable)
2258 {
2259     if (PageHighMem(page))
2260         return;
2261     if (!enable) {
2262         debug_check_no_locks_freed(page_address(page),
2263                        numpages * PAGE_SIZE);
2264     }
2265 
2266     /*
2267      * The return value is ignored as the calls cannot fail.
2268      * Large pages for identity mappings are not used at boot time
2269      * and hence there are no memory allocations during a large page split.
2270      */
2271     if (enable)
2272         __set_pages_p(page, numpages);
2273     else
2274         __set_pages_np(page, numpages);
2275 
2276     /*
2277      * We should perform an IPI and flush all TLBs, but that can
2278      * deadlock, so flush only the current CPU.
2279      * Preemption needs to be disabled around __flush_tlb_all() due to
2280      * CR3 reload in __native_flush_tlb().
2281      */
2282     preempt_disable();
2283     __flush_tlb_all();
2284     preempt_enable();
2285 
2286     arch_flush_lazy_mmu_mode();
2287 }
2288 #endif /* CONFIG_DEBUG_PAGEALLOC */
2289 
2290 bool kernel_page_present(struct page *page)
2291 {
2292     unsigned int level;
2293     pte_t *pte;
2294 
2295     if (PageHighMem(page))
2296         return false;
2297 
2298     pte = lookup_address((unsigned long)page_address(page), &level);
2299     return (pte_val(*pte) & _PAGE_PRESENT);
2300 }
2301 
2302 int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
2303                    unsigned numpages, unsigned long page_flags)
2304 {
2305     int retval = -EINVAL;
2306 
2307     struct cpa_data cpa = {
2308         .vaddr = &address,
2309         .pfn = pfn,
2310         .pgd = pgd,
2311         .numpages = numpages,
2312         .mask_set = __pgprot(0),
2313         .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
2314         .flags = 0,
2315     };
2316 
2317     WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2318 
2319     if (!(__supported_pte_mask & _PAGE_NX))
2320         goto out;
2321 
2322     if (!(page_flags & _PAGE_ENC))
2323         cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
2324 
2325     cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
2326 
2327     retval = __change_page_attr_set_clr(&cpa, 0);
2328     __flush_tlb_all();
2329 
2330 out:
2331     return retval;
2332 }
2333 
2334 /*
2335  * __flush_tlb_all() flushes mappings only on the current CPU and hence this
2336  * function shouldn't be used in an SMP environment. Presently, it's used only
2337  * during boot (way before smp_init()) by the EFI subsystem and hence is OK.
2338  */
2339 int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
2340                      unsigned long numpages)
2341 {
2342     int retval;
2343 
2344     /*
2345      * The typical sequence for unmapping is to find a pte through
2346      * lookup_address_in_pgd() (ideally, it should never return NULL because
2347      * the address is already mapped) and change its protections. As pfn is
2348      * the *target* of a mapping, it's not useful while unmapping.
2349      */
2350     struct cpa_data cpa = {
2351         .vaddr      = &address,
2352         .pfn        = 0,
2353         .pgd        = pgd,
2354         .numpages   = numpages,
2355         .mask_set   = __pgprot(0),
2356         .mask_clr   = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2357         .flags      = 0,
2358     };
2359 
2360     WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2361 
2362     retval = __change_page_attr_set_clr(&cpa, 0);
2363     __flush_tlb_all();
2364 
2365     return retval;
2366 }
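/*
 * Boot-time usage sketch (illustrative; 'efi_pgd', 'phys_pfn', 'virt' and
 * 'nr' are placeholders in the spirit of the EFI code that calls these):
 * map a firmware region into a private page table and tear it down again,
 * all before secondary CPUs are brought up.
 *
 *	ret = kernel_map_pages_in_pgd(efi_pgd, phys_pfn, virt, nr, _PAGE_RW);
 *	...
 *	kernel_unmap_pages_in_pgd(efi_pgd, virt, nr);
 *
 * Both rely on running single-CPU (see the WARN_ONCE above), because only
 * the local TLB is flushed.
 */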
2367 
2368 /*
2369  * The testcases use internal knowledge of the implementation that shouldn't
2370  * be exposed to the rest of the kernel. Include these directly here.
2371  */
2372 #ifdef CONFIG_CPA_DEBUG
2373 #include "cpa-test.c"
2374 #endif