Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright IBM Corp. 2008
0004  *
0005  * Guest page hinting for unused pages.
0006  *
0007  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
0008  */
0009 
0010 #include <linux/kernel.h>
0011 #include <linux/errno.h>
0012 #include <linux/types.h>
0013 #include <linux/mm.h>
0014 #include <linux/memblock.h>
0015 #include <linux/gfp.h>
0016 #include <linux/init.h>
0017 #include <asm/asm-extable.h>
0018 #include <asm/facility.h>
0019 #include <asm/page-states.h>
0020 
/*
 * CMMA state: 0 = disabled, 1 = enabled (ESSA available),
 * 2 = enabled and the no-dat facility (facility bit 147) is present.
 * Defaults to enabled; overridable via the "cmma=" kernel parameter.
 */
static int cmma_flag = 1;
0022 
0023 static int __init cmma(char *str)
0024 {
0025     bool enabled;
0026 
0027     if (!kstrtobool(str, &enabled))
0028         cmma_flag = enabled;
0029     return 1;
0030 }
0031 __setup("cmma=", cmma);
0032 
/*
 * Probe whether the ESSA (Extract and Set Storage Attributes)
 * instruction is available by issuing it with the ESSA_GET_STATE
 * command.
 *
 * Returns 0 when ESSA executed successfully, -EOPNOTSUPP when the
 * instruction raised a program check: the exception table entry sends
 * a fault to label 1:, skipping the "la %[rc],0" that would otherwise
 * clear the error code.
 * NOTE(review): the extable anchor is 0: (the instruction *after* the
 * .insn) — presumably because an s390 program check leaves the PSW
 * pointing past the faulting instruction; confirm against the
 * asm/asm-extable.h semantics.
 */
static inline int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = -EOPNOTSUPP;

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0:     la      %[rc],0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE));
	return rc;
}
0048 
0049 void __init cmma_init(void)
0050 {
0051     if (!cmma_flag)
0052         return;
0053     if (cmma_test_essa()) {
0054         cmma_flag = 0;
0055         return;
0056     }
0057     if (test_facility(147))
0058         cmma_flag = 2;
0059 }
0060 
/*
 * Read the CMMA usage state of @page via ESSA_GET_STATE.
 * Only the low 6 bits of the register returned by ESSA carry the
 * state, hence the mask with 0x3f.
 */
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}
0071 
/*
 * Hint the 2^order pages starting at @page as unused (ESSA_SET_UNUSED).
 * ESSA operates on a single page, hence the per-page loop; rc only
 * absorbs the instruction's register output and is deliberately unused.
 */
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}
0082 
/*
 * Set the 2^order pages starting at @page to the stable state
 * (ESSA_SET_STABLE). One ESSA per page; rc is a dummy output.
 */
static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
0093 
/*
 * Set the 2^order pages starting at @page to the stable no-dat state
 * (ESSA_SET_STABLE_NODAT); used when cmma_flag == 2, i.e. facility 147
 * is installed. One ESSA per page; rc is a dummy output.
 */
static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}
0104 
/*
 * For every present, non-large pmd entry covering [addr, end), set
 * PG_arch_1 on the page containing the pte table the entry points to.
 * cmma_init_nodat() later skips pages with PG_arch_1 set, so pages in
 * use as page tables are never switched to the no-dat state.
 */
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* no pte table behind absent or large-page entries */
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = phys_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}
0120 
0121 static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
0122 {
0123     unsigned long next;
0124     struct page *page;
0125     pud_t *pud;
0126     int i;
0127 
0128     pud = pud_offset(p4d, addr);
0129     do {
0130         next = pud_addr_end(addr, end);
0131         if (pud_none(*pud) || pud_large(*pud))
0132             continue;
0133         if (!pud_folded(*pud)) {
0134             page = phys_to_page(pud_val(*pud));
0135             for (i = 0; i < 3; i++)
0136                 set_bit(PG_arch_1, &page[i].flags);
0137         }
0138         mark_kernel_pmd(pud, addr, next);
0139     } while (pud++, addr = next, addr != end);
0140 }
0141 
0142 static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
0143 {
0144     unsigned long next;
0145     struct page *page;
0146     p4d_t *p4d;
0147     int i;
0148 
0149     p4d = p4d_offset(pgd, addr);
0150     do {
0151         next = p4d_addr_end(addr, end);
0152         if (p4d_none(*p4d))
0153             continue;
0154         if (!p4d_folded(*p4d)) {
0155             page = phys_to_page(p4d_val(*p4d));
0156             for (i = 0; i < 3; i++)
0157                 set_bit(PG_arch_1, &page[i].flags);
0158         }
0159         mark_kernel_pud(p4d, addr, next);
0160     } while (p4d++, addr = next, addr != end);
0161 }
0162 
0163 static void mark_kernel_pgd(void)
0164 {
0165     unsigned long addr, next;
0166     struct page *page;
0167     pgd_t *pgd;
0168     int i;
0169 
0170     addr = 0;
0171     pgd = pgd_offset_k(addr);
0172     do {
0173         next = pgd_addr_end(addr, MODULES_END);
0174         if (pgd_none(*pgd))
0175             continue;
0176         if (!pgd_folded(*pgd)) {
0177             page = phys_to_page(pgd_val(*pgd));
0178             for (i = 0; i < 3; i++)
0179                 set_bit(PG_arch_1, &page[i].flags);
0180         }
0181         mark_kernel_p4d(pgd, addr, next);
0182     } while (pgd++, addr = next, addr != MODULES_END);
0183 }
0184 
/*
 * Second-stage CMMA init, only active when cmma_flag == 2 (no-dat
 * facility available): mark every page currently used as a kernel page
 * table, then set all remaining in-use kernel pages to stable/no-dat.
 *
 * Per memblock range each pfn is filtered:
 *  - PG_arch_1 set: a page-table page; the bit is consumed (cleared)
 *    and the page is left DAT-accessible;
 *  - non-empty page->lru: a free page, skipped here and handled by
 *    arch_alloc_page() when it is later allocated.
 */
void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}
0208 
0209 void arch_free_page(struct page *page, int order)
0210 {
0211     if (!cmma_flag)
0212         return;
0213     set_page_unused(page, order);
0214 }
0215 
0216 void arch_alloc_page(struct page *page, int order)
0217 {
0218     if (!cmma_flag)
0219         return;
0220     if (cmma_flag < 2)
0221         set_page_stable_dat(page, order);
0222     else
0223         set_page_stable_nodat(page, order);
0224 }
0225 
0226 void arch_set_page_dat(struct page *page, int order)
0227 {
0228     if (!cmma_flag)
0229         return;
0230     set_page_stable_dat(page, order);
0231 }