// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

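/*
 * The linear map can only be manipulated safely when it was installed at
 * page granularity, i.e. with rodata=full or DEBUG_PAGEALLOC enabled.
 */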
bool can_set_direct_map(void)
{
	return rodata_full || debug_pagealloc_enabled();
}

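/*
 * Callback for apply_to_page_range(): clear and then set the requested
 * attribute bits on a single live kernel PTE.
 */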
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				  pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same change
	 * to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

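/*
 * Note: PTE_MAYBE_GP is the BTI guarded-page attribute (PTE_GP) when the
 * kernel is built with CONFIG_ARM64_BTI_KERNEL, and zero otherwise, so
 * set_memory_x()/set_memory_nx() toggle it together with PTE_PXN.
 */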
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}

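/*
 * Toggle PTE_VALID for numpages pages at addr. The range must already be
 * mapped at page granularity; the TLB is flushed by __change_memory_common(),
 * but unlike the set_memory_*() calls above there is no vmalloc-area check.
 */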
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

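/*
 * The two *_noflush() helpers below change the linear-map entry for a
 * single page without any TLB maintenance; callers are responsible for
 * flushing (or for tolerating stale TLB entries until they do).
 */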
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

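/*
 * With DEBUG_PAGEALLOC, pages are unmapped from the linear map when freed
 * and mapped back on allocation, so stray accesses to freed memory fault
 * immediately instead of silently corrupting reused pages.
 */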
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!can_set_direct_map())
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit. This is based
 * on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!can_set_direct_map())
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}