#include <linux/mm.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/set_memory.h>

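/*
 * Bits to set and to clear in a kernel PTE; filled in by the set_memory_*()
 * callers below and applied to each page-table entry by change_page_range().
 */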
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

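/*
 * Callback for apply_to_page_range(): rewrite a single kernel PTE, first
 * clearing and then setting the requested protection bits, without changing
 * the physical address the entry points at.
 */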
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte_ext(ptep, pte, 0);
	return 0;
}

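/*
 * Return true if [start, start + size) lies entirely within
 * [range_start, range_end). The size comparison is written against
 * range_end - start so it cannot overflow near the top of the address space.
 */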
static bool in_range(unsigned long start, unsigned long size,
	unsigned long range_start, unsigned long range_end)
{
	return start >= range_start && start < range_end &&
		size <= range_end - start;
}

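/*
 * Walk the kernel page tables (init_mm) for [start, start + size), apply the
 * set/clear masks to every PTE in the range, then flush the TLB for that
 * range so the new permissions take effect.
 */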
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

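/*
 * Validate and page-align the request before applying the permission change.
 * The address is expected to be page-aligned already (warn once if not), and
 * only module space and the vmalloc area may be modified; anything else is
 * rejected with -EINVAL.
 */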
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr & PAGE_MASK;
	unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
	unsigned long size = end - start;

	WARN_ON_ONCE(start != addr);

	if (!size)
		return 0;

	if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
	    !in_range(start, size, VMALLOC_START, VMALLOC_END))
		return -EINVAL;

	return __change_memory_common(start, size, set_mask, clear_mask);
}

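/*
 * set_memory_ro/rw/nx/x: change the protection of a range of pages by
 * setting or clearing the L_PTE_RDONLY and L_PTE_XN bits respectively.
 */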
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(L_PTE_RDONLY),
					__pgprot(0));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(L_PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(L_PTE_XN),
					__pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(L_PTE_XN));
}

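/*
 * Set or clear the PTE valid bit on a range of pages, making them
 * inaccessible or accessible again without altering the other protection
 * bits. Unlike the helpers above, this bypasses the module/vmalloc range
 * check and calls __change_memory_common() directly, with the size given
 * in bytes.
 */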
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(L_PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(L_PTE_VALID));
}