// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include <linux/sched/task.h>

#ifdef CONFIG_KASAN
int kasan_um_is_ready;
void kasan_init(void)
{
	/*
	 * kasan_map_memory will map all of the required address space and
	 * the host machine will allocate physical memory as necessary.
	 */
	kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
	init_task.kasan_depth = 0;
	kasan_um_is_ready = true;
}

static void (*kasan_init_ptr)(void)
__section(".kasan_init") __used
= kasan_init;
#endif
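
/*
 * Editor's note (hedged sketch of the mechanism, not text from the original
 * file): kasan_init_ptr above is emitted into a dedicated ".kasan_init"
 * section, which, as I understand the UML linker scripts, is placed ahead
 * of the normal ELF constructors. That way kasan_init() runs before any
 * other constructor or KASAN-instrumented kernel code executes, long
 * before the first initcall.
 */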

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free((void *)brk_end, uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_low_pfn = totalram_pages();
	max_pfn = max_low_pfn;
	kmalloc_ok = 1;
}
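
/*
 * Editor's sketch of the arithmetic above (assumes, as early boot arranges,
 * that uml_reserved >= brk_end): the window [brk_end, uml_reserved) was
 * reserved at start-up to keep it clear of the host's brk. Once sbrk(0)
 * pins the real break, that window is mapped, handed back to memblock,
 * and uml_reserved is lowered so those pages reach the page allocator.
 */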

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}
}

static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}
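
/*
 * Editor's note (not in the original): without CONFIG_3_LEVEL_PGTABLES the
 * pmd level is folded into the pud, so pmd_offset(pud, 0) resolves to the
 * pud entry itself and no separate table page is needed - which is why the
 * body above compiles away in two-level configurations.
 */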

static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}
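
/*
 * Usage sketch (editor's addition; FIX_EXAMPLE_IDX is a hypothetical
 * index): fixrange_init() only builds the page-table skeleton down to the
 * pte level and installs no translations. Callers supply the actual pages
 * later, via set_fixmap() or, as fixaddr_user_init() below does, by
 * writing ptes directly:
 *
 *	fixrange_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);
 *	set_fixmap(FIX_EXAMPLE_IDX, some_phys_addr);
 */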

static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
			  p += PAGE_SIZE) {
		pte = virt_to_kpte(vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}

void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	unsigned long vaddr;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}
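
/*
 * Editor's note (reading of the zone setup above): only ZONE_NORMAL is
 * populated - everything up to end_iomem counts as directly mapped low
 * memory, so the remaining max_zone_pfn entries stay zero and no DMA or
 * highmem zones are configured here.
 */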

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

/* Allocate and free page tables. */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}
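
/*
 * Layout sketch (editor's addition): a fresh pgd gets an empty user half
 * and a kernel half copied from swapper_pg_dir, so every mm shares the
 * kernel mappings with no further setup:
 *
 *	pgd[0 .. USER_PTRS_PER_PGD-1]             - zeroed, filled on fault
 *	pgd[USER_PTRS_PER_PGD .. PTRS_PER_PGD-1]  - swapper_pg_dir entries
 */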

void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}
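
/*
 * Editor's note (assumption about intent): this wrapper lets the
 * userspace (os-Linux) side of UML, which cannot include kernel headers
 * such as <linux/slab.h>, allocate kernel memory through a plain
 * function call.
 */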

static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT
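
/*
 * Usage sketch (editor's addition; the expansion is paraphrased from
 * include/linux/pgtable.h and may drift between releases):
 * DECLARE_VM_GET_PAGE_PROT defines this arch's vm_get_page_prot() as a
 * masked lookup into protection_map:
 *
 *	pgprot_t vm_get_page_prot(unsigned long vm_flags)
 *	{
 *		return protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *	}
 *	EXPORT_SYMBOL(vm_get_page_prot);
 */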