#include <linux/bug.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/initrd.h>

#include <asm/setup.h>
#include <asm/cachectl.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>

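/*
 * Number of PTEs needed to cover the kernel half of the virtual
 * address space: one full PTE table per kernel PGD slot.
 */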
#define PTRS_KERN_TABLE \
		((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE)

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss;

EXPORT_SYMBOL(invalid_pte_table);
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
						__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

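/*
 * Validate the bootloader-supplied initrd range and reserve it in
 * memblock so its pages are not handed to the buddy allocator.
 */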
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	unsigned long size;

	if (initrd_start >= initrd_end) {
		pr_err("initrd not found or empty");
		goto disable;
	}

	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;

	if (memblock_is_region_reserved(__pa(initrd_start), size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
		       __pa(initrd_start), size);
		goto disable;
	}

	memblock_reserve(__pa(initrd_start), size);

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);

	initrd_below_start_ok = 1;

	return;

disable:
	initrd_start = initrd_end = 0;

	pr_err(" - disabling initrd\n");
}
#endif

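/*
 * Late memory init: record the top of the direct-mapped area, reserve
 * the initrd, then release all free memblock memory (and any
 * unreserved highmem pages) to the buddy allocator.
 */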
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET);
#else
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

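		/* Only hand back highmem pages that memblock has not reserved. */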
		if (!memblock_is_reserved(tmp << PAGE_SHIFT))
			free_highmem_page(page);
	}
#endif
}

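/*
 * Free the .init sections once boot is complete; the -1 poison value
 * releases the pages without writing a poison pattern first.
 */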
void free_initmem(void)
{
	free_initmem_default(-1);
}

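/*
 * Initialize a fresh PGD: every slot points at invalid_pte_table, so
 * an unmapped access faults instead of hitting a stale translation.
 */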
void pgd_init(unsigned long *p)
{
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++)
		p[i] = __pa(invalid_pte_table);

	flush_tlb_all();
	local_icache_inv_all(NULL);
}

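/*
 * Build the initial kernel page tables: user PGD slots point at the
 * invalid PTE table, kernel slots at kernel_pte_tables, and PFNs in
 * [min_pfn, max_pfn) are mapped linearly with PAGE_KERNEL.
 */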
void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
{
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		swapper_pg_dir[i].pgd = __pa(invalid_pte_table);

	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i].pgd =
			__pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD)));

	for (i = 0; i < PTRS_KERN_TABLE; i++)
		set_pte(&kernel_pte_tables[i], __pte(_PAGE_GLOBAL));

	for (i = min_pfn; i < max_pfn; i++)
		set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL));

	flush_tlb_all();
	local_icache_inv_all(NULL);

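	/* Setup the hardware page mask for the base 4k page size. */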
	write_mmu_pagemask(0);

	setup_pgd(swapper_pg_dir, 0);
}

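/*
 * Walk [start, end) through the pgd/pud/pmd levels and allocate a
 * zeroed PTE page for every PMD entry that is still empty.
 */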
void __init fixrange_init(unsigned long start, unsigned long end,
			pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd(__pa(pte)));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}

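/* Pre-allocate the page table that backs the fixmap region. */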
void __init fixaddr_init(void)
{
	unsigned long vaddr;

	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
}

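/*
 * vm_flags to pgprot translation. Private writable mappings map
 * read-only so the first store faults and triggers copy-on-write;
 * only shared writable mappings get a writable pgprot.
 */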
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READ,
	[VM_WRITE]					= PAGE_READ,
	[VM_WRITE | VM_READ]				= PAGE_READ,
	[VM_EXEC]					= PAGE_READ,
	[VM_EXEC | VM_READ]				= PAGE_READ,
	[VM_EXEC | VM_WRITE]				= PAGE_READ,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READ,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READ,
	[VM_SHARED | VM_WRITE]				= PAGE_WRITE,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_WRITE,
	[VM_SHARED | VM_EXEC]				= PAGE_READ,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READ,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_WRITE,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_WRITE
};
DECLARE_VM_GET_PAGE_PROT