0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Provide common bits of early_ioremap() support for architectures needing
0004  * temporary mappings during boot before ioremap() is available.
0005  *
0006  * This is mostly a direct copy of the x86 early_ioremap implementation.
0007  *
0008  * (C) Copyright 1995 1996, 2014 Linus Torvalds
0009  *
0010  */
0011 #include <linux/kernel.h>
0012 #include <linux/init.h>
0013 #include <linux/io.h>
0014 #include <linux/module.h>
0015 #include <linux/slab.h>
0016 #include <linux/mm.h>
0017 #include <linux/vmalloc.h>
0018 #include <asm/fixmap.h>
0019 #include <asm/early_ioremap.h>
0020 #include "internal.h"
0021 
0022 #ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
    early_ioremap_debug = 1;

    return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

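/*
 * Set by early_ioremap_reset() once the real page tables are live; from
 * then on the __late_set_fixmap()/__late_clear_fixmap() hooks are used
 * instead of the early fixmap helpers.
 */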
static int after_paging_init __initdata;

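/*
 * Architectures can override this (it is a weak symbol) to adjust the
 * protection used for early_memremap() mappings, for example to handle
 * memory encryption; the default leaves the requested protection
 * untouched.
 */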
pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
                            unsigned long size,
                            pgprot_t prot)
{
    return prot;
}

void __init early_ioremap_reset(void)
{
    after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
                        phys_addr_t phys, pgprot_t prot)
{
    BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
    BUG();
}
#endif

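/*
 * Per-slot bookkeeping for the FIX_BTMAPS temporary mapping slots:
 * prev_map[]  - virtual address handed out for each live mapping (NULL = free)
 * prev_size[] - size the caller requested, re-checked at unmap time
 * slot_virt[] - fixed virtual base address of each slot
 */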
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_setup(void)
{
    int i;

    for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
        if (WARN_ON(prev_map[i]))
            break;

    for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
        slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

static int __init check_early_ioremap_leak(void)
{
    int count = 0;
    int i;

    for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
        if (prev_map[i])
            count++;

    if (WARN(count, KERN_WARNING
         "Debug warning: early ioremap leak of %d areas detected.\n"
         "Please boot with early_ioremap_debug and report the dmesg.\n",
         count))
        return 1;
    return 0;
}
late_initcall(check_early_ioremap_leak);

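/*
 * Map @size bytes at @phys_addr into a free FIX_BTMAPS slot with the given
 * protection. Returns the virtual address (including the sub-page offset
 * of @phys_addr), or NULL if no slot is free or the request is zero-sized,
 * wraps around, or exceeds one slot.
 */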
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
    unsigned long offset;
    resource_size_t last_addr;
    unsigned int nrpages;
    enum fixed_addresses idx;
    int i, slot;

    WARN_ON(system_state >= SYSTEM_RUNNING);

    slot = -1;
    for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
        if (!prev_map[i]) {
            slot = i;
            break;
        }
    }

    if (WARN(slot < 0, "%s(%pa, %08lx) no free slot found\n",
         __func__, &phys_addr, size))
        return NULL;

    /* Don't allow wraparound or zero size */
    last_addr = phys_addr + size - 1;
    if (WARN_ON(!size || last_addr < phys_addr))
        return NULL;

    prev_size[slot] = size;
    /*
     * Mappings have to be page-aligned
     */
    offset = offset_in_page(phys_addr);
    phys_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr + 1) - phys_addr;

    /*
     * Mappings have to fit in the FIX_BTMAP area.
     */
    nrpages = size >> PAGE_SHIFT;
    if (WARN_ON(nrpages > NR_FIX_BTMAPS))
        return NULL;

    /*
     * Ok, go for it..
     */
    idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
    while (nrpages > 0) {
        if (after_paging_init)
            __late_set_fixmap(idx, phys_addr, prot);
        else
            __early_set_fixmap(idx, phys_addr, prot);
        phys_addr += PAGE_SIZE;
        --idx;
        --nrpages;
    }
    WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
         __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

    prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
    return prev_map[slot];
}

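/*
 * Tear down a mapping created by __early_ioremap(). The size must match
 * what was originally mapped; the slot's fixmap entries are then cleared
 * page by page and the slot is marked free for reuse.
 */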
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
    unsigned long virt_addr;
    unsigned long offset;
    unsigned int nrpages;
    enum fixed_addresses idx;
    int i, slot;

    slot = -1;
    for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
        if (prev_map[i] == addr) {
            slot = i;
            break;
        }
    }

    if (WARN(slot < 0, "%s(%p, %08lx) no matching slot found\n",
          __func__, addr, size))
        return;

    if (WARN(prev_size[slot] != size,
         "%s(%p, %08lx) [%d] size not consistent %08lx\n",
          __func__, addr, size, slot, prev_size[slot]))
        return;

    WARN(early_ioremap_debug, "%s(%p, %08lx) [%d]\n",
          __func__, addr, size, slot);

    virt_addr = (unsigned long)addr;
    if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
        return;

    offset = offset_in_page(virt_addr);
    nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

    idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
    while (nrpages > 0) {
        if (after_paging_init)
            __late_clear_fixmap(idx);
        else
            __early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
        --idx;
        --nrpages;
    }
    prev_map[slot] = NULL;
}

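/*
 * Typical call pattern for the helpers below (an illustrative sketch,
 * not code from this file; "phys" and "len" are hypothetical):
 *
 *    void __iomem *regs = early_ioremap(phys, len);
 *    if (regs) {
 *        ... access the device ...
 *        early_iounmap(regs, len);
 *    }
 */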
/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
    return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
    pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
                             FIXMAP_PAGE_NORMAL);

    return (__force void *)__early_ioremap(phys_addr, size, prot);
}

#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
    pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
                             FIXMAP_PAGE_RO);

    return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
            unsigned long prot_val)
{
    return (__force void *)__early_ioremap(phys_addr, size,
                           __pgprot(prot_val));
}
#endif

#define MAX_MAP_CHUNK   (NR_FIX_BTMAPS << PAGE_SHIFT)

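/*
 * Copy @size bytes from physical address @src via a sequence of temporary
 * early mappings, each covering at most MAX_MAP_CHUNK (one full set of
 * NR_FIX_BTMAPS pages), so regions larger than a single slot can be
 * copied before the normal ioremap()/memremap() machinery is up.
 */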
void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
    unsigned long slop, clen;
    char *p;

    while (size) {
        slop = offset_in_page(src);
        clen = size;
        if (clen > MAX_MAP_CHUNK - slop)
            clen = MAX_MAP_CHUNK - slop;
        p = early_memremap(src & PAGE_MASK, clen + slop);
        memcpy(dest, p + slop, clen);
        early_memunmap(p, clen + slop);
        dest += clen;
        src += clen;
        size -= clen;
    }
}

#else /* CONFIG_MMU */

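/*
 * Without an MMU there is no address translation to set up: a "mapping"
 * is just the physical address itself and unmapping is a no-op.
 */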
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
    return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
    return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
    return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

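/* Counterpart to early_memremap(); shared by the MMU and !MMU paths. */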
void __init early_memunmap(void *addr, unsigned long size)
{
    early_iounmap((__force void __iomem *)addr, size);
}