// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
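
/*
 * Illustrative sketch (not part of this file): the driver-side pattern
 * the comment above describes, assuming <linux/io.h> and a hypothetical
 * register window at EXAMPLE_PHYS.
 */
#if 0
#define EXAMPLE_PHYS	0x40000000	/* hypothetical device base */

static int example_probe(void)
{
    void __iomem *regs = ioremap(EXAMPLE_PHYS, SZ_4K);

    if (!regs)
        return -ENOMEM;
    writel(0x1, regs);		/* only writel/readl & co. on this */
    (void)readl(regs + 0x4);
    iounmap(regs);
    return 0;
}
#endif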
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"


LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
            size_t size, unsigned int mtype)
{
    struct static_vm *svm;
    struct vm_struct *vm;

    list_for_each_entry(svm, &static_vmlist, list) {
        vm = &svm->vm;
        if (!(vm->flags & VM_ARM_STATIC_MAPPING))
            continue;
        if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
            continue;

        if (vm->phys_addr > paddr ||
            paddr + size - 1 > vm->phys_addr + vm->size - 1)
            continue;

        return svm;
    }

    return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
    struct static_vm *svm;
    struct vm_struct *vm;

    list_for_each_entry(svm, &static_vmlist, list) {
        vm = &svm->vm;

        /* static_vmlist is sorted in ascending address order */
        if (vm->addr > vaddr)
            break;

        if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
            return svm;
    }

    return NULL;
}

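/*
 * Insert svm into static_vmlist, keeping the list sorted by virtual
 * address: the walk stops at the first entry that starts above the new
 * one, and list_add_tail() links the new entry in just before it (or
 * at the tail when the walk runs off the end).
 */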
void __init add_static_vm_early(struct static_vm *svm)
{
    struct static_vm *curr_svm;
    struct vm_struct *vm;
    void *vaddr;

    vm = &svm->vm;
    vm_area_add_early(vm);
    vaddr = vm->addr;

    list_for_each_entry(curr_svm, &static_vmlist, list) {
        vm = &curr_svm->vm;

        if (vm->addr > vaddr)
            break;
    }
    list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
         const struct mem_type *mtype)
{
    return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

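/*
 * Resynchronise this mm's top-level vmalloc-region page-table entries
 * with init_mm's. The sequence counter is re-read after the copy, so a
 * concurrent update to init_mm restarts the loop; the store-release
 * publishes the copied entries together with the new counter value.
 */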
void __check_vmalloc_seq(struct mm_struct *mm)
{
    int seq;

    do {
        seq = atomic_read(&init_mm.context.vmalloc_seq);
        memcpy(pgd_offset(mm, VMALLOC_START),
               pgd_offset_k(VMALLOC_START),
               sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                    pgd_index(VMALLOC_START)));
        /*
         * Use a store-release so that other CPUs that observe the
         * counter's new value are guaranteed to see the results of the
         * memcpy as well.
         */
        atomic_set_release(&mm->context.vmalloc_seq, seq);
    } while (seq != atomic_read(&init_mm.context.vmalloc_seq));
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates an extra 4K guard page, so we
 * must mask the size back down to a 1MB boundary or the loop below will
 * overrun the area.
 */
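/*
 * Worked example of that masking (illustrative): a 1MiB request comes
 * back from get_vm_area_caller() as 0x101000 bytes including the 4K
 * guard page; 0x101000 & ~(SZ_1M - 1) == 0x100000, keeping 'end' below
 * clear of the guard page.
 */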
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
    unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
    pmd_t *pmdp = pmd_off_k(addr);

    do {
        pmd_t pmd = *pmdp;

        if (!pmd_none(pmd)) {
            /*
             * Clear the PMD from the page table, and
             * increment the vmalloc sequence so others
             * notice this change.
             *
             * Note: this is still racy on SMP machines.
             */
            pmd_clear(pmdp);
            atomic_inc_return_release(&init_mm.context.vmalloc_seq);

            /*
             * Free the page table, if there was one.
             */
            if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
        }

        addr += PMD_SIZE;
        pmdp += 2;
    } while (addr < end);

    /*
     * Ensure that the active_mm is up to date - we want to
     * catch any use-after-iounmap cases.
     */
    check_vmalloc_seq(current->active_mm);

    flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
            size_t size, const struct mem_type *type)
{
    unsigned long addr = virt, end = virt + size;
    pmd_t *pmd = pmd_off_k(addr);

    /*
     * Remove and free any PTE-based mapping, and
     * sync the current kernel mapping.
     */
    unmap_area_sections(virt, size);

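    /*
     * On 2-level ARM a Linux PMD covers 2MiB and is backed by a pair
     * of 1MiB hardware section entries, hence two writes per loop.
     */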
    do {
        pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
        pfn += SZ_1M >> PAGE_SHIFT;
        pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
        pfn += SZ_1M >> PAGE_SHIFT;
        flush_pmd_entry(pmd);

        addr += PMD_SIZE;
        pmd += 2;
    } while (addr < end);

    return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
             size_t size, const struct mem_type *type)
{
    unsigned long addr = virt, end = virt + size;
    pmd_t *pmd = pmd_off_k(addr);

    /*
     * Remove and free any PTE-based mapping, and
     * sync the current kernel mapping.
     */
    unmap_area_sections(virt, size);
    do {
        unsigned long super_pmd_val, i;

        super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                PMD_SECT_SUPER;
        super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

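        /*
         * A supersection maps 16MiB and must be replicated across 16
         * consecutive hardware entries (8 Linux PMD pairs); physical
         * address bits [35:32] sit in descriptor bits [23:20], as set
         * up above.
         */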
        for (i = 0; i < 8; i++) {
            pmd[0] = __pmd(super_pmd_val);
            pmd[1] = __pmd(super_pmd_val);
            flush_pmd_entry(pmd);

            addr += PMD_SIZE;
            pmd += 2;
        }

        pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
    } while (addr < end);

    return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
    unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
    const struct mem_type *type;
    int err;
    unsigned long addr;
    struct vm_struct *area;
    phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
    /*
     * High mappings (pfn >= 0x100000, i.e. physical addresses at or
     * above 4GiB) must be supersection aligned.
     */
    if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
        return NULL;
#endif

    type = get_mem_type(mtype);
    if (!type)
        return NULL;

    /*
     * Page align the mapping size, taking account of any offset.
     */
    size = PAGE_ALIGN(offset + size);

    /*
     * Try to reuse one of the static mappings whenever possible.
     */
    if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
        struct static_vm *svm;

        svm = find_static_vm_paddr(paddr, size, mtype);
        if (svm) {
            addr = (unsigned long)svm->vm.addr;
            addr += paddr - svm->vm.phys_addr;
            return (void __iomem *) (offset + addr);
        }
    }

    /*
     * Don't allow RAM to be mapped with mismatched attributes - this
     * causes problems with ARMv6+
     */
    if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
            mtype != MT_MEMORY_RW))
        return NULL;

    area = get_vm_area_caller(size, VM_IOREMAP, caller);
    if (!area)
        return NULL;
    addr = (unsigned long)area->addr;
    area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
    if (DOMAIN_IO == 0 &&
        (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
           cpu_is_xsc3()) && pfn >= 0x100000 &&
           !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_supersections(addr, pfn, size, type);
    } else if (!((paddr | size | addr) & ~PMD_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_sections(addr, pfn, size, type);
    } else
#endif
        err = ioremap_page_range(addr, addr + size, paddr,
                     __pgprot(type->prot_pte));

    if (err) {
        vunmap((void *)addr);
        return NULL;
    }

    flush_cache_vmap(addr, addr + size);
    return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
    unsigned int mtype, void *caller)
{
    phys_addr_t last_addr;
    unsigned long offset = phys_addr & ~PAGE_MASK;
    unsigned long pfn = __phys_to_pfn(phys_addr);

    /*
     * Don't allow wraparound or zero size
     */
    last_addr = phys_addr + size - 1;
    if (!size || last_addr < phys_addr)
        return NULL;

    return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
            caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
          unsigned int mtype)
{
    return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                    __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
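
/*
 * Worked example of the offset handling described in the NOTE above
 * (illustrative): an ioremap() of 0x100 bytes at bus address 0x40001234
 * yields offset = 0x234, the request is page-aligned to a single 4K
 * page at 0x40001000, and the caller gets back the new virtual base
 * plus 0x234 - the alignment fixup stays hidden.
 */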

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
                      unsigned int, void *) =
    __arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
    return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
    return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
    return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
                   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
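
/*
 * Illustrative sketch (not part of this file): picking between the
 * variants above. Addresses and sizes are hypothetical.
 */
#if 0
static void example_variants(void)
{
    /* device registers: non-cacheable device memory */
    void __iomem *regs = ioremap(0x40000000, SZ_4K);
    /* frame buffer: write-combining batches CPU writes */
    void __iomem *fb = ioremap_wc(0x60000000, SZ_8M);

    iounmap(fb);
    iounmap(regs);
}
#endif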

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
    unsigned int mtype;

    if (cached)
        mtype = MT_MEMORY_RWX;
    else
        mtype = MT_MEMORY_RWX_NONCACHED;

    return __arm_ioremap_caller(phys_addr, size, mtype,
            __builtin_return_address(0));
}
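
/*
 * Illustrative sketch (not part of this file): the classic consumer of
 * __arm_ioremap_exec() copies a routine into SRAM with fncpy() and runs
 * it from there; sram_phys and do_reprogram() are hypothetical.
 */
#if 0
#include <asm/fncpy.h>

extern void do_reprogram(void);	/* hypothetical routine to relocate */
static void (*sram_fn)(void);

static int example_load_sram(phys_addr_t sram_phys, size_t size)
{
    void __iomem *sram = __arm_ioremap_exec(sram_phys, size, false);

    if (!sram)
        return -ENOMEM;
    /* fncpy() copies the code and keeps the Thumb bit of the pointer */
    sram_fn = fncpy(sram, &do_reprogram, size);
    sram_fn();			/* now executing from SRAM */
    return 0;
}
#endif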

void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
{
    set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
    return (__force void *)arch_ioremap_caller(phys_addr, size,
                           MT_MEMORY_RW,
                           __builtin_return_address(0));
}

void iounmap(volatile void __iomem *io_addr)
{
    void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
    struct static_vm *svm;

    /* If this is a static mapping, we must leave it alone */
    svm = find_static_vm_vaddr(addr);
    if (svm)
        return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
    {
        struct vm_struct *vm;

        vm = find_vm_area(addr);

        /*
         * If this is a section based mapping we need to handle it
         * specially as the VM subsystem does not know how to handle
         * such a beast.
         */
        if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
            unmap_area_sections((unsigned long)vm->addr, vm->size);
    }
#endif

    vunmap(addr);
}
EXPORT_SYMBOL(iounmap);

#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
    pci_ioremap_mem_type = mem_type;
}

int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
    unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

    if (!(res->flags & IORESOURCE_IO))
        return -EINVAL;

    if (res->end > IO_SPACE_LIMIT)
        return -EINVAL;

    return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
                  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);
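
/*
 * Illustrative sketch (not part of this file): a host bridge driver
 * hands pci_remap_iospace() an IORESOURCE_IO resource in logical port
 * space; io_phys (the CPU address of the I/O window) is hypothetical.
 */
#if 0
static int example_map_io_window(phys_addr_t io_phys)
{
    struct resource io_res = {
        .name  = "PCI I/O",
        .start = 0,		/* logical port base */
        .end   = SZ_64K - 1,
        .flags = IORESOURCE_IO,
    };

    return pci_remap_iospace(&io_res, io_phys);
}
#endif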

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
    return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
                   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
    early_ioremap_setup();
}

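/*
 * Tell the generic memremap() code whether it may reuse the kernel's
 * linear mapping for this range: only when memblock maps it as ordinary
 * RAM.
 */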
bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
                 unsigned long flags)
{
    unsigned long pfn = PHYS_PFN(offset);

    /* memblock_is_map_memory() expects a physical address, not a pfn */
    return memblock_is_map_memory(PFN_PHYS(pfn));
}