Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 
0003 #include <linux/io.h>
0004 #include <linux/slab.h>
0005 #include <linux/mmzone.h>
0006 #include <linux/vmalloc.h>
0007 #include <asm/io-workarounds.h>
0008 
/*
 * NOTE(review): presumably tracks the current boundary of the kernel's
 * ioremap virtual area; initialised by arch setup code outside this file —
 * confirm against the platform's mm init before relying on its value.
 */
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
0012 void __iomem *ioremap(phys_addr_t addr, unsigned long size)
0013 {
0014     pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
0015     void *caller = __builtin_return_address(0);
0016 
0017     if (iowa_is_active())
0018         return iowa_ioremap(addr, size, prot, caller);
0019     return __ioremap_caller(addr, size, prot, caller);
0020 }
0021 EXPORT_SYMBOL(ioremap);
0022 
0023 void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
0024 {
0025     pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
0026     void *caller = __builtin_return_address(0);
0027 
0028     if (iowa_is_active())
0029         return iowa_ioremap(addr, size, prot, caller);
0030     return __ioremap_caller(addr, size, prot, caller);
0031 }
0032 EXPORT_SYMBOL(ioremap_wc);
0033 
0034 void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
0035 {
0036     pgprot_t prot = pgprot_cached(PAGE_KERNEL);
0037     void *caller = __builtin_return_address(0);
0038 
0039     if (iowa_is_active())
0040         return iowa_ioremap(addr, size, prot, caller);
0041     return __ioremap_caller(addr, size, prot, caller);
0042 }
0043 
0044 void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
0045 {
0046     pte_t pte = __pte(flags);
0047     void *caller = __builtin_return_address(0);
0048 
0049     /* writeable implies dirty for kernel addresses */
0050     if (pte_write(pte))
0051         pte = pte_mkdirty(pte);
0052 
0053     /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
0054     pte = pte_exprotect(pte);
0055     pte = pte_mkprivileged(pte);
0056 
0057     if (iowa_is_active())
0058         return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
0059     return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
0060 }
0061 EXPORT_SYMBOL(ioremap_prot);
0062 
0063 int early_ioremap_range(unsigned long ea, phys_addr_t pa,
0064             unsigned long size, pgprot_t prot)
0065 {
0066     unsigned long i;
0067 
0068     for (i = 0; i < size; i += PAGE_SIZE) {
0069         int err = map_kernel_page(ea + i, pa + i, prot);
0070 
0071         if (WARN_ON_ONCE(err))  /* Should clean up */
0072             return err;
0073     }
0074 
0075     return 0;
0076 }
0077 
0078 void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
0079              pgprot_t prot, void *caller)
0080 {
0081     struct vm_struct *area;
0082     int ret;
0083     unsigned long va;
0084 
0085     area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
0086     if (area == NULL)
0087         return NULL;
0088 
0089     area->phys_addr = pa;
0090     va = (unsigned long)area->addr;
0091 
0092     ret = ioremap_page_range(va, va + size, pa, prot);
0093     if (!ret)
0094         return (void __iomem *)area->addr + offset;
0095 
0096     vunmap_range(va, va + size);
0097     free_vm_area(area);
0098 
0099     return NULL;
0100 }