// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>

/*
 * Check whether @paddr lies in the hardware uncached region: a fixed
 * window at/above ARC_UNCACHED_ADDR_SPACE on ARCompact, the peripheral
 * aperture [perip_base, perip_end] otherwise.
 */
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
    if (is_isa_arcompact()) {
        if (paddr >= ARC_UNCACHED_ADDR_SPACE)
            return true;
    } else if (paddr >= perip_base && paddr <= perip_end) {
        return true;
    }

    return false;
}

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
    phys_addr_t end;

    /* Don't allow wraparound or zero size */
    end = paddr + size - 1;
    if (!size || (end < paddr))
        return NULL;

    /*
     * If the region is h/w uncached, the MMU mapping can be elided as an
     * optimization. The cast to u32 is fine as this region can only be
     * inside 4GB.
     */
    if (arc_uncached_addr_space(paddr))
        return (void __iomem *)(u32)paddr;

    return ioremap_prot(paddr, size,
                pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}
EXPORT_SYMBOL(ioremap);
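
/*
 * A minimal usage sketch, assuming a hypothetical MMIO register at
 * offset 0x0: callers must treat the returned cookie as opaque, since
 * it is either the raw physical address (hardware-uncached fast path
 * above) or a vmalloc-range virtual address from ioremap_prot() below.
 */
static u32 __maybe_unused example_read_reg(phys_addr_t pa)
{
    void __iomem *base = ioremap(pa, PAGE_SIZE);
    u32 val = 0;

    if (base) {
        val = readl(base);    /* hypothetical register at offset 0x0 */
        iounmap(base);
    }
    return val;
}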

/*
 * ioremap with access flags
 * Cache-semantics-wise it is the same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap, which bypasses the ARC MMU for addresses
 * in the ARC hardware uncached region, this one still goes through the MMU,
 * as the caller might need finer access control (R/W/X)
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
               unsigned long flags)
{
    unsigned int off;
    unsigned long vaddr;
    struct vm_struct *area;
    phys_addr_t end;
    pgprot_t prot = __pgprot(flags);

    /* Don't allow wraparound or zero size */
    end = paddr + size - 1;
    if (!size || (end < paddr))
        return NULL;

    /* An early platform driver might end up here */
    if (!slab_is_available())
        return NULL;

    /* force uncached */
    prot = pgprot_noncached(prot);

    /* Mappings have to be page-aligned */
    off = paddr & ~PAGE_MASK;
    paddr &= PAGE_MASK_PHYS;
    size = PAGE_ALIGN(end + 1) - paddr;

    /* Ok, go for it.. */
    area = get_vm_area(size, VM_IOREMAP);
    if (!area)
        return NULL;
    area->phys_addr = paddr;
    vaddr = (unsigned long)area->addr;
    if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
        vunmap((void __force *)vaddr);
        return NULL;
    }
    return (void __iomem *)(off + (char __iomem *)vaddr);
}
EXPORT_SYMBOL(ioremap_prot);
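
/*
 * Sketch of calling ioremap_prot() directly with caller-chosen flags
 * (the flags value here is just an illustration): whatever is passed,
 * the function above still forces the mapping uncached via
 * pgprot_noncached(), so plain PAGE_KERNEL ends up non-cacheable too.
 */
static void __iomem * __maybe_unused example_map_with_flags(phys_addr_t pa)
{
    return ioremap_prot(pa, PAGE_SIZE, pgprot_val(PAGE_KERNEL));
}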

void iounmap(const void __iomem *addr)
{
    /* weird double cast to handle phys_addr_t > 32 bits */
    if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
        return;

    vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);
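
/*
 * Sketch of the symmetry with ioremap() above: if pa was inside the
 * hardware uncached window, the cookie is pa itself and iounmap()
 * returns early with nothing to tear down; otherwise the vmalloc
 * mapping set up by ioremap_prot() is released via vfree().
 */
static void __maybe_unused example_unmap_cookie(phys_addr_t pa)
{
    void __iomem *cookie = ioremap(pa, PAGE_SIZE);

    if (cookie)
        iounmap(cookie);    /* no-op for hardware-uncached cookies */
}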