/* (Web-viewer navigation chrome removed — not part of the source file.) */
0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #include <linux/device.h>
0003 #include <linux/types.h>
0004 #include <linux/io.h>
0005 #include <linux/mm.h>
0006 
#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
/*
 * Fallback ioremap_cache(): degrade to a plain ioremap() on architectures
 * that do not provide their own cacheable variant.  Marked __weak so an
 * architecture-specific definition takes precedence at link time.
 */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
    return ioremap(offset, size);
}
#endif
0014 
#ifndef arch_memremap_wb
/*
 * Default write-back remap: reuse ioremap_cache() and drop the __iomem
 * annotation, since memremap() callers deal in plain pointers.  An
 * architecture can override this by defining arch_memremap_wb itself.
 */
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
    return (__force void *)ioremap_cache(offset, size);
}
#endif
0021 
#ifndef arch_memremap_can_ram_remap
/*
 * Default policy hook: permit returning a direct-map alias for any RAM
 * range.  Architectures with constraints (e.g. ranges that must not be
 * accessed through the linear map) provide their own definition.
 */
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
                    unsigned long flags)
{
    return true;
}
#endif
0029 
0030 static void *try_ram_remap(resource_size_t offset, size_t size,
0031                unsigned long flags)
0032 {
0033     unsigned long pfn = PHYS_PFN(offset);
0034 
0035     /* In the simple case just return the existing linear address */
0036     if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
0037         arch_memremap_can_ram_remap(offset, size, flags))
0038         return __va(offset);
0039 
0040     return NULL; /* fallback to arch_memremap_wb */
0041 }
0042 
/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *        MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 *
 * Return: a kernel virtual address, or NULL on failure (no flags, mixed
 * RAM/non-RAM range, RAM requested with a non-WB-only mapping, or all
 * attempted mapping types failed).
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
    /* Classify the range up front: fully RAM, fully not, or mixed. */
    int is_ram = region_intersects(offset, size,
                       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
    void *addr = NULL;

    /* At least one mapping type must be requested. */
    if (!flags)
        return NULL;

    /* A range straddling RAM and non-RAM cannot be mapped coherently. */
    if (is_ram == REGION_MIXED) {
        WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
                &offset, (unsigned long) size);
        return NULL;
    }

    /* Try all mapping types requested until one returns non-NULL */
    if (flags & MEMREMAP_WB) {
        /*
         * MEMREMAP_WB is special in that it can be satisfied
         * from the direct map.  Some archs depend on the
         * capability of memremap() to autodetect cases where
         * the requested range is potentially in System RAM.
         */
        if (is_ram == REGION_INTERSECTS)
            addr = try_ram_remap(offset, size, flags);
        if (!addr)
            addr = arch_memremap_wb(offset, size);
    }

    /*
     * If we don't have a mapping yet and other request flags are
     * present then we will be attempting to establish a new virtual
     * address mapping.  Enforce that this mapping is not aliasing
     * System RAM.
     */
    if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
        WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
                &offset, (unsigned long) size);
        return NULL;
    }

    /* Fallback order after WB: write-through, then write-combine. */
    if (!addr && (flags & MEMREMAP_WT))
        addr = ioremap_wt(offset, size);

    if (!addr && (flags & MEMREMAP_WC))
        addr = ioremap_wc(offset, size);

    return addr;
}
EXPORT_SYMBOL(memremap);
0120 EXPORT_SYMBOL(memremap);
0121 
0122 void memunmap(void *addr)
0123 {
0124     if (is_ioremap_addr(addr))
0125         iounmap((void __iomem *) addr);
0126 }
0127 EXPORT_SYMBOL(memunmap);
0128 
/*
 * devres destructor: @res holds the (void *) returned by memremap(),
 * stored by devm_memremap(); undo it on device teardown.
 */
static void devm_memremap_release(struct device *dev, void *res)
{
    memunmap(*(void **)res);
}
0133 
/*
 * devres match callback: a resource matches when the mapping address it
 * stores equals @match_data (the address the caller wants released).
 */
static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
    void **stored = res;

    return *stored == match_data;
}
0138 
0139 void *devm_memremap(struct device *dev, resource_size_t offset,
0140         size_t size, unsigned long flags)
0141 {
0142     void **ptr, *addr;
0143 
0144     ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
0145             dev_to_node(dev));
0146     if (!ptr)
0147         return ERR_PTR(-ENOMEM);
0148 
0149     addr = memremap(offset, size, flags);
0150     if (addr) {
0151         *ptr = addr;
0152         devres_add(dev, ptr);
0153     } else {
0154         devres_free(ptr);
0155         return ERR_PTR(-ENXIO);
0156     }
0157 
0158     return addr;
0159 }
0160 EXPORT_SYMBOL(devm_memremap);
0161 
/*
 * Explicitly release a devm_memremap() mapping before device teardown.
 * devres_release() both unmaps (via devm_memremap_release) and frees the
 * devres entry; WARN_ON fires if no matching entry exists, i.e. @addr was
 * not obtained from devm_memremap() on this @dev.
 */
void devm_memunmap(struct device *dev, void *addr)
{
    WARN_ON(devres_release(dev, devm_memremap_release,
                devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
0167 EXPORT_SYMBOL(devm_memunmap);