0001
0002 #include <linux/device.h>
0003 #include <linux/types.h>
0004 #include <linux/io.h>
0005 #include <linux/mm.h>
0006
0007 #ifndef ioremap_cache
0008
0009 __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
0010 {
0011 return ioremap(offset, size);
0012 }
0013 #endif
0014
0015 #ifndef arch_memremap_wb
0016 static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
0017 {
0018 return (__force void *)ioremap_cache(offset, size);
0019 }
0020 #endif
0021
0022 #ifndef arch_memremap_can_ram_remap
/*
 * Default policy: any System RAM range may be remapped through the
 * linear map.  Architectures that define arch_memremap_can_ram_remap
 * override this to veto specific ranges.
 */
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
		unsigned long flags)
{
	return true;
}
0028 #endif
0029
0030 static void *try_ram_remap(resource_size_t offset, size_t size,
0031 unsigned long flags)
0032 {
0033 unsigned long pfn = PHYS_PFN(offset);
0034
0035
0036 if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
0037 arch_memremap_can_ram_remap(offset, size, flags))
0038 return __va(offset);
0039
0040 return NULL;
0041 }
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT or MEMREMAP_WC
 *
 * Returns the mapped address on success, or NULL when no flags are
 * given, the range mixes System RAM with other resources, or a
 * non-WB request would alias System RAM.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	/* At least one mapping type must be requested. */
	if (!flags)
		return NULL;

	/* Refuse ranges that straddle System RAM and other resources. */
	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the kernel's linear map when the range is
		 * System RAM; otherwise fall back to the arch's
		 * cached mapping.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);
0121
0122 void memunmap(void *addr)
0123 {
0124 if (is_ioremap_addr(addr))
0125 iounmap((void __iomem *) addr);
0126 }
0127 EXPORT_SYMBOL(memunmap);
0128
/* devres destructor: tear down the mapping whose address @res holds. */
static void devm_memremap_release(struct device *dev, void *res)
{
	void **slot = res;

	memunmap(*slot);
}
0133
/* devres match callback: true when @res stores the address @match_data. */
static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	void **slot = res;

	return *slot == match_data;
}
0138
0139 void *devm_memremap(struct device *dev, resource_size_t offset,
0140 size_t size, unsigned long flags)
0141 {
0142 void **ptr, *addr;
0143
0144 ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
0145 dev_to_node(dev));
0146 if (!ptr)
0147 return ERR_PTR(-ENOMEM);
0148
0149 addr = memremap(offset, size, flags);
0150 if (addr) {
0151 *ptr = addr;
0152 devres_add(dev, ptr);
0153 } else {
0154 devres_free(ptr);
0155 return ERR_PTR(-ENXIO);
0156 }
0157
0158 return addr;
0159 }
0160 EXPORT_SYMBOL(devm_memremap);
0161
0162 void devm_memunmap(struct device *dev, void *addr)
0163 {
0164 WARN_ON(devres_release(dev, devm_memremap_release,
0165 devm_memremap_match, addr));
0166 }
0167 EXPORT_SYMBOL(devm_memunmap);