/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
    return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
    return (__force void *)ioremap_cache(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
    unsigned long pfn = PHYS_PFN(offset);

    /* In the simple case just return the existing linear address */
    if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
        return __va(offset);
    return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT and MEMREMAP_WC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
    int is_ram = region_intersects(offset, size,
                       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
    void *addr = NULL;

    if (!flags)
        return NULL;

    if (is_ram == REGION_MIXED) {
        WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
                &offset, (unsigned long) size);
        return NULL;
    }

    /* Try all mapping types requested until one returns non-NULL */
    if (flags & MEMREMAP_WB) {
        /*
         * MEMREMAP_WB is special in that it can be satisfied
         * from the direct map.  Some archs depend on the
         * capability of memremap() to autodetect cases where
         * the requested range is potentially in System RAM.
         */
        if (is_ram == REGION_INTERSECTS)
            addr = try_ram_remap(offset, size);
        if (!addr)
            addr = arch_memremap_wb(offset, size);
    }

    /*
     * If we don't have a mapping yet and other request flags are
     * present then we will be attempting to establish a new virtual
     * address mapping.  Enforce that this mapping is not aliasing
     * System RAM.
     */
    if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
        WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
                &offset, (unsigned long) size);
        return NULL;
    }

    if (!addr && (flags & MEMREMAP_WT))
        addr = ioremap_wt(offset, size);

    if (!addr && (flags & MEMREMAP_WC))
        addr = ioremap_wc(offset, size);

    return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
    if (is_vmalloc_addr(addr))
        iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
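
/*
 * Example (illustrative sketch, hypothetical caller): map a firmware
 * buffer at @phys as ordinary cacheable memory, preferring write-back
 * and falling back to write-through per the flag ordering documented
 * above.  The helper name and address are assumptions for illustration.
 */
static void __maybe_unused memremap_example(resource_size_t phys, size_t len)
{
    void *virt = memremap(phys, len, MEMREMAP_WB | MEMREMAP_WT);

    if (!virt)
        return;
    /* plain loads and stores are valid here; no __iomem accessors needed */
    memunmap(virt);
}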

static void devm_memremap_release(struct device *dev, void *res)
{
    memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
    return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
        size_t size, unsigned long flags)
{
    void **ptr, *addr;

    ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
            dev_to_node(dev));
    if (!ptr)
        return ERR_PTR(-ENOMEM);

    addr = memremap(offset, size, flags);
    if (addr) {
        *ptr = addr;
        devres_add(dev, ptr);
    } else {
        devres_free(ptr);
        return ERR_PTR(-ENXIO);
    }

    return addr;
}
EXPORT_SYMBOL(devm_memremap);
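
/*
 * Example (illustrative sketch): the device-managed variant as it might
 * be called from a hypothetical ->probe() routine.  devres tears the
 * mapping down on driver detach, so no explicit devm_memunmap() is
 * required on the success path.
 */
static int __maybe_unused devm_memremap_example(struct device *dev,
        struct resource *res)
{
    void *virt = devm_memremap(dev, res->start, resource_size(res),
            MEMREMAP_WB);

    if (IS_ERR(virt))
        return PTR_ERR(virt);
    /* virt stays valid for the lifetime of @dev */
    return 0;
}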

void devm_memunmap(struct device *dev, void *addr)
{
    WARN_ON(devres_release(dev, devm_memremap_release,
                devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
    struct resource res;
    struct percpu_ref *ref;
    struct dev_pagemap pgmap;
    struct vmem_altmap altmap;
};

void get_zone_device_page(struct page *page)
{
    percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

void put_zone_device_page(struct page *page)
{
    put_dev_pagemap(page->pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);

static void pgmap_radix_release(struct resource *res)
{
    resource_size_t key, align_start, align_size, align_end;

    align_start = res->start & ~(SECTION_SIZE - 1);
    align_size = ALIGN(resource_size(res), SECTION_SIZE);
    align_end = align_start + align_size - 1;

    mutex_lock(&pgmap_lock);
    for (key = align_start; key <= align_end; key += SECTION_SIZE)
        radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
    mutex_unlock(&pgmap_lock);
}

static unsigned long pfn_first(struct page_map *page_map)
{
    struct dev_pagemap *pgmap = &page_map->pgmap;
    const struct resource *res = &page_map->res;
    struct vmem_altmap *altmap = pgmap->altmap;
    unsigned long pfn;

    pfn = res->start >> PAGE_SHIFT;
    if (altmap)
        pfn += vmem_altmap_offset(altmap);
    return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
    const struct resource *res = &page_map->res;

    return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
    for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
    struct page_map *page_map = data;
    struct resource *res = &page_map->res;
    resource_size_t align_start, align_size;
    struct dev_pagemap *pgmap = &page_map->pgmap;

    if (percpu_ref_tryget_live(pgmap->ref)) {
        dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
        percpu_ref_put(pgmap->ref);
    }

    /* pages are dead and unused, undo the arch mapping */
    align_start = res->start & ~(SECTION_SIZE - 1);
    align_size = ALIGN(resource_size(res), SECTION_SIZE);
    mem_hotplug_begin();
    arch_remove_memory(align_start, align_size);
    mem_hotplug_done();
    untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
    pgmap_radix_release(res);
    dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
            "%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
    struct page_map *page_map;

    WARN_ON_ONCE(!rcu_read_lock_held());

    page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
    return page_map ? &page_map->pgmap : NULL;
}
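
/*
 * Example (illustrative sketch): looking up the pagemap that covers a
 * physical address.  Per the comment above, rcu_read_lock() must be held
 * across the lookup; the result is only stable while the caller otherwise
 * pins the mapping (e.g. via the pagemap's percpu_ref).
 */
static bool __maybe_unused phys_has_pagemap_example(resource_size_t phys)
{
    struct dev_pagemap *pgmap;

    rcu_read_lock();
    pgmap = find_dev_pagemap(phys);
    rcu_read_unlock();

    return pgmap != NULL;
}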

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
        struct percpu_ref *ref, struct vmem_altmap *altmap)
{
    resource_size_t key, align_start, align_size, align_end;
    pgprot_t pgprot = PAGE_KERNEL;
    struct dev_pagemap *pgmap;
    struct page_map *page_map;
    int error, nid, is_ram;
    unsigned long pfn;

    align_start = res->start & ~(SECTION_SIZE - 1);
    align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
        - align_start;
    is_ram = region_intersects(align_start, align_size,
        IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

    if (is_ram == REGION_MIXED) {
        WARN_ONCE(1, "%s attempted on mixed region %pr\n",
                __func__, res);
        return ERR_PTR(-ENXIO);
    }

    if (is_ram == REGION_INTERSECTS)
        return __va(res->start);

    if (!ref)
        return ERR_PTR(-EINVAL);

    page_map = devres_alloc_node(devm_memremap_pages_release,
            sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
    if (!page_map)
        return ERR_PTR(-ENOMEM);
    pgmap = &page_map->pgmap;

    memcpy(&page_map->res, res, sizeof(*res));

    pgmap->dev = dev;
    if (altmap) {
        memcpy(&page_map->altmap, altmap, sizeof(*altmap));
        pgmap->altmap = &page_map->altmap;
    }
    pgmap->ref = ref;
    pgmap->res = &page_map->res;

    mutex_lock(&pgmap_lock);
    error = 0;
    align_end = align_start + align_size - 1;
    for (key = align_start; key <= align_end; key += SECTION_SIZE) {
        struct dev_pagemap *dup;

        rcu_read_lock();
        dup = find_dev_pagemap(key);
        rcu_read_unlock();
        if (dup) {
            dev_err(dev, "%s: %pr collides with mapping for %s\n",
                    __func__, res, dev_name(dup->dev));
            error = -EBUSY;
            break;
        }
        error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
                page_map);
        if (error) {
            dev_err(dev, "%s: failed: %d\n", __func__, error);
            break;
        }
    }
    mutex_unlock(&pgmap_lock);
    if (error)
        goto err_radix;

    nid = dev_to_node(dev);
    if (nid < 0)
        nid = numa_mem_id();

    error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
            align_size);
    if (error)
        goto err_pfn_remap;

    mem_hotplug_begin();
    error = arch_add_memory(nid, align_start, align_size, true);
    mem_hotplug_done();
    if (error)
        goto err_add_memory;

    for_each_device_pfn(pfn, page_map) {
        struct page *page = pfn_to_page(pfn);

        /*
         * ZONE_DEVICE pages union ->lru with a ->pgmap back
         * pointer.  It is a bug if a ZONE_DEVICE page is ever
         * freed or placed on a driver-private list.  Seed the
         * storage with LIST_POISON* values.
         */
        list_del(&page->lru);
        page->pgmap = pgmap;
    }
    devres_add(dev, page_map);
    return __va(res->start);

 err_add_memory:
    untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
    pgmap_radix_release(res);
    devres_free(page_map);
    return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
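
/*
 * Example (illustrative sketch): arranging struct page coverage for a
 * pmem-style resource from a hypothetical ->probe() path.  The caller
 * owns @ref (live here, killed before teardown) and may pass an altmap
 * to carve the memmap out of @res itself; NULL uses regular page
 * allocations for the memmap.
 */
static void * __maybe_unused devm_memremap_pages_example(struct device *dev,
        struct resource *res, struct percpu_ref *ref)
{
    void *base = devm_memremap_pages(dev, res, ref, NULL);

    if (IS_ERR(base))
        return base;
    /* pfn_to_page() is now valid for every pfn covered by @res */
    return base;
}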

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
    /* number of pfns from base where pfn_to_page() is valid */
    return altmap->reserve + altmap->free;
}
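
/*
 * Worked example (hypothetical numbers): an altmap with 8 reserved pfns
 * and 1024 free pfns yields an offset of 1032, i.e. the first pfn whose
 * struct page is valid is base_pfn + 1032.
 */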

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
    altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
    /*
     * 'memmap_start' is the virtual address for the first "struct
     * page" in this range of the vmemmap array.  In the case of
     * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
     * pointer arithmetic, so we can perform this to_vmem_altmap()
     * conversion without concern for the initialization state of
     * the struct page fields.
     */
    struct page *page = (struct page *) memmap_start;
    struct dev_pagemap *pgmap;

    /*
     * Unconditionally retrieve a dev_pagemap associated with the
     * given physical address, this is only for use in the
     * arch_{add|remove}_memory() for setting up and tearing down
     * the memmap.
     */
    rcu_read_lock();
    pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
    rcu_read_unlock();

    return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */