Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
0004  */
0005 #include <linux/memremap.h>
0006 #include <linux/rculist.h>
0007 #include <linux/export.h>
0008 #include <linux/ioport.h>
0009 #include <linux/module.h>
0010 #include <linux/types.h>
0011 #include <linux/pfn_t.h>
0012 #include <linux/acpi.h>
0013 #include <linux/io.h>
0014 #include <linux/mm.h>
0015 #include "nfit_test.h"
0016 
/* Registry for the nfit_test hooks; only the single static iomap_ops
 * below is ever published here (see nfit_test_setup()). */
static LIST_HEAD(iomap_head);

/*
 * Callbacks installed by the nfit_test module so the wrapped kernel
 * entry points in this file can divert accesses to emulated resources.
 */
static struct iomap_ops {
    nfit_test_lookup_fn nfit_test_lookup;       /* address -> emulated resource */
    nfit_test_evaluate_dsm_fn evaluate_dsm;     /* emulated ACPI _DSM handler */
    struct list_head list;
} iomap_ops = {
    .list = LIST_HEAD_INIT(iomap_ops.list),
};
0026 
0027 void nfit_test_setup(nfit_test_lookup_fn lookup,
0028         nfit_test_evaluate_dsm_fn evaluate)
0029 {
0030     iomap_ops.nfit_test_lookup = lookup;
0031     iomap_ops.evaluate_dsm = evaluate;
0032     list_add_rcu(&iomap_ops.list, &iomap_head);
0033 }
0034 EXPORT_SYMBOL(nfit_test_setup);
0035 
/*
 * nfit_test_teardown - unpublish the hooks installed by nfit_test_setup().
 *
 * synchronize_rcu() waits for any in-flight RCU readers (get_nfit_res()
 * and __wrap_acpi_evaluate_dsm()) to finish before returning, so the
 * caller may safely unload afterwards.
 */
void nfit_test_teardown(void)
{
    list_del_rcu(&iomap_ops.list);
    synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
0042 
0043 static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
0044 {
0045     struct iomap_ops *ops;
0046 
0047     ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
0048     if (ops)
0049         return ops->nfit_test_lookup(resource);
0050     return NULL;
0051 }
0052 
0053 struct nfit_test_resource *get_nfit_res(resource_size_t resource)
0054 {
0055     struct nfit_test_resource *res;
0056 
0057     rcu_read_lock();
0058     res = __get_nfit_res(resource);
0059     rcu_read_unlock();
0060 
0061     return res;
0062 }
0063 EXPORT_SYMBOL(get_nfit_res);
0064 
/*
 * Map @offset either into an emulated resource's backing buffer or via
 * @fallback_fn (the real ioremap variant).  Written as a GCC statement
 * expression so the ioremap wrappers below can return it directly.
 */
#define __nfit_test_ioremap(offset, size, fallback_fn) ({       \
    struct nfit_test_resource *nfit_res = get_nfit_res(offset); \
    nfit_res ?                          \
        (void __iomem *) nfit_res->buf + (offset)       \
            - nfit_res->res.start               \
    :                               \
        fallback_fn((offset), (size)) ;             \
})
0073 
0074 void __iomem *__wrap_devm_ioremap(struct device *dev,
0075         resource_size_t offset, unsigned long size)
0076 {
0077     struct nfit_test_resource *nfit_res = get_nfit_res(offset);
0078 
0079     if (nfit_res)
0080         return (void __iomem *) nfit_res->buf + offset
0081             - nfit_res->res.start;
0082     return devm_ioremap(dev, offset, size);
0083 }
0084 EXPORT_SYMBOL(__wrap_devm_ioremap);
0085 
0086 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
0087         size_t size, unsigned long flags)
0088 {
0089     struct nfit_test_resource *nfit_res = get_nfit_res(offset);
0090 
0091     if (nfit_res)
0092         return nfit_res->buf + offset - nfit_res->res.start;
0093     return devm_memremap(dev, offset, size, flags);
0094 }
0095 EXPORT_SYMBOL(__wrap_devm_memremap);
0096 
/*
 * devm action paired with __wrap_devm_memremap_pages(): kill the
 * pagemap's percpu_ref, wait for dev_pagemap_percpu_release() to signal
 * pgmap->done, then free the ref's resources.  The wait must precede
 * percpu_ref_exit() so the release callback cannot run on a dead ref.
 */
static void nfit_test_kill(void *_pgmap)
{
    struct dev_pagemap *pgmap = _pgmap;

    WARN_ON(!pgmap);

    percpu_ref_kill(&pgmap->ref);

    wait_for_completion(&pgmap->done);
    percpu_ref_exit(&pgmap->ref);
}
0108 
/* percpu_ref release callback: wakes the waiter in nfit_test_kill(). */
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
    struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

    complete(&pgmap->done);
}
0115 
/*
 * Wrapped devm_memremap_pages(): for emulated ranges, skip the real
 * page-map setup and return the test buffer, but still initialize
 * pgmap->ref and register nfit_test_kill() so the kill/wait/exit
 * reference lifecycle runs on device teardown.
 */
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
    int error;
    resource_size_t offset = pgmap->range.start;
    struct nfit_test_resource *nfit_res = get_nfit_res(offset);

    if (!nfit_res)
        return devm_memremap_pages(dev, pgmap);

    /* Completed by dev_pagemap_percpu_release() when the ref dies. */
    init_completion(&pgmap->done);
    error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
                GFP_KERNEL);
    if (error)
        return ERR_PTR(error);

    /* Tear the ref down automatically when @dev is released. */
    error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
    if (error)
        return ERR_PTR(error);
    return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
0137 
0138 pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
0139 {
0140     struct nfit_test_resource *nfit_res = get_nfit_res(addr);
0141 
0142     if (nfit_res)
0143         flags &= ~PFN_MAP;
0144         return phys_to_pfn_t(addr, flags);
0145 }
0146 EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
0147 
0148 void *__wrap_memremap(resource_size_t offset, size_t size,
0149         unsigned long flags)
0150 {
0151     struct nfit_test_resource *nfit_res = get_nfit_res(offset);
0152 
0153     if (nfit_res)
0154         return nfit_res->buf + offset - nfit_res->res.start;
0155     return memremap(offset, size, flags);
0156 }
0157 EXPORT_SYMBOL(__wrap_memremap);
0158 
/*
 * Wrapped devm_memunmap(): emulated mappings point into test buffers,
 * so there is nothing to unmap for them.
 */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
    if (get_nfit_res((long) addr))
        return;
    devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
0168 
/* Wrapped ioremap(): serve emulated ranges from the test buffer. */
void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size)
{
    return __nfit_test_ioremap(offset, size, ioremap);
}
EXPORT_SYMBOL(__wrap_ioremap);
0174 
/* Wrapped ioremap_wc(): same emulation path, write-combining fallback. */
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
    return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);
0180 
0181 void __wrap_iounmap(volatile void __iomem *addr)
0182 {
0183     struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
0184     if (nfit_res)
0185         return;
0186     return iounmap(addr);
0187 }
0188 EXPORT_SYMBOL(__wrap_iounmap);
0189 
0190 void __wrap_memunmap(void *addr)
0191 {
0192     struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
0193 
0194     if (nfit_res)
0195         return;
0196     return memunmap(addr);
0197 }
0198 EXPORT_SYMBOL(__wrap_memunmap);
0199 
/* Forward declaration: needed by nfit_devres_release() below. */
static bool nfit_test_release_region(struct device *dev,
        struct resource *parent, resource_size_t start,
        resource_size_t n);
0203 
/*
 * devres action registered by nfit_test_request_region(); @data is the
 * devres payload holding a struct resource pointer.  WARNs if the
 * region is no longer tracked as an emulated request.
 */
static void nfit_devres_release(struct device *dev, void *data)
{
    struct resource *res = *((struct resource **) data);

    WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
            resource_size(res)));
}
0211 
0212 static int match(struct device *dev, void *__res, void *match_data)
0213 {
0214     struct resource *res = *((struct resource **) __res);
0215     resource_size_t start = *((resource_size_t *) match_data);
0216 
0217     return res->start == start;
0218 }
0219 
/*
 * nfit_test_release_region - undo an emulated __request_region().
 * @dev: non-NULL selects the devres path, which re-enters this function
 *       with dev == NULL via nfit_devres_release()
 * @parent: only &iomem_resource ranges can be emulated
 * @start: start of the region to release
 * @n: size of the region
 *
 * Returns true when @start falls inside an emulated resource (a size or
 * lookup mismatch only WARNs); returns false when the caller should fall
 * back to the real release path.
 */
static bool nfit_test_release_region(struct device *dev,
        struct resource *parent, resource_size_t start,
        resource_size_t n)
{
    if (parent == &iomem_resource) {
        struct nfit_test_resource *nfit_res = get_nfit_res(start);

        if (nfit_res) {
            struct nfit_test_request *req;
            struct resource *res = NULL;

            if (dev) {
                /* Fires nfit_devres_release() for the matching
                 * devres entry, which calls back in with
                 * dev == NULL to do the list removal. */
                devres_release(dev, nfit_devres_release, match,
                        &start);
                return true;
            }

            /* Find and unlink the request starting at @start. */
            spin_lock(&nfit_res->lock);
            list_for_each_entry(req, &nfit_res->requests, list)
                if (req->res.start == start) {
                    res = &req->res;
                    list_del(&req->list);
                    break;
                }
            spin_unlock(&nfit_res->lock);

            WARN(!res || resource_size(res) != n,
                    "%s: start: %llx n: %llx mismatch: %pr\n",
                        __func__, start, n, res);
            /* req only points at a valid entry when the loop
             * found a match (res != NULL). */
            if (res)
                kfree(req);
            return true;
        }
    }
    return false;
}
0256 
0257 static struct resource *nfit_test_request_region(struct device *dev,
0258         struct resource *parent, resource_size_t start,
0259         resource_size_t n, const char *name, int flags)
0260 {
0261     struct nfit_test_resource *nfit_res;
0262 
0263     if (parent == &iomem_resource) {
0264         nfit_res = get_nfit_res(start);
0265         if (nfit_res) {
0266             struct nfit_test_request *req;
0267             struct resource *res = NULL;
0268 
0269             if (start + n > nfit_res->res.start
0270                     + resource_size(&nfit_res->res)) {
0271                 pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
0272                         __func__, start, n,
0273                         &nfit_res->res);
0274                 return NULL;
0275             }
0276 
0277             spin_lock(&nfit_res->lock);
0278             list_for_each_entry(req, &nfit_res->requests, list)
0279                 if (start == req->res.start) {
0280                     res = &req->res;
0281                     break;
0282                 }
0283             spin_unlock(&nfit_res->lock);
0284 
0285             if (res) {
0286                 WARN(1, "%pr already busy\n", res);
0287                 return NULL;
0288             }
0289 
0290             req = kzalloc(sizeof(*req), GFP_KERNEL);
0291             if (!req)
0292                 return NULL;
0293             INIT_LIST_HEAD(&req->list);
0294             res = &req->res;
0295 
0296             res->start = start;
0297             res->end = start + n - 1;
0298             res->name = name;
0299             res->flags = resource_type(parent);
0300             res->flags |= IORESOURCE_BUSY | flags;
0301             spin_lock(&nfit_res->lock);
0302             list_add(&req->list, &nfit_res->requests);
0303             spin_unlock(&nfit_res->lock);
0304 
0305             if (dev) {
0306                 struct resource **d;
0307 
0308                 d = devres_alloc(nfit_devres_release,
0309                         sizeof(struct resource *),
0310                         GFP_KERNEL);
0311                 if (!d)
0312                     return NULL;
0313                 *d = res;
0314                 devres_add(dev, d);
0315             }
0316 
0317             pr_debug("%s: %pr\n", __func__, res);
0318             return res;
0319         }
0320     }
0321     if (dev)
0322         return __devm_request_region(dev, parent, start, n, name);
0323     return __request_region(parent, start, n, name, flags);
0324 }
0325 
/*
 * Wrapped __request_region(): route iomem requests through the
 * emulation path; dev == NULL selects the non-devres flavor.
 */
struct resource *__wrap___request_region(struct resource *parent,
        resource_size_t start, resource_size_t n, const char *name,
        int flags)
{
    return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
0333 
0334 int __wrap_insert_resource(struct resource *parent, struct resource *res)
0335 {
0336     if (get_nfit_res(res->start))
0337         return 0;
0338     return insert_resource(parent, res);
0339 }
0340 EXPORT_SYMBOL(__wrap_insert_resource);
0341 
0342 int __wrap_remove_resource(struct resource *res)
0343 {
0344     if (get_nfit_res(res->start))
0345         return 0;
0346     return remove_resource(res);
0347 }
0348 EXPORT_SYMBOL(__wrap_remove_resource);
0349 
0350 struct resource *__wrap___devm_request_region(struct device *dev,
0351         struct resource *parent, resource_size_t start,
0352         resource_size_t n, const char *name)
0353 {
0354     if (!dev)
0355         return NULL;
0356     return nfit_test_request_region(dev, parent, start, n, name, 0);
0357 }
0358 EXPORT_SYMBOL(__wrap___devm_request_region);
0359 
0360 void __wrap___release_region(struct resource *parent, resource_size_t start,
0361         resource_size_t n)
0362 {
0363     if (!nfit_test_release_region(NULL, parent, start, n))
0364         __release_region(parent, start, n);
0365 }
0366 EXPORT_SYMBOL(__wrap___release_region);
0367 
0368 void __wrap___devm_release_region(struct device *dev, struct resource *parent,
0369         resource_size_t start, resource_size_t n)
0370 {
0371     if (!nfit_test_release_region(dev, parent, start, n))
0372         __devm_release_region(dev, parent, start, n);
0373 }
0374 EXPORT_SYMBOL(__wrap___devm_release_region);
0375 
/*
 * Wrapped acpi_evaluate_object(): intercept _FIT evaluations on
 * emulated handles and return the pre-built object stashed in the test
 * resource buffer; everything else goes to the real ACPI interpreter.
 */
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
        struct acpi_object_list *p, struct acpi_buffer *buf)
{
    struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
    union acpi_object **obj;

    if (!nfit_res || strcmp(path, "_FIT") || !buf)
        return acpi_evaluate_object(handle, path, p, buf);

    /* For _FIT, nfit_res->buf holds a pointer to the canned object
     * (NOTE(review): caller-owned; the buffer is not copied). */
    obj = nfit_res->buf;
    buf->length = sizeof(union acpi_object);
    buf->pointer = *obj;
    return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
0391 
0392 union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
0393         u64 rev, u64 func, union acpi_object *argv4)
0394 {
0395     union acpi_object *obj = ERR_PTR(-ENXIO);
0396     struct iomap_ops *ops;
0397 
0398     rcu_read_lock();
0399     ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
0400     if (ops)
0401         obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
0402     rcu_read_unlock();
0403 
0404     if (IS_ERR(obj))
0405         return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
0406     return obj;
0407 }
0408 EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
0409 
0410 MODULE_LICENSE("GPL v2");