// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
    DEVM_IOREMAP = 0,
    DEVM_IOREMAP_UC,
    DEVM_IOREMAP_WC,
    DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
    iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
    return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
                    resource_size_t size,
                    enum devm_ioremap_type type)
{
    void __iomem **ptr, *addr = NULL;

    ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
                dev_to_node(dev));
    if (!ptr)
        return NULL;

    switch (type) {
    case DEVM_IOREMAP:
        addr = ioremap(offset, size);
        break;
    case DEVM_IOREMAP_UC:
        addr = ioremap_uc(offset, size);
        break;
    case DEVM_IOREMAP_WC:
        addr = ioremap_wc(offset, size);
        break;
    case DEVM_IOREMAP_NP:
        addr = ioremap_np(offset, size);
        break;
    }

    if (addr) {
        *ptr = addr;
        devres_add(dev, ptr);
    } else
        devres_free(ptr);

    return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
               resource_size_t size)
{
    return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
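
/*
 * Illustrative usage sketch (not part of the original file): a probe routine
 * might map a fixed register window like this; foo_probe, FOO_REG_BASE and
 * FOO_CTRL are hypothetical names.  No matching iounmap() is needed, the
 * devres core drops the mapping on driver detach.
 *
 *  static int foo_probe(struct platform_device *pdev)
 *  {
 *      void __iomem *regs;
 *
 *      regs = devm_ioremap(&pdev->dev, FOO_REG_BASE, SZ_4K);
 *      if (!regs)
 *          return -ENOMEM;
 *
 *      writel(1, regs + FOO_CTRL);
 *      return 0;
 *  }
 */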

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
                  resource_size_t size)
{
    return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
                  resource_size_t size)
{
    return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);

/**
 * devm_ioremap_np - Managed ioremap_np()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_np().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
                  resource_size_t size)
{
    return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NP);
}
EXPORT_SYMBOL(devm_ioremap_np);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
    WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
                   (__force void *)addr));
    iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
            enum devm_ioremap_type type)
{
    resource_size_t size;
    void __iomem *dest_ptr;
    char *pretty_name;

    BUG_ON(!dev);

    if (!res || resource_type(res) != IORESOURCE_MEM) {
        dev_err(dev, "invalid resource\n");
        return IOMEM_ERR_PTR(-EINVAL);
    }

    if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
        type = DEVM_IOREMAP_NP;

    size = resource_size(res);

    if (res->name)
        pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
                         dev_name(dev), res->name);
    else
        pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
    if (!pretty_name) {
        dev_err(dev, "can't generate pretty name for resource %pR\n", res);
        return IOMEM_ERR_PTR(-ENOMEM);
    }

    if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
        dev_err(dev, "can't request region for resource %pR\n", res);
        return IOMEM_ERR_PTR(-EBUSY);
    }

    dest_ptr = __devm_ioremap(dev, res->start, size, type);
    if (!dest_ptr) {
        dev_err(dev, "ioremap failed for resource %pR\n", res);
        devm_release_mem_region(dev, res->start, size);
        dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
    }

    return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Usage example:
 *
 *  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *  base = devm_ioremap_resource(&pdev->dev, res);
 *  if (IS_ERR(base))
 *      return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource(struct device *dev,
                    const struct resource *res)
{
    return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *              devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
                       const struct resource *res)
{
    return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}

/*
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *         for a given device_node managed by a given device
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * @dev:    The device "managing" the resource
 * @node:       The device-tree node where the resource resides
 * @index:  index of the MMIO range in the "reg" property
 * @size:   Returns the size of the resource (pass NULL if not needed)
 *
 * Usage example:
 *
 *  base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *  if (IS_ERR(base))
 *      return PTR_ERR(base);
 *
 * Please Note: This is not a one-to-one replacement for of_iomap() because the
 * of_iomap() function does not track whether the region is already mapped.  If
 * two drivers try to map the same memory, the of_iomap() function will succeed
 * but the devm_of_iomap() function will return -EBUSY.
 *
 * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure.
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
                resource_size_t *size)
{
    struct resource res;

    if (of_address_to_resource(node, index, &res))
        return IOMEM_ERR_PTR(-EINVAL);
    if (size)
        *size = resource_size(&res);
    return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
    ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
                 void *match_data)
{
    return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map().  Map is automatically unmapped on driver
 * detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
                   unsigned int nr)
{
    void __iomem **ptr, *addr;

    ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
                dev_to_node(dev));
    if (!ptr)
        return NULL;

    addr = ioport_map(port, nr);
    if (addr) {
        *ptr = addr;
        devres_add(dev, ptr);
    } else
        devres_free(ptr);

    return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
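
/*
 * Illustrative usage sketch (not part of the original file): mapping a small
 * legacy I/O-port range so it can be accessed through the ioread*()/iowrite*()
 * accessors; FOO_IO_BASE is a hypothetical port number.
 *
 *  void __iomem *pio;
 *
 *  pio = devm_ioport_map(dev, FOO_IO_BASE, 8);
 *  if (!pio)
 *      return -ENOMEM;
 *  iowrite8(0xff, pio);
 *
 * The port mapping is released automatically on driver detach.
 */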

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap().  @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
    ioport_unmap(addr);
    WARN_ON(devres_destroy(dev, devm_ioport_map_release,
                   devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX  PCI_STD_NUM_BARS

struct pcim_iomap_devres {
    void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
    struct pci_dev *dev = to_pci_dev(gendev);
    struct pcim_iomap_devres *this = res;
    int i;

    for (i = 0; i < PCIM_IOMAP_MAX; i++)
        if (this->table[i])
            pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access the iomap allocation table for @pdev.  If the table doesn't
 * exist yet and @pdev is managed, it will be allocated.  All iomaps
 * recorded in the table are automatically unmapped on driver detach.
 *
 * This function may sleep the first time it is called (when the table
 * is allocated); once the table exists it can be called from any
 * context and is guaranteed to succeed.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
    struct pcim_iomap_devres *dr, *new_dr;

    dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
    if (dr)
        return dr->table;

    new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
                   dev_to_node(&pdev->dev));
    if (!new_dr)
        return NULL;
    dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
    return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
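
/*
 * Illustrative usage sketch (not part of the original file): once BARs have
 * been mapped with pcim_iomap() or pcim_iomap_regions(), the table hands the
 * per-BAR cookies back; BAR 0 is used here purely as an example.
 *
 *  void __iomem * const *iomap = pcim_iomap_table(pdev);
 *
 *  if (!iomap)
 *      return -ENOMEM;
 *  regs = iomap[0];
 */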

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
    void __iomem **tbl;

    BUG_ON(bar >= PCIM_IOMAP_MAX);

    tbl = (void __iomem **)pcim_iomap_table(pdev);
    if (!tbl || tbl[bar])   /* duplicate mappings not allowed */
        return NULL;

    tbl[bar] = pci_iomap(pdev, bar, maxlen);
    return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
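
/*
 * Illustrative usage sketch (not part of the original file): mapping a single
 * BAR from a PCI probe routine after pcim_enable_device().  Passing 0 as
 * @maxlen maps the whole BAR; note that pcim_iomap() does not request the
 * region, unlike pcim_iomap_regions() below.
 *
 *  rc = pcim_enable_device(pdev);
 *  if (rc)
 *      return rc;
 *
 *  mmio = pcim_iomap(pdev, 0, 0);
 *  if (!mmio)
 *      return -ENOMEM;
 */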

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
    void __iomem **tbl;
    int i;

    pci_iounmap(pdev, addr);

    tbl = (void __iomem **)pcim_iomap_table(pdev);
    BUG_ON(!tbl);

    for (i = 0; i < PCIM_IOMAP_MAX; i++)
        if (tbl[i] == addr) {
            tbl[i] = NULL;
            return;
        }
    WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
    void __iomem * const *iomap;
    int i, rc;

    iomap = pcim_iomap_table(pdev);
    if (!iomap)
        return -ENOMEM;

    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
        unsigned long len;

        if (!(mask & (1 << i)))
            continue;

        rc = -EINVAL;
        len = pci_resource_len(pdev, i);
        if (!len)
            goto err_inval;

        rc = pci_request_region(pdev, i, name);
        if (rc)
            goto err_inval;

        rc = -ENOMEM;
        if (!pcim_iomap(pdev, i, 0))
            goto err_region;
    }

    return 0;

 err_region:
    pci_release_region(pdev, i);
 err_inval:
    while (--i >= 0) {
        if (!(mask & (1 << i)))
            continue;
        pcim_iounmap(pdev, iomap[i]);
        pci_release_region(pdev, i);
    }

    return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
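
/*
 * Illustrative usage sketch (not part of the original file): a PCI probe
 * routine requesting and mapping BARs 0 and 2, then fetching the cookies from
 * the iomap table; DRV_NAME stands in for the driver's name string.
 *
 *  rc = pcim_enable_device(pdev);
 *  if (rc)
 *      return rc;
 *
 *  rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), DRV_NAME);
 *  if (rc)
 *      return rc;
 *
 *  regs = pcim_iomap_table(pdev)[0];
 *  db   = pcim_iomap_table(pdev)[2];
 *
 * Both regions are unmapped and released automatically on driver detach.
 */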

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
                   const char *name)
{
    int request_mask = ((1 << 6) - 1) & ~mask;
    int rc;

    rc = pci_request_selected_regions(pdev, request_mask, name);
    if (rc)
        return rc;

    rc = pcim_iomap_regions(pdev, mask, name);
    if (rc)
        pci_release_selected_regions(pdev, request_mask);
    return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
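
/*
 * Illustrative usage sketch (not part of the original file): useful when a
 * driver wants to own all six standard BARs (so nothing else can claim them)
 * but only needs a mapping for some of them; here only BAR 0 is mapped.
 *
 *  rc = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
 *  if (rc)
 *      return rc;
 *  regs = pcim_iomap_table(pdev)[0];
 */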

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
    void __iomem * const *iomap;
    int i;

    iomap = pcim_iomap_table(pdev);
    if (!iomap)
        return;

    for (i = 0; i < PCIM_IOMAP_MAX; i++) {
        if (!(mask & (1 << i)))
            continue;

        pcim_iounmap(pdev, iomap[i]);
        pci_release_region(pdev, i);
    }
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */

static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
    arch_phys_wc_del(*((int *)res));
}

/**
 * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
 * @dev: Managed device
 * @base: Memory base address
 * @size: Size of memory range
 *
 * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
 * See arch_phys_wc_add() for more information.
 */
int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
{
    int *mtrr;
    int ret;

    mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL,
                 dev_to_node(dev));
    if (!mtrr)
        return -ENOMEM;

    ret = arch_phys_wc_add(base, size);
    if (ret < 0) {
        devres_free(mtrr);
        return ret;
    }

    *mtrr = ret;
    devres_add(dev, mtrr);

    return ret;
}
EXPORT_SYMBOL(devm_arch_phys_wc_add);
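
/*
 * Illustrative usage sketch (not part of the original file): a graphics
 * driver could ask for write-combining over its framebuffer aperture;
 * fb_base and fb_size are hypothetical values taken from a BAR.  Failure
 * only costs performance, so the return value is usually not treated as
 * fatal; the MTRR, if one was set up, is removed automatically on detach.
 *
 *  devm_arch_phys_wc_add(&pdev->dev, fb_base, fb_size);
 */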

struct arch_io_reserve_memtype_wc_devres {
    resource_size_t start;
    resource_size_t size;
};

static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
{
    const struct arch_io_reserve_memtype_wc_devres *this = res;

    arch_io_free_memtype_wc(this->start, this->size);
}

/**
 * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
 * @dev: Managed device
 * @start: Memory base address
 * @size: Size of memory range
 *
 * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
 * and sets up a release callback. See arch_io_reserve_memtype_wc() for more
 * information.
 */
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
                    resource_size_t size)
{
    struct arch_io_reserve_memtype_wc_devres *dr;
    int ret;

    dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
                   dev_to_node(dev));
    if (!dr)
        return -ENOMEM;

    ret = arch_io_reserve_memtype_wc(start, size);
    if (ret < 0) {
        devres_free(dr);
        return ret;
    }

    dr->start = start;
    dr->size = size;
    devres_add(dev, dr);

    return ret;
}
EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
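
/*
 * Illustrative usage sketch (not part of the original file): typically paired
 * with devm_arch_phys_wc_add() before mapping a framebuffer aperture
 * write-combined; fb_base and fb_size are hypothetical.
 *
 *  ret = devm_arch_io_reserve_memtype_wc(&pdev->dev, fb_base, fb_size);
 *  if (ret)
 *      return ret;
 *  devm_arch_phys_wc_add(&pdev->dev, fb_base, fb_size);
 *  fb = devm_ioremap_wc(&pdev->dev, fb_base, fb_size);
 *
 * Both the memtype reservation and the MTRR are dropped automatically when
 * the driver detaches.
 */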