#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
        DEVM_IOREMAP = 0,
        DEVM_IOREMAP_UC,
        DEVM_IOREMAP_WC,
        DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
        iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
        return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
                                    resource_size_t size,
                                    enum devm_ioremap_type type)
{
        void __iomem **ptr, *addr = NULL;

        ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
                                dev_to_node(dev));
        if (!ptr)
                return NULL;

        switch (type) {
        case DEVM_IOREMAP:
                addr = ioremap(offset, size);
                break;
        case DEVM_IOREMAP_UC:
                addr = ioremap_uc(offset, size);
                break;
        case DEVM_IOREMAP_WC:
                addr = ioremap_wc(offset, size);
                break;
        case DEVM_IOREMAP_NP:
                addr = ioremap_np(offset, size);
                break;
        }

        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else
                devres_free(ptr);

        return addr;
}
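
/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */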
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
                           resource_size_t size)
{
        return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
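
/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
 */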
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
                              resource_size_t size)
{
        return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);
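
/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 */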
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
                              resource_size_t size)
{
        return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);
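
/**
 * devm_ioremap_np - Managed ioremap_np()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_np(). Map is automatically unmapped on driver detach.
 */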
void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
                              resource_size_t size)
{
        return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NP);
}
EXPORT_SYMBOL(devm_ioremap_np);
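
/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */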
void devm_iounmap(struct device *dev, void __iomem *addr)
{
        WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
                               (__force void *)addr));
        iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
                        enum devm_ioremap_type type)
{
        resource_size_t size;
        void __iomem *dest_ptr;
        char *pretty_name;

        BUG_ON(!dev);

        if (!res || resource_type(res) != IORESOURCE_MEM) {
                dev_err(dev, "invalid resource\n");
                return IOMEM_ERR_PTR(-EINVAL);
        }

        if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
                type = DEVM_IOREMAP_NP;

        size = resource_size(res);

        if (res->name)
                pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
                                             dev_name(dev), res->name);
        else
                pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
        if (!pretty_name) {
                dev_err(dev, "can't generate pretty name for resource %pR\n", res);
                return IOMEM_ERR_PTR(-ENOMEM);
        }

        if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
                dev_err(dev, "can't request region for resource %pR\n", res);
                return IOMEM_ERR_PTR(-EBUSY);
        }

        dest_ptr = __devm_ioremap(dev, res->start, size, type);
        if (!dest_ptr) {
                dev_err(dev, "ioremap failed for resource %pR\n", res);
                devm_release_mem_region(dev, res->start, size);
                dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
        }

        return dest_ptr;
}
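
/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */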
void __iomem *devm_ioremap_resource(struct device *dev,
                                    const struct resource *res)
{
        return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);
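
/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */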
void __iomem *devm_ioremap_resource_wc(struct device *dev,
                                       const struct resource *res)
{
        return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}
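
/*
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 *
 * @dev:	The device "managing" the resource
 * @node:	The device-node from which the resource comes
 * @index:	index of the MMIO range in the "reg" property
 * @size:	Returns the size of the resource (pass NULL if not needed)
 *
 * Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Please note: this is not a one-to-one replacement for of_iomap(), because
 * of_iomap() does not track whether the region is already mapped. If two
 * drivers try to map the same memory, of_iomap() will succeed but
 * devm_of_iomap() will return -EBUSY, since the region is requested here
 * via devm_ioremap_resource().
 *
 * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure.
 */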
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
                            resource_size_t *size)
{
        struct resource res;

        if (of_address_to_resource(node, index, &res))
                return IOMEM_ERR_PTR(-EINVAL);
        if (size)
                *size = resource_size(&res);
        return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
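
/*
 * Generic iomap devres
 */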
static void devm_ioport_map_release(struct device *dev, void *res)
{
        ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
                                 void *match_data)
{
        return *(void **)res == match_data;
}
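
/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */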
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
                              unsigned int nr)
{
        void __iomem **ptr, *addr;

        ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
                                dev_to_node(dev));
        if (!ptr)
                return NULL;

        addr = ioport_map(port, nr);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else
                devres_free(ptr);

        return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
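
/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */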
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
        ioport_unmap(addr);
        WARN_ON(devres_destroy(dev, devm_ioport_map_release,
                               devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
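
/*
 * PCI iomap devres
 */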
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
        void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
        struct pci_dev *dev = to_pci_dev(gendev);
        struct pcim_iomap_devres *this = res;
        int i;

        for (i = 0; i < PCIM_IOMAP_MAX; i++)
                if (this->table[i])
                        pci_iounmap(dev, this->table[i]);
}
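
/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access the iomap allocation table for @pdev. If the table doesn't
 * exist, it is allocated. All iomaps recorded in the table are
 * automatically unmapped on driver detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */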
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
        struct pcim_iomap_devres *dr, *new_dr;

        dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
        if (dr)
                return dr->table;

        new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
                                   dev_to_node(&pdev->dev));
        if (!new_dr)
                return NULL;
        dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
        return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
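
/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */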
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
        void __iomem **tbl;

        BUG_ON(bar >= PCIM_IOMAP_MAX);

        tbl = (void __iomem **)pcim_iomap_table(pdev);
        if (!tbl || tbl[bar])	/* duplicate mappings are not allowed */
                return NULL;

        tbl[bar] = pci_iomap(pdev, bar, maxlen);
        return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
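
/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */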
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
        void __iomem **tbl;
        int i;

        pci_iounmap(pdev, addr);

        tbl = (void __iomem **)pcim_iomap_table(pdev);
        BUG_ON(!tbl);

        for (i = 0; i < PCIM_IOMAP_MAX; i++)
                if (tbl[i] == addr) {
                        tbl[i] = NULL;
                        return;
                }
        WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);
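
/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask. For example, a driver
 * that needs BARs 0 and 2 might call:
 *
 *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "mydrv");
 *
 * On success, the mappings can be retrieved with pcim_iomap_table().
 */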
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
        void __iomem * const *iomap;
        int i, rc;

        iomap = pcim_iomap_table(pdev);
        if (!iomap)
                return -ENOMEM;

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                unsigned long len;

                if (!(mask & (1 << i)))
                        continue;

                rc = -EINVAL;
                len = pci_resource_len(pdev, i);
                if (!len)
                        goto err_inval;

                rc = pci_request_region(pdev, i, name);
                if (rc)
                        goto err_inval;

                rc = -ENOMEM;
                if (!pcim_iomap(pdev, i, 0))
                        goto err_region;
        }

        return 0;

 err_region:
        pci_release_region(pdev, i);
 err_inval:
        while (--i >= 0) {
                if (!(mask & (1 << i)))
                        continue;
                pcim_iounmap(pdev, iomap[i]);
                pci_release_region(pdev, i);
        }

        return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
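
/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */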
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
                                   const char *name)
{
        int request_mask = ((1 << PCI_STD_NUM_BARS) - 1) & ~mask;
        int rc;

        rc = pci_request_selected_regions(pdev, request_mask, name);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, mask, name);
        if (rc)
                pci_release_selected_regions(pdev, request_mask);
        return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
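
/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */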
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
        void __iomem * const *iomap;
        int i;

        iomap = pcim_iomap_table(pdev);
        if (!iomap)
                return;

        for (i = 0; i < PCIM_IOMAP_MAX; i++) {
                if (!(mask & (1 << i)))
                        continue;

                pcim_iounmap(pdev, iomap[i]);
                pci_release_region(pdev, i);
        }
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */

static void devm_arch_phys_wc_add_release(struct device *dev, void *res)
{
        arch_phys_wc_del(*((int *)res));
}
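
/**
 * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
 * @dev: Managed device
 * @base: Memory base address
 * @size: Size of memory range
 *
 * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
 * See arch_phys_wc_add() for more information.
 */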
int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
{
        int *mtrr;
        int ret;

        mtrr = devres_alloc_node(devm_arch_phys_wc_add_release, sizeof(*mtrr), GFP_KERNEL,
                                 dev_to_node(dev));
        if (!mtrr)
                return -ENOMEM;

        ret = arch_phys_wc_add(base, size);
        if (ret < 0) {
                devres_free(mtrr);
                return ret;
        }

        *mtrr = ret;
        devres_add(dev, mtrr);

        return ret;
}
EXPORT_SYMBOL(devm_arch_phys_wc_add);

struct arch_io_reserve_memtype_wc_devres {
        resource_size_t start;
        resource_size_t size;
};

static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
{
        const struct arch_io_reserve_memtype_wc_devres *this = res;

        arch_io_free_memtype_wc(this->start, this->size);
}
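
/**
 * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
 * @dev: Managed device
 * @start: Memory base address
 * @size: Size of memory range
 *
 * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
 * and sets up a release callback.
 */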
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
                                    resource_size_t size)
{
        struct arch_io_reserve_memtype_wc_devres *dr;
        int ret;

        dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
                               dev_to_node(dev));
        if (!dr)
                return -ENOMEM;

        ret = arch_io_reserve_memtype_wc(start, size);
        if (ret < 0) {
                devres_free(dr);
                return ret;
        }

        dr->start = start;
        dr->size = size;
        devres_add(dev, dr);

        return ret;
}
EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);