// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>

#include "base.h"
#include "power/power.h"

/* IDA for automatically allocated device IDs (PLATFORM_DEVID_AUTO) */
static DEFINE_IDA(platform_devid_ida);

struct device platform_bus = {
        .init_name      = "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type (IORESOURCE_MEM, IORESOURCE_IO, ...)
 * @num: resource index
 *
 * Return: a pointer to the resource or NULL on failure.
 */
struct resource *platform_get_resource(struct platform_device *dev,
                                       unsigned int type, unsigned int num)
{
        u32 i;

        for (i = 0; i < dev->num_resources; i++) {
                struct resource *r = &dev->resource[i];

                if (type == resource_type(r) && num-- == 0)
                        return r;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);

/**
 * platform_get_mem_or_io - get a memory or I/O resource for a device
 * @dev: platform device
 * @num: resource index
 *
 * Return: a pointer to the resource or NULL on failure.
 */
struct resource *platform_get_mem_or_io(struct platform_device *dev,
                                        unsigned int num)
{
        u32 i;

        for (i = 0; i < dev->num_resources; i++) {
                struct resource *r = &dev->resource[i];

                if ((resource_type(r) & (IORESOURCE_MEM | IORESOURCE_IO)) && num-- == 0)
                        return r;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_mem_or_io);

#ifdef CONFIG_HAS_IOMEM
/**
 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
 *                                          platform device and get the resource
 * @pdev: platform device to use both for memory resource lookup as well as
 *        resource management
 * @index: resource index
 * @res: optional output parameter to store a pointer to the obtained resource
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
                                       unsigned int index, struct resource **res)
{
        struct resource *r;

        r = platform_get_resource(pdev, IORESOURCE_MEM, index);
        if (res)
                *res = r;
        return devm_ioremap_resource(&pdev->dev, r);
}
EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);

/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *                                  device
 * @pdev: platform device to use both for memory resource lookup as well as
 *        resource management
 * @index: resource index
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                             unsigned int index)
{
        return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
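/*
 * A minimal sketch of how a driver typically uses the devm ioremap helpers
 * above from its probe routine; foo_probe() and the register offset are
 * hypothetical, not part of this file:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              void __iomem *base;
 *
 *              base = devm_platform_ioremap_resource(pdev, 0);
 *              if (IS_ERR(base))
 *                      return PTR_ERR(base);
 *
 *              writel(0x1, base + 0x0);        // hypothetical enable register
 *              return 0;
 *      }
 *
 * The mapping is device-managed, so no explicit unmap is needed on remove.
 */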

/**
 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource() for a
 *                                         platform device, looking the
 *                                         resource up by name
 * @pdev: platform device to use both for memory resource lookup as well as
 *        resource management
 * @name: name of the resource
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
                                      const char *name)
{
        struct resource *res;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */

/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
        int ret;
#ifdef CONFIG_SPARC
        /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
        if (!dev || num >= dev->archdata.num_irqs)
                goto out_not_found;
        ret = dev->archdata.irqs[num];
        goto out;
#else
        struct resource *r;

        if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
                ret = of_irq_get(dev->dev.of_node, num);
                if (ret > 0 || ret == -EPROBE_DEFER)
                        goto out;
        }

        r = platform_get_resource(dev, IORESOURCE_IRQ, num);
        if (has_acpi_companion(&dev->dev)) {
                if (r && r->flags & IORESOURCE_DISABLED) {
                        ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
                        if (ret)
                                goto out;
                }
        }

        /*
         * The resource may carry trigger flags that need to be programmed
         * into the interrupt: the IORESOURCE_BITS portion of the flags maps
         * directly onto the IRQ trigger-type settings.
         */
        if (r && r->flags & IORESOURCE_BITS) {
                struct irq_data *irqd;

                irqd = irq_get_irq_data(r->start);
                if (!irqd)
                        goto out_not_found;
                irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
        }

        if (r) {
                ret = r->start;
                goto out;
        }

        /*
         * For the index 0 interrupt, allow falling back to an ACPI GpioInt
         * resource. Many devices expose only a single IRQ, so this fallback
         * gives drivers a common code path whether the interrupt comes from
         * an Interrupt or a GpioInt resource.
         */
        if (num == 0 && has_acpi_companion(&dev->dev)) {
                ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
                /* Our callers expect -ENXIO for missing IRQs. */
                if (ret >= 0 || ret == -EPROBE_DEFER)
                        goto out;
        }

#endif
out_not_found:
        ret = -ENXIO;
out:
        if (WARN(!ret, "0 is an invalid IRQ number\n"))
                return -EINVAL;
        return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device and prints an error message if finding
 * the IRQ fails. Device drivers should check the return value for errors so
 * as to not pass a negative integer value to the request_irq() APIs.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
        int ret;

        ret = platform_get_irq_optional(dev, num);
        if (ret < 0)
                return dev_err_probe(&dev->dev, ret,
                                     "IRQ index %u not found\n", num);

        return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq);
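/*
 * A minimal sketch of how a driver typically consumes platform_get_irq();
 * foo_probe() and foo_irq_handler() are hypothetical:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              int irq = platform_get_irq(pdev, 0);
 *
 *              if (irq < 0)
 *                      return irq;
 *
 *              return devm_request_irq(&pdev->dev, irq, foo_irq_handler,
 *                                      0, dev_name(&pdev->dev), NULL);
 *      }
 */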

/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or -EPROBE_DEFER.
 */
int platform_irq_count(struct platform_device *dev)
{
        int ret, nr = 0;

        while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
                nr++;

        if (ret == -EPROBE_DEFER)
                return ret;

        return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);

struct irq_affinity_devres {
        unsigned int count;
        unsigned int irq[];
};

static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
{
        struct resource *r;

        r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
        if (r)
                irqresource_disabled(r, 0);
}

static void devm_platform_get_irqs_affinity_release(struct device *dev,
                                                    void *res)
{
        struct irq_affinity_devres *ptr = res;
        int i;

        for (i = 0; i < ptr->count; i++) {
                irq_dispose_mapping(ptr->irq[i]);

                if (has_acpi_companion(dev))
                        platform_disable_acpi_irq(to_platform_device(dev), i);
        }
}

/**
 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
 *                                   device using an interrupt affinity descriptor
 * @dev: platform device pointer
 * @affd: affinity descriptor
 * @minvec: minimum count of interrupt vectors
 * @maxvec: maximum count of interrupt vectors
 * @irqs: pointer holder for IRQ numbers
 *
 * Gets a set of IRQs for a platform device, and updates the IRQ affinity
 * according to the passed affinity descriptor.
 *
 * Return: Number of vectors on success, negative error number on failure.
 */
int devm_platform_get_irqs_affinity(struct platform_device *dev,
                                    struct irq_affinity *affd,
                                    unsigned int minvec,
                                    unsigned int maxvec,
                                    int **irqs)
{
        struct irq_affinity_devres *ptr;
        struct irq_affinity_desc *desc;
        size_t size;
        int i, ret, nvec;

        if (!affd)
                return -EPERM;

        if (maxvec < minvec)
                return -ERANGE;

        nvec = platform_irq_count(dev);
        if (nvec < 0)
                return nvec;

        if (nvec < minvec)
                return -ENOSPC;

        nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
        if (nvec < minvec)
                return -ENOSPC;

        if (nvec > maxvec)
                nvec = maxvec;

        size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
        ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
                           GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ptr->count = nvec;

        for (i = 0; i < nvec; i++) {
                int irq = platform_get_irq(dev, i);
                if (irq < 0) {
                        ret = irq;
                        goto err_free_devres;
                }
                ptr->irq[i] = irq;
        }

        desc = irq_create_affinity_masks(nvec, affd);
        if (!desc) {
                ret = -ENOMEM;
                goto err_free_devres;
        }

        for (i = 0; i < nvec; i++) {
                ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
                if (ret) {
                        dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
                                ptr->irq[i], ret);
                        goto err_free_desc;
                }
        }

        devres_add(&dev->dev, ptr);

        kfree(desc);

        *irqs = ptr->irq;

        return nvec;

err_free_desc:
        kfree(desc);
err_free_devres:
        devres_free(ptr);
        return ret;
}
EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
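/*
 * Hedged sketch of a multi-queue driver spreading its interrupt vectors with
 * the helper above; the foo_probe() name, the affinity parameters and the
 * vector bounds are hypothetical:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct irq_affinity affd = { .pre_vectors = 1 };
 *              int *irqs, nvec;
 *
 *              nvec = devm_platform_get_irqs_affinity(pdev, &affd, 2, 16, &irqs);
 *              if (nvec < 0)
 *                      return nvec;
 *              // irqs[0..nvec-1] now hold the spread-out IRQ numbers
 *              return 0;
 *      }
 */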

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 *
 * Return: a pointer to the resource or NULL on failure.
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
                                              unsigned int type,
                                              const char *name)
{
        u32 i;

        for (i = 0; i < dev->num_resources; i++) {
                struct resource *r = &dev->resource[i];

                if (unlikely(!r->name))
                        continue;

                if (type == resource_type(r) && !strcmp(r->name, name))
                        return r;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);

static int __platform_get_irq_byname(struct platform_device *dev,
                                     const char *name)
{
        struct resource *r;
        int ret;

        if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
                ret = of_irq_get_byname(dev->dev.of_node, name);
                if (ret > 0 || ret == -EPROBE_DEFER)
                        return ret;
        }

        r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
        if (r) {
                if (WARN(!r->start, "0 is an invalid IRQ number\n"))
                        return -EINVAL;
                return r->start;
        }

        return -ENXIO;
}

/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an IRQ like platform_get_irq(), but then by name rather than by index.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
        int ret;

        ret = __platform_get_irq_byname(dev, name);
        if (ret < 0)
                return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
                                     name);
        return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);

/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
                                     const char *name)
{
        return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);

/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in the array
 *
 * Return: 0 on success, negative error number on failure. On failure, all
 * devices registered so far are unregistered again.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
        int i, ret = 0;

        for (i = 0; i < num; i++) {
                ret = platform_device_register(devs[i]);
                if (ret) {
                        while (--i >= 0)
                                platform_device_unregister(devs[i]);
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
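/*
 * Hedged sketch of board code registering a batch of statically defined
 * devices with platform_add_devices(); the foo/bar device definitions are
 * hypothetical:
 *
 *      static struct platform_device foo_device = { .name = "foo", .id = -1 };
 *      static struct platform_device bar_device = { .name = "bar", .id = -1 };
 *
 *      static struct platform_device *board_devices[] __initdata = {
 *              &foo_device,
 *              &bar_device,
 *      };
 *
 *      // from board/machine init code:
 *      //      platform_add_devices(board_devices, ARRAY_SIZE(board_devices));
 */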

struct platform_object {
        struct platform_device pdev;
        char name[];
};

/*
 * Set up default DMA masks for platform devices if they weren't previously
 * set by the architecture / firmware.
 */
static void setup_pdev_dma_masks(struct platform_device *pdev)
{
        pdev->dev.dma_parms = &pdev->dma_parms;

        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        if (!pdev->dev.dma_mask) {
                pdev->platform_dma_mask = DMA_BIT_MASK(32);
                pdev->dev.dma_mask = &pdev->platform_dma_mask;
        }
}

/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device. This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
        if (!IS_ERR_OR_NULL(pdev))
                put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

static void platform_device_release(struct device *dev)
{
        struct platform_object *pa = container_of(dev, struct platform_object,
                                                  pdev.dev);

        of_node_put(pa->pdev.dev.of_node);
        kfree(pa->pdev.dev.platform_data);
        kfree(pa->pdev.mfd_cell);
        kfree(pa->pdev.resource);
        kfree(pa->pdev.driver_override);
        kfree(pa);
}

/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
        struct platform_object *pa;

        pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
        if (pa) {
                strcpy(pa->name, name);
                pa->pdev.name = pa->name;
                pa->pdev.id = id;
                device_initialize(&pa->pdev.dev);
                pa->pdev.dev.release = platform_device_release;
                setup_pdev_dma_masks(&pa->pdev);
        }

        return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);

/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device. The memory associated
 * with the resources will be freed when the platform device is released.
 */
int platform_device_add_resources(struct platform_device *pdev,
                                  const struct resource *res, unsigned int num)
{
        struct resource *r = NULL;

        if (res) {
                r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
                if (!r)
                        return -ENOMEM;
        }

        kfree(pdev->resource);
        pdev->resource = r;
        pdev->num_resources = num;
        return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);

/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add data to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer. The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
                             size_t size)
{
        void *d = NULL;

        if (data) {
                d = kmemdup(data, size, GFP_KERNEL);
                if (!d)
                        return -ENOMEM;
        }

        kfree(pdev->dev.platform_data);
        pdev->dev.platform_data = d;
        return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add - add a platform device to the device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
        u32 i;
        int ret;

        if (!pdev)
                return -EINVAL;

        if (!pdev->dev.parent)
                pdev->dev.parent = &platform_bus;

        pdev->dev.bus = &platform_bus_type;

        switch (pdev->id) {
        default:
                dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
                break;
        case PLATFORM_DEVID_NONE:
                dev_set_name(&pdev->dev, "%s", pdev->name);
                break;
        case PLATFORM_DEVID_AUTO:
                /*
                 * Automatically allocated device ID. Mark it as such so the
                 * ID gets freed again on removal, and add a ".auto" suffix
                 * to avoid collisions with explicit IDs.
                 */
                ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
                if (ret < 0)
                        goto err_out;
                pdev->id = ret;
                pdev->id_auto = true;
                dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
                break;
        }

        for (i = 0; i < pdev->num_resources; i++) {
                struct resource *p, *r = &pdev->resource[i];

                if (r->name == NULL)
                        r->name = dev_name(&pdev->dev);

                p = r->parent;
                if (!p) {
                        if (resource_type(r) == IORESOURCE_MEM)
                                p = &iomem_resource;
                        else if (resource_type(r) == IORESOURCE_IO)
                                p = &ioport_resource;
                }

                if (p) {
                        ret = insert_resource(p, r);
                        if (ret) {
                                dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
                                goto failed;
                        }
                }
        }

        pr_debug("Registering platform device '%s'. Parent at %s\n",
                 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

        ret = device_add(&pdev->dev);
        if (ret == 0)
                return ret;

failed:
        if (pdev->id_auto) {
                ida_free(&platform_devid_ida, pdev->id);
                pdev->id = PLATFORM_DEVID_AUTO;
        }

        while (i--) {
                struct resource *r = &pdev->resource[i];
                if (r->parent)
                        release_resource(r);
        }

err_out:
        return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
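/*
 * Hedged sketch of the dynamic registration flow built from the helpers
 * above; "my-device" and my_resources are hypothetical:
 *
 *      struct platform_device *pdev;
 *      int ret;
 *
 *      pdev = platform_device_alloc("my-device", PLATFORM_DEVID_AUTO);
 *      if (!pdev)
 *              return -ENOMEM;
 *
 *      ret = platform_device_add_resources(pdev, my_resources,
 *                                          ARRAY_SIZE(my_resources));
 *      if (!ret)
 *              ret = platform_device_add(pdev);
 *      if (ret)
 *              platform_device_put(pdev);      // also frees the copied resources
 */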

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@pdev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
        u32 i;

        if (!IS_ERR_OR_NULL(pdev)) {
                device_del(&pdev->dev);

                if (pdev->id_auto) {
                        ida_free(&platform_devid_ida, pdev->id);
                        pdev->id = PLATFORM_DEVID_AUTO;
                }

                for (i = 0; i < pdev->num_resources; i++) {
                        struct resource *r = &pdev->resource[i];
                        if (r->parent)
                                release_resource(r);
                }
        }
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 *
 * NOTE: _Never_ directly free @pdev after calling this function, even if it
 * returned an error! Always use platform_device_put() to give up the
 * reference initialised in this function instead.
 */
int platform_device_register(struct platform_device *pdev)
{
        device_initialize(&pdev->dev);
        setup_pdev_dma_masks(pdev);
        return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop the reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
        platform_device_del(pdev);
        platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 * @pdevinfo: data used to create the device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
                const struct platform_device_info *pdevinfo)
{
        int ret;
        struct platform_device *pdev;

        pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
        if (!pdev)
                return ERR_PTR(-ENOMEM);

        pdev->dev.parent = pdevinfo->parent;
        pdev->dev.fwnode = pdevinfo->fwnode;
        pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
        pdev->dev.of_node_reused = pdevinfo->of_node_reused;

        if (pdevinfo->dma_mask) {
                pdev->platform_dma_mask = pdevinfo->dma_mask;
                pdev->dev.dma_mask = &pdev->platform_dma_mask;
                pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
        }

        ret = platform_device_add_resources(pdev,
                        pdevinfo->res, pdevinfo->num_res);
        if (ret)
                goto err;

        ret = platform_device_add_data(pdev,
                        pdevinfo->data, pdevinfo->size_data);
        if (ret)
                goto err;

        if (pdevinfo->properties) {
                ret = device_create_managed_software_node(&pdev->dev,
                                                          pdevinfo->properties, NULL);
                if (ret)
                        goto err;
        }

        ret = platform_device_add(pdev);
        if (ret) {
err:
                ACPI_COMPANION_SET(&pdev->dev, NULL);
                platform_device_put(pdev);
                return ERR_PTR(ret);
        }

        return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
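/*
 * Hedged sketch of registering a device through platform_device_register_full();
 * the name, the my_pdata structure and the mask shown are hypothetical:
 *
 *      static const struct my_pdata pdata = { .foo = 1 };
 *
 *      struct platform_device_info info = {
 *              .name           = "my-device",
 *              .id             = PLATFORM_DEVID_NONE,
 *              .data           = &pdata,
 *              .size_data      = sizeof(pdata),
 *              .dma_mask       = DMA_BIT_MASK(32),
 *      };
 *      struct platform_device *pdev = platform_device_register_full(&info);
 *
 *      if (IS_ERR(pdev))
 *              return PTR_ERR(pdev);
 */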

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
                               struct module *owner)
{
        drv->driver.owner = owner;
        drv->driver.bus = &platform_bus_type;

        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
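/*
 * Hedged sketch of a minimal platform driver; the foo_* symbols and the
 * "vendor,foo" compatible string are hypothetical. Drivers normally use the
 * platform_driver_register() / module_platform_driver() wrappers rather than
 * calling __platform_driver_register() directly:
 *
 *      static const struct of_device_id foo_of_match[] = {
 *              { .compatible = "vendor,foo" },
 *              { }
 *      };
 *      MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *      static struct platform_driver foo_driver = {
 *              .probe  = foo_probe,
 *              .driver = {
 *                      .name           = "foo",
 *                      .of_match_table = foo_of_match,
 *              },
 *      };
 *      module_platform_driver(foo_driver);
 */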

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

static int platform_probe_fail(struct platform_device *pdev)
{
        return -ENXIO;
}

/**
 * __platform_driver_probe - register a driver for a non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and the driver is not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
                int (*probe)(struct platform_device *), struct module *module)
{
        int retval, code;

        if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
                pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
                       drv->driver.name, __func__);
                return -EINVAL;
        }

        /*
         * We have to run our probes synchronously because we check if
         * we find any devices to bind to and exit with error if there
         * are any.
         */
        drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

        /*
         * Prevent the driver from requesting probe deferral to avoid
         * further futile probe attempts.
         */
        drv->prevent_deferred_probe = true;

        /* make sure driver won't have bind/unbind attributes */
        drv->driver.suppress_bind_attrs = true;

        /* temporary section violation during probe() */
        drv->probe = probe;
        retval = code = __platform_driver_register(drv, module);
        if (retval)
                return retval;

        /*
         * Fix up that section violation, being paranoid about code scanning
         * the list of drivers in order to probe new devices.  Check to see
         * if the probe was successful, and make sure any forced probes of
         * new devices fail.
         */
        spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
        drv->probe = platform_probe_fail;
        if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
                retval = -ENODEV;
        spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

        if (code != retval)
                platform_driver_unregister(drv);
        return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register a driver and create the corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
                        struct platform_driver *driver,
                        int (*probe)(struct platform_device *),
                        struct resource *res, unsigned int n_res,
                        const void *data, size_t size, struct module *module)
{
        struct platform_device *pdev;
        int error;

        pdev = platform_device_alloc(driver->driver.name, -1);
        if (!pdev) {
                error = -ENOMEM;
                goto err_out;
        }

        error = platform_device_add_resources(pdev, res, n_res);
        if (error)
                goto err_pdev_put;

        error = platform_device_add_data(pdev, data, size);
        if (error)
                goto err_pdev_put;

        error = platform_device_add(pdev);
        if (error)
                goto err_pdev_put;

        error = __platform_driver_probe(driver, probe, module);
        if (error)
                goto err_pdev_del;

        return pdev;

err_pdev_del:
        platform_device_del(pdev);
err_pdev_put:
        platform_device_put(pdev);
err_out:
        return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
                                unsigned int count, struct module *owner)
{
        unsigned int i;
        int err;

        for (i = 0; i < count; i++) {
                pr_debug("registering platform driver %ps\n", drivers[i]);

                err = __platform_driver_register(drivers[i], owner);
                if (err < 0) {
                        pr_err("failed to register platform driver %ps: %d\n",
                               drivers[i], err);
                        goto error;
                }
        }

        return 0;

error:
        while (i--) {
                pr_debug("unregistering platform driver %ps\n", drivers[i]);
                platform_driver_unregister(drivers[i]);
        }

        return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
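/*
 * Hedged sketch of registering several related drivers at once via the
 * platform_register_drivers() wrapper; foo_driver and bar_driver are
 * hypothetical:
 *
 *      static struct platform_driver * const my_drivers[] = {
 *              &foo_driver,
 *              &bar_driver,
 *      };
 *
 *      static int __init my_init(void)
 *      {
 *              return platform_register_drivers(my_drivers,
 *                                               ARRAY_SIZE(my_drivers));
 *      }
 */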

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
                                 unsigned int count)
{
        while (count--) {
                pr_debug("unregistering platform driver %ps\n", drivers[count]);
                platform_driver_unregister(drivers[count]);
        }
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);

static const struct platform_device_id *platform_match_id(
                        const struct platform_device_id *id,
                        struct platform_device *pdev)
{
        while (id->name[0]) {
                if (strcmp(pdev->name, id->name) == 0) {
                        pdev->id_entry = id;
                        return id;
                }
                id++;
        }
        return NULL;
}

#ifdef CONFIG_PM_SLEEP

static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
        struct platform_driver *pdrv = to_platform_driver(dev->driver);
        struct platform_device *pdev = to_platform_device(dev);
        int ret = 0;

        if (dev->driver && pdrv->suspend)
                ret = pdrv->suspend(pdev, mesg);

        return ret;
}

static int platform_legacy_resume(struct device *dev)
{
        struct platform_driver *pdrv = to_platform_driver(dev->driver);
        struct platform_device *pdev = to_platform_device(dev);
        int ret = 0;

        if (dev->driver && pdrv->resume)
                ret = pdrv->resume(pdev);

        return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

int platform_pm_suspend(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->suspend)
                        ret = drv->pm->suspend(dev);
        } else {
                ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
        }

        return ret;
}

int platform_pm_resume(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->resume)
                        ret = drv->pm->resume(dev);
        } else {
                ret = platform_legacy_resume(dev);
        }

        return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->freeze)
                        ret = drv->pm->freeze(dev);
        } else {
                ret = platform_legacy_suspend(dev, PMSG_FREEZE);
        }

        return ret;
}

int platform_pm_thaw(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->thaw)
                        ret = drv->pm->thaw(dev);
        } else {
                ret = platform_legacy_resume(dev);
        }

        return ret;
}

int platform_pm_poweroff(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->poweroff)
                        ret = drv->pm->poweroff(dev);
        } else {
                ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
        }

        return ret;
}

int platform_pm_restore(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->restore)
                        ret = drv->pm->restore(dev);
        } else {
                ret = platform_legacy_resume(dev);
        }

        return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */

/*
 * The modalias attribute and uevent variable enable hands-off userspace
 * driver loading: hotplug events can run "modprobe $MODALIAS", and coldplug
 * can recover missed events with "modprobe $(cat modalias)".
 */
static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct platform_device *pdev = to_platform_device(dev);
        int len;

        len = of_device_modalias(dev, buf, PAGE_SIZE);
        if (len != -ENODEV)
                return len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct platform_device *pdev = to_platform_device(dev);
        ssize_t len;

        device_lock(dev);
        len = sysfs_emit(buf, "%s\n", pdev->driver_override);
        device_unlock(dev);

        return len;
}

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct platform_device *pdev = to_platform_device(dev);
        int ret;

        ret = driver_set_override(dev, &pdev->driver_override, buf, count);
        if (ret)
                return ret;

        return count;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *platform_dev_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_numa_node.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
                                          int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);

        if (a == &dev_attr_numa_node.attr &&
            dev_to_node(dev) == NUMA_NO_NODE)
                return 0;

        return a->mode;
}

static const struct attribute_group platform_dev_group = {
        .attrs = platform_dev_attrs,
        .is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);

/**
 * platform_match - bind a platform device to a platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Matching is attempted in order: an explicit driver_override, an OF-style
 * (device tree) match, an ACPI match, the driver's id_table, and finally a
 * plain comparison of the device name against the driver name. Returns 1 on
 * a match, 0 otherwise.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct platform_driver *pdrv = to_platform_driver(drv);

        /* When driver_override is set, only bind to the matching driver */
        if (pdev->driver_override)
                return !strcmp(pdev->driver_override, drv->name);

        /* Attempt an OF style match first */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI style match */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        /* Then try to match against the id table */
        if (pdrv->id_table)
                return platform_match_id(pdrv->id_table, pdev) != NULL;

        /* fall-back to driver name match */
        return (strcmp(pdev->name, drv->name) == 0);
}

static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct platform_device *pdev = to_platform_device(dev);
        int rc;

        /* Some devices have extra OF data and an OF-style MODALIAS */
        rc = of_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
                       pdev->name);
        return 0;
}

static int platform_probe(struct device *_dev)
{
        struct platform_driver *drv = to_platform_driver(_dev->driver);
        struct platform_device *dev = to_platform_device(_dev);
        int ret;

        /*
         * A driver registered using platform_driver_probe() cannot be bound
         * again later because its probe function typically lives in __init
         * code and is gone; __platform_driver_probe() points .probe at
         * platform_probe_fail for such drivers. Bail out early and don't
         * even prepare clocks or PM domains for them.
         */
        if (unlikely(drv->probe == platform_probe_fail))
                return -ENXIO;

        ret = of_clk_set_defaults(_dev->of_node, false);
        if (ret < 0)
                return ret;

        ret = dev_pm_domain_attach(_dev, true);
        if (ret)
                goto out;

        if (drv->probe) {
                ret = drv->probe(dev);
                if (ret)
                        dev_pm_domain_detach(_dev, true);
        }

out:
        if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
                dev_warn(_dev, "probe deferral not supported\n");
                ret = -ENXIO;
        }

        return ret;
}

static void platform_remove(struct device *_dev)
{
        struct platform_driver *drv = to_platform_driver(_dev->driver);
        struct platform_device *dev = to_platform_device(_dev);

        if (drv->remove) {
                int ret = drv->remove(dev);

                if (ret)
                        dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
        }
        dev_pm_domain_detach(_dev, true);
}

static void platform_shutdown(struct device *_dev)
{
        struct platform_device *dev = to_platform_device(_dev);
        struct platform_driver *drv;

        if (!_dev->driver)
                return;

        drv = to_platform_driver(_dev->driver);
        if (drv->shutdown)
                drv->shutdown(dev);
}

static int platform_dma_configure(struct device *dev)
{
        struct platform_driver *drv = to_platform_driver(dev->driver);
        enum dev_dma_attr attr;
        int ret = 0;

        if (dev->of_node) {
                ret = of_dma_configure(dev, dev->of_node, true);
        } else if (has_acpi_companion(dev)) {
                attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
                ret = acpi_dma_configure(dev, attr);
        }

        if (!ret && !drv->driver_managed_dma) {
                ret = iommu_device_use_default_domain(dev);
                if (ret)
                        arch_teardown_dma_ops(dev);
        }

        return ret;
}

static void platform_dma_cleanup(struct device *dev)
{
        struct platform_driver *drv = to_platform_driver(dev->driver);

        if (!drv->driver_managed_dma)
                iommu_device_unuse_default_domain(dev);
}

static const struct dev_pm_ops platform_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
        USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
        .name           = "platform",
        .dev_groups     = platform_dev_groups,
        .match          = platform_match,
        .uevent         = platform_uevent,
        .probe          = platform_probe,
        .remove         = platform_remove,
        .shutdown       = platform_shutdown,
        .dma_configure  = platform_dma_configure,
        .dma_cleanup    = platform_dma_cleanup,
        .pm             = &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

static inline int __platform_match(struct device *dev, const void *drv)
{
        return platform_match(dev, (struct device_driver *)drv);
}

/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 */
struct device *platform_find_device_by_driver(struct device *start,
                                              const struct device_driver *drv)
{
        return bus_find_device(&platform_bus_type, start, drv,
                               __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);

void __weak __init early_platform_cleanup(void) { }

int __init platform_bus_init(void)
{
        int error;

        early_platform_cleanup();

        error = device_register(&platform_bus);
        if (error) {
                put_device(&platform_bus);
                return error;
        }
        error = bus_register(&platform_bus_type);
        if (error)
                device_unregister(&platform_bus);
        of_platform_register_reconfig_notifier();
        return error;
}