// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/core.c - core driver model code (device registration, etc)
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2006 Novell, Inc.
 */

#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */

#include "base.h"
#include "physical_location.h"
#include "power/power.h"

#ifdef CONFIG_SYSFS_DEPRECATED
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif

/* Device links support. */
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;

/**
 * fwnode_link_add - Create a link between two fwnode_handles.
 * @con: Consumer end of the link.
 * @sup: Supplier end of the link.
 *
 * Create a fwnode link between fwnode handles @con and @sup. The fwnode link
 * is used to create a device link between the two devices that correspond to
 * the two fwnode handles when those devices are created later.
 *
 * Attempts to create duplicate links between the same pair of fwnode handles
 * are ignored and there is no reference counting.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
{
	struct fwnode_link *link;
	int ret = 0;

	mutex_lock(&fwnode_link_lock);

	list_for_each_entry(link, &sup->consumers, s_hook)
		if (link->consumer == con)
			goto out;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}

	link->supplier = sup;
	INIT_LIST_HEAD(&link->s_hook);
	link->consumer = con;
	INIT_LIST_HEAD(&link->c_hook);

	list_add(&link->s_hook, &sup->consumers);
	list_add(&link->c_hook, &con->suppliers);
	pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
		 con, sup);
out:
	mutex_unlock(&fwnode_link_lock);

	return ret;
}

/**
 * __fwnode_link_del - Delete a link between two fwnode_handles.
 * @link: the fwnode_link to be deleted
 *
 * The fwnode_link_lock needs to be held when this function is called.
 */
static void __fwnode_link_del(struct fwnode_link *link)
{
	pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
		 link->consumer, link->supplier);
	list_del(&link->s_hook);
	list_del(&link->c_hook);
	kfree(link);
}

/**
 * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
 * @fwnode: fwnode whose supplier links need to be deleted
 *
 * Deletes all supplier links connecting directly to @fwnode.
 */
static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	mutex_lock(&fwnode_link_lock);
	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
		__fwnode_link_del(link);
	mutex_unlock(&fwnode_link_lock);
}

/**
 * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
 * @fwnode: fwnode whose consumer links need to be deleted
 *
 * Deletes all consumer links connecting directly to @fwnode.
 */
static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	mutex_lock(&fwnode_link_lock);
	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
		__fwnode_link_del(link);
	mutex_unlock(&fwnode_link_lock);
}

/**
 * fwnode_links_purge - Delete all links connected to a fwnode_handle.
 * @fwnode: fwnode whose links need to be deleted
 *
 * Deletes all links connecting directly to a fwnode.
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}

/**
 * fw_devlink_purge_absent_suppliers - Purge absent supplier fwnode links
 * @fwnode: fwnode that the links need to be purged for
 *
 * Once @fwnode is known to never become a device, delete the consumer links
 * of @fwnode and of all its descendant fwnodes, so that no consumer is left
 * waiting on a supplier that will never show up.
 */
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	/* Don't purge consumer links of an added child */
	if (fwnode->dev)
		return;

	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
	fwnode_links_purge_consumers(fwnode);

	fwnode_for_each_available_child_node(fwnode, child)
		fw_devlink_purge_absent_suppliers(child);
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);

#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}

static void device_link_synchronize_removal(void)
{
	synchronize_srcu(&device_links_srcu);
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
}
#else
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif

static inline void device_link_synchronize_removal(void)
{
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del(&link->s_node);
	list_del(&link->c_node);
}
#endif

static bool device_is_ancestor(struct device *dev, struct device *target)
{
	while (target->parent) {
		target = target->parent;
		if (dev == target)
			return true;
	}
	return false;
}

/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc).  Return 1 if that is the case or 0 otherwise.
 */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	/*
	 * The "ancestors" check is needed to catch the case when the target
	 * device has not been completely initialized yet and it is still
	 * missing from the list of children of its parent device.
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}

static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows the supplier is already functional (for
			 * example, it has just acquired some resources from
			 * the supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}

static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}

/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}

#define to_devlink(dev)	container_of((dev), struct device_link, link_dev)

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	const char *output;

	switch (to_devlink(dev)->status) {
	case DL_STATE_NONE:
		output = "not tracked";
		break;
	case DL_STATE_DORMANT:
		output = "dormant";
		break;
	case DL_STATE_AVAILABLE:
		output = "available";
		break;
	case DL_STATE_CONSUMER_PROBE:
		output = "consumer probing";
		break;
	case DL_STATE_ACTIVE:
		output = "active";
		break;
	case DL_STATE_SUPPLIER_UNBIND:
		output = "supplier unbinding";
		break;
	default:
		output = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);

static ssize_t auto_remove_on_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);
	const char *output;

	if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		output = "supplier unbind";
	else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
		output = "consumer unbind";
	else
		output = "never";

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(auto_remove_on);

static ssize_t runtime_pm_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);

static ssize_t sync_state_only_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n",
			  !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);

static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);

static void device_link_release_fn(struct work_struct *work)
{
	struct device_link *link = container_of(work, struct device_link, rm_work);

	/* Ensure that all references to the link object have been dropped. */
	device_link_synchronize_removal();

	pm_runtime_release_supplier(link);
	/*
	 * If supplier_preactivated is set, the link has been dropped between
	 * the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls
	 * in __driver_probe_device().  In that case, drop the supplier's
	 * PM-runtime usage counter to remove the reference taken by
	 * pm_runtime_get_suppliers().
	 */
	if (link->supplier_preactivated)
		pm_runtime_put_noidle(link->supplier);

	pm_request_idle(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}

static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	INIT_WORK(&link->rm_work, device_link_release_fn);
	/*
	 * It may take a while to complete this work because of the SRCU
	 * synchronization in device_link_release_fn() and if the consumer or
	 * supplier devices get deleted when it runs, so put it into the
	 * "long" workqueue.
	 */
	queue_work(system_long_wq, &link->rm_work);
}

static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};

static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}

static void devlink_remove_symlinks(struct device *dev,
				    struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	if (device_is_registered(con)) {
		snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
		sysfs_remove_link(&con->kobj, buf);
	}
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}

static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};

static int __init devlink_class_init(void)
{
	int ret;

	ret = class_register(&devlink_class);
	if (ret)
		return ret;

	ret = class_interface_register(&devlink_class_intf);
	if (ret)
		class_unregister(&devlink_class);

	return ret;
}
postcore_initcall(devlink_class_init);

#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY | \
			       DL_FLAG_INFERRED)

#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)

/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * Return the link object on success or NULL on failure (for example, when an
 * invalid flag combination is passed, the supplier is not registered yet, or
 * creating the link would introduce a non-SYNC_STATE_ONLY dependency cycle).
 *
 * If DL_FLAG_STATELESS is set in @flags, the link is stateless: its status is
 * not tracked by the driver core and the caller must delete it explicitly
 * with device_link_del().  Stateless links are reference counted, so repeated
 * device_link_add() calls for the same pair need matching deletions.
 * Otherwise the link is managed by the driver core and its status is updated
 * automatically as the consumer and supplier drivers are bound and unbound.
 *
 * If DL_FLAG_PM_RUNTIME is set, the link is also used for PM-runtime
 * purposes, so the supplier is kept active whenever the consumer is active.
 * Setting DL_FLAG_RPM_ACTIVE in addition resumes the supplier and takes an
 * extra "active" reference on the link's PM-runtime count at creation time.
 *
 * DL_FLAG_AUTOREMOVE_CONSUMER causes the link to be removed automatically
 * when the consumer driver unbinds and DL_FLAG_AUTOREMOVE_SUPPLIER does the
 * same when the supplier driver unbinds.  DL_FLAG_AUTOPROBE_CONSUMER makes
 * the driver core probe the consumer once the supplier has bound and cannot
 * be combined with either of the AUTOREMOVE flags.
 *
 * DL_FLAG_SYNC_STATE_ONLY links only affect the sync_state() behavior of the
 * supplier and do not order probing or suspend/resume, which is also why
 * they are allowed to participate in dependency "cycles".
 *
 * A side effect of the link creation is a reordering of dpm_list and the
 * devices_kset list: the consumer device and all devices depending on it are
 * moved to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	if (!consumer || !supplier || consumer == supplier ||
	    flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
	 * the supplier already in the graph, return NULL. If the link is a
	 * SYNC_STATE_ONLY link, we don't check for reverse dependencies
	 * because it only affects sync_state() callbacks.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * SYNC_STATE_ONLY links are useless once a consumer device has
	 * probed. So, only create them if the consumer hasn't probed yet.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    consumer->links.status != DL_DEV_NO_DRIVER &&
	    consumer->links.status != DL_DEV_PROBING) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		if (link->flags & DL_FLAG_INFERRED &&
		    !(flags & DL_FLAG_INFERRED))
			link->flags &= ~DL_FLAG_INFERRED;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s:%s--%s:%s",
		     dev_bus_name(supplier), dev_name(supplier),
		     dev_bus_name(consumer), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(&link->link_dev);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe
	 * to resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
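
/*
 * Illustrative sketch, not part of this file: a consumer driver would
 * typically create a managed link early in its probe path, e.g.
 *
 *	link = device_link_add(dev, supplier_dev,
 *			       DL_FLAG_AUTOREMOVE_CONSUMER |
 *			       DL_FLAG_PM_RUNTIME);
 *	if (!link)
 *		return -EINVAL;
 *
 * where "supplier_dev" is a hypothetical supplier device the caller already
 * holds a reference to.  With DL_FLAG_AUTOREMOVE_CONSUMER the link is dropped
 * automatically when the consumer driver unbinds, so no explicit
 * device_link_del() call is needed.
 */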

static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	device_link_remove_from_lists(link);
	device_unregister(&link->link_dev);
}

static void device_link_put_kref(struct device_link *link)
{
	if (link->flags & DL_FLAG_STATELESS)
		kref_put(&link->kref, __device_link_del);
	else if (!device_is_registered(link->consumer))
		__device_link_del(&link->kref);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}

/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.  If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices:  Their links are purged on removal
 * and calling device_link_del() is then no longer allowed.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);

/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			device_link_put_kref(link);
			break;
		}
	}

	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);

static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}

static bool dev_is_best_effort(struct device *dev)
{
	return (fw_devlink_best_effort && dev->can_match) ||
		(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
}

/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers.  Walk the list of the
 * device's links to suppliers and see if all of them are available.  If not,
 * simply return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here.  It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers.  This means we need
 * to mark the link as "consumer probe in progress" to make the supplier
 * removal wait for us to complete (and do not let the supplier go away until
 * then).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0, fwnode_ret = 0;
	struct fwnode_handle *sup_fw;

	/*
	 * Device waiting for supplier to become available is not allowed to
	 * probe.
	 */
	mutex_lock(&fwnode_link_lock);
	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
	    !fw_devlink_is_permissive()) {
		sup_fw = list_first_entry(&dev->fwnode->suppliers,
					  struct fwnode_link,
					  c_hook)->supplier;
		if (!dev_is_best_effort(dev)) {
			fwnode_ret = -EPROBE_DEFER;
			dev_err_probe(dev, -EPROBE_DEFER,
				      "wait for supplier %pfwP\n", sup_fw);
		} else {
			fwnode_ret = -EAGAIN;
		}
	}
	mutex_unlock(&fwnode_link_lock);
	if (fwnode_ret == -EPROBE_DEFER)
		return fwnode_ret;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {

			if (dev_is_best_effort(dev) &&
			    link->flags & DL_FLAG_INFERRED &&
			    !link->supplier->can_match) {
				ret = -EAGAIN;
				continue;
			}

			device_links_missing_supplier(dev);
			dev_err_probe(dev, -EPROBE_DEFER,
				      "supplier %s not ready\n",
				      dev_name(link->supplier));
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();

	return ret ? ret : fwnode_ret;
}

/**
 * __device_links_queue_sync_state - Queue a device for sync_state() callback
 * @dev: Device to call sync_state() on
 * @list: List head to queue the @dev on
 *
 * Queues a device for a sync_state() callback when the device links write
 * lock isn't held. This allows the sync_state() to be done when the device
 * link write lock is held (that is, during device_links_driver_bound()).
 *
 * This function does a get_device() to make sure the device is not freed
 * while on this list.
 *
 * So the caller must also ensure that device_links_flush_sync_list() is
 * called as soon as the device is no longer needed.
 */
static void __device_links_queue_sync_state(struct device *dev,
					    struct list_head *list)
{
	struct device_link *link;

	if (!dev_has_sync_state(dev))
		return;
	if (dev->state_synced)
		return;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;
		if (link->status != DL_STATE_ACTIVE)
			return;
	}

	/*
	 * Set the flag here to avoid adding the same device to a list more
	 * than once. This can happen if new consumers get added to the device
	 * and probed before the list is flushed.
	 */
	dev->state_synced = true;

	if (WARN_ON(!list_empty(&dev->links.defer_sync)))
		return;

	get_device(dev);
	list_add_tail(&dev->links.defer_sync, list);
}

/**
 * device_links_flush_sync_list - Call sync_state() on a list of devices
 * @list: List of devices to call sync_state() on
 * @dont_lock_dev: Device for which lock is already held by the caller
 *
 * Calls sync_state() on all the devices that have been queued for it. This
 * function is used in conjunction with __device_links_queue_sync_state(). The
 * @dont_lock_dev parameter is useful when this function is called from a
 * context where a device lock is already held.
 */
static void device_links_flush_sync_list(struct list_head *list,
					 struct device *dont_lock_dev)
{
	struct device *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
		list_del_init(&dev->links.defer_sync);

		if (dev != dont_lock_dev)
			device_lock(dev);

		if (dev->bus->sync_state)
			dev->bus->sync_state(dev);
		else if (dev->driver && dev->driver->sync_state)
			dev->driver->sync_state(dev);

		if (dev != dont_lock_dev)
			device_unlock(dev);

		put_device(dev);
	}
}

void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}

void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_sync is used for both lists.
		 */
		list_del_init(&dev->links.defer_sync);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	device_links_flush_sync_list(&sync_list, NULL);
}

static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);

static void __device_links_supplier_defer_sync(struct device *sup)
{
	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
		list_add_tail(&sup->links.defer_sync, &deferred_sync);
}

static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}

static ssize_t waiting_for_supplier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	bool val;

	device_lock(dev);
	val = !list_empty(&dev->fwnode->suppliers);
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(waiting_for_supplier);

/**
 * device_links_force_bind - Prepares device to be force bound
 * @dev: Consumer device.
 *
 * device_bind_driver() force binds a device to a driver without calling any
 * driver probe functions. So the consumer really isn't going to wait for any
 * supplier before it's bound to the driver. We still want the device link
 * states to be sensible when this happens.
 *
 * In preparation for device_bind_driver(), this function goes through each
 * supplier device links and checks if the supplier is bound. If it is, then
 * the device link status is set to CONSUMER_PROBE. Otherwise, the device link
 * is dropped. Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_force_bind(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_link_drop_managed(link);
			continue;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
}

/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device binds successfully, it's expected to have created all
	 * the device links it needs to or make new device links as it needs
	 * them. So, fw_devlink no longer needs to create device links to any
	 * of the device's suppliers.
	 *
	 * Also, if a child firmware node of this bound device is not added as
	 * a device by now, assume it is never going to be added and make sure
	 * other devices don't defer probe indefinitely by waiting for such a
	 * child device.
	 */
	if (dev->fwnode && dev->fwnode->dev == dev) {
		struct fwnode_handle *child;

		fwnode_links_purge_suppliers(dev->fwnode);
		fwnode_for_each_available_child_node(dev->fwnode, child)
			fw_devlink_purge_absent_suppliers(child);
	}
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first. Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
			/*
			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
			 * other DL_MANAGED_LINK_FLAGS have been set. So, it's
			 * safe to drop the managed link completely.
			 */
			device_link_drop_managed(link);
		} else if (dev_is_best_effort(dev) &&
			   link->flags & DL_FLAG_INFERRED &&
			   link->status != DL_STATE_CONSUMER_PROBE &&
			   !link->supplier->can_match) {
			/*
			 * When dev_is_best_effort() is true, we ignore device
			 * links to suppliers that don't have a driver.  If the
			 * consumer device still managed to probe, there's no
			 * point in maintaining a device link in a weird state
			 * (consumer probed before supplier). So delete it.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * This needs to be done even for the deleted
		 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
		 * device link that was preventing the supplier from getting a
		 * sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	device_links_flush_sync_list(&sync_list, dev);
}

/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
			device_link_drop_managed(link);
			continue;
		}

		if (link->status != DL_STATE_CONSUMER_PROBE &&
		    link->status != DL_STATE_ACTIVE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}

/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant"
 * and invoke %__device_links_no_driver() to update links to suppliers for it
 * as appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			device_link_drop_managed(link);

		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	list_del_init(&dev->links.defer_sync);
	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the given consumer is
 * probing right now or its driver is present).  Otherwise, change the link
 * state to "supplier unbind" to prevent the consumer from being probed
 * successfully going forward.
 *
 * Return 'false' if there are no probing or active consumers.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}

/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state.  If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as we have
 * changed the state of the link already).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!(link->flags & DL_FLAG_MANAGED) ||
		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}

/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	if (dev->class == &devlink_class)
		return;

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}

#define FW_DEVLINK_FLAGS_PERMISSIVE	(DL_FLAG_INFERRED | \
					 DL_FLAG_SYNC_STATE_ONLY)
#define FW_DEVLINK_FLAGS_ON		(DL_FLAG_INFERRED | \
					 DL_FLAG_AUTOPROBE_CONSUMER)
#define FW_DEVLINK_FLAGS_RPM		(FW_DEVLINK_FLAGS_ON | \
					 DL_FLAG_PM_RUNTIME)

static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
static int __init fw_devlink_setup(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "off") == 0) {
		fw_devlink_flags = 0;
	} else if (strcmp(arg, "permissive") == 0) {
		fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
	} else if (strcmp(arg, "on") == 0) {
		fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
	} else if (strcmp(arg, "rpm") == 0) {
		fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
	}
	return 0;
}
early_param("fw_devlink", fw_devlink_setup);
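
/*
 * For reference: fw_devlink is selected on the kernel command line, e.g.
 * "fw_devlink=permissive" or "fw_devlink=rpm".  Note that the parser above
 * only matches the four known strings, so any other value silently keeps
 * the default ("on").
 */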

static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
	return strtobool(arg, &fw_devlink_strict);
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);

u32 fw_devlink_get_flags(void)
{
	return fw_devlink_flags;
}

static bool fw_devlink_is_permissive(void)
{
	return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}

bool fw_devlink_is_strict(void)
{
	return fw_devlink_strict && !fw_devlink_is_permissive();
}

static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
	if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
		return;

	fwnode_call_int_op(fwnode, add_links);
	fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
}

static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child = NULL;

	fw_devlink_parse_fwnode(fwnode);

	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		fw_devlink_parse_fwtree(child);
}

static void fw_devlink_relax_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_INFERRED))
		return;

	if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
		return;

	pm_runtime_drop_link(link);
	link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
	dev_dbg(link->consumer, "Relaxing link with %s\n",
		dev_name(link->supplier));
}

static int fw_devlink_no_driver(struct device *dev, void *data)
{
	struct device_link *link = to_devlink(dev);

	if (!link->supplier->can_match)
		fw_devlink_relax_link(link);

	return 0;
}

void fw_devlink_drivers_done(void)
{
	fw_devlink_drv_reg_done = true;
	device_links_write_lock();
	class_for_each_device(&devlink_class, NULL, NULL,
			      fw_devlink_no_driver);
	device_links_write_unlock();
}

/**
 * wait_for_init_devices_probe - Try to probe any device needed for init
 *
 * Some devices might need to be probed and bound successfully before the
 * kernel boot sequence can finish and move on to init/userspace. For example,
 * a network interface might need to be bound to be able to mount a NFS
 * rootfs.
 *
 * With fw_devlink=on by default, some of these devices might be blocked from
 * probing because they are waiting on an optional supplier that doesn't have
 * a driver. While fw_devlink will eventually identify such devices and
 * unblock the probing automatically, it might be too late by the time it
 * unblocks the probing of devices. For example, the IP4 autoconfig might
 * timeout before fw_devlink unblocks probing of the network interface.
 *
 * This function is available to temporarily try and probe all devices that
 * have a driver even if some of their suppliers haven't been added or don't
 * have drivers.
 *
 * The drivers can then decide which of the suppliers are optional vs
 * mandatory and probe the device if possible. By the time this function
 * returns, all such "best effort" probes are guaranteed to be completed.
 *
 * Any devices that didn't successfully probe go back to being treated as if
 * this function was never called, so call this function only when boot would
 * fail without it.
 */
void __init wait_for_init_devices_probe(void)
{
	if (!fw_devlink_flags || fw_devlink_is_permissive())
		return;

	/*
	 * Wait for all ongoing probes to finish so that the "best effort" is
	 * only applied to devices that can't probe otherwise.
	 */
	wait_for_device_probe();

	pr_info("Trying to probe devices needed for running init ...\n");
	fw_devlink_best_effort = true;
	driver_deferred_probe_trigger();

	/*
	 * Wait for all "best effort" probes to finish before going back to
	 * normal enforcement.
	 */
	wait_for_device_probe();
	fw_devlink_best_effort = false;
}

static void fw_devlink_unblock_consumers(struct device *dev)
{
	struct device_link *link;

	if (!fw_devlink_flags || fw_devlink_is_permissive())
		return;

	device_links_write_lock();
	list_for_each_entry(link, &dev->links.consumers, s_node)
		fw_devlink_relax_link(link);
	device_links_write_unlock();
}

/**
 * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
 * @con: Device to check dependencies on.
 * @sup: Device to check against.
 *
 * Check if @sup depends on @con or any device dependent on it (its child or
 * its consumer etc).  When such a cyclic dependency is found, convert all
 * device links created solely by fw_devlink into SYNC_STATE_ONLY device
 * links.  This is the equivalent of doing fw_devlink=permissive just between
 * the devices in the cycle. We need to do this because, at this point,
 * fw_devlink can't tell which of these dependencies is not a real dependency.
 *
 * Return 1 if a cycle is found. Otherwise, return 0.
 */
static int fw_devlink_relax_cycle(struct device *con, void *sup)
{
	struct device_link *link;
	int ret;

	if (con == sup)
		return 1;

	ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
	if (ret)
		return ret;

	list_for_each_entry(link, &con->links.consumers, s_node) {
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (!fw_devlink_relax_cycle(link->consumer, sup))
			continue;

		ret = 1;

		fw_devlink_relax_link(link);
	}
	return ret;
}

/**
 * fw_devlink_create_devlink - Create a device link from a consumer to fwnode
 * @con: consumer device for the device link
 * @sup_handle: fwnode handle of supplier
 * @flags: devlink flags
 *
 * This function will try to create a device link between the consumer device
 * @con and the supplier device represented by @sup_handle.
 *
 * The supplier has to be provided as a fwnode because incorrect cycles in
 * fwnode links can sometimes cause the supplier device to never be created.
 * This function detects such cases and returns an error if it cannot create
 * device links where the supplier is an ancestor of the consumer. If the
 * supplier is not an ancestor of the consumer, then it does create a device
 * link.
 *
 * Returns,
 * 0 on successfully creating a device link
 * -EINVAL if the device link cannot be created as expected
 * -EAGAIN if the device link cannot be created right now, but it may be
 *  possible to do that in the future
 */
static int fw_devlink_create_devlink(struct device *con,
				     struct fwnode_handle *sup_handle, u32 flags)
{
	struct device *sup_dev;
	int ret = 0;

	/*
	 * In some cases, a device P might also be a supplier to its child node
	 * C. However, this would defer the probe of C until the probe of P
	 * completes successfully. This is perfectly fine in the device driver
	 * model. device_add() doesn't guarantee probe completion of the device
	 * as part of the call.
	 *
	 * However, there are a few drivers that assume C will finish probing
	 * as soon as it's added and before P finishes probing. So, we provide
	 * a flag to let fw_devlink know not to delay the probe of C until the
	 * probe of P completes successfully.
	 *
	 * When such a flag is set, we can't create device links where P is the
	 * supplier of C as that would delay the probe of C.
	 */
	if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
	    fwnode_is_ancestor_of(sup_handle, con->fwnode))
		return -EINVAL;

	sup_dev = get_dev_from_fwnode(sup_handle);
	if (sup_dev) {
		/*
		 * If it's one of those drivers that don't actually bind to
		 * their device using driver core, then don't wait on this
		 * supplier device indefinitely.
		 */
		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * If this fails, it is due to cycles in device links.  Just
		 * give up on this link and treat it as invalid.
		 */
		if (!device_link_add(con, sup_dev, flags) &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			dev_info(con, "Fixing up cyclic dependency with %s\n",
				 dev_name(sup_dev));
			device_links_write_lock();
			fw_devlink_relax_cycle(con, sup_dev);
			device_links_write_unlock();
			device_link_add(con, sup_dev,
					FW_DEVLINK_FLAGS_PERMISSIVE);
			ret = -EINVAL;
		}

		goto out;
	}

	/* Supplier that's already initialized without a struct device. */
	if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
		return -EINVAL;

	/*
	 * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
	 * cycles. So cycle detection isn't necessary and shouldn't be
	 * done.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY)
		return -EAGAIN;

	/*
	 * If we can't find the supplier device from its fwnode, it might be
	 * due to a cyclic dependency between fwnodes. Some of these cycles can
	 * be broken by applying logic. Check for these types of cycles and
	 * break them so that devices in the cycle probe properly.
	 *
	 * If the supplier's parent is dependent on the consumer, then the
	 * consumer and supplier have a cyclic dependency. Since fw_devlink
	 * can't tell which of the inferred dependencies are incorrect, don't
	 * enforce probe ordering between any of the devices in this cyclic
	 * dependency. Do this by relaxing all the fw_devlink device links in
	 * this cycle and by treating the fwnode link between the consumer and
	 * the supplier as an invalid dependency.
	 */
	sup_dev = fwnode_get_next_parent_dev(sup_handle);
	if (sup_dev && device_is_dependent(con, sup_dev)) {
		dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
			 sup_handle, dev_name(sup_dev));
		device_links_write_lock();
		fw_devlink_relax_cycle(con, sup_dev);
		device_links_write_unlock();
		ret = -EINVAL;
	} else {
		/*
		 * Can't check for cycles yet: the supplier device hasn't been
		 * added and its parent isn't a dependency either, so defer
		 * the device link creation until the supplier shows up.
		 */
		ret = -EAGAIN;
	}

out:
	put_device(sup_dev);
	return ret;
}

/**
 * __fw_devlink_link_to_consumers - Create device links to consumers of a device
 * @dev: Device that needs to be linked to its consumers
 *
 * This function looks at all the consumer fwnodes of @dev and creates device
 * links between the consumer device and @dev (supplier).
 *
 * If the consumer device has not been added yet, then this function creates a
 * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device
 * of the consumer fwnode. This is necessary to make sure @dev doesn't get a
 * sync_state() callback before the real consumer device gets to be added and
 * then probed.
 *
 * Once device links are created from the real consumer to @dev (supplier), the
 * fwnode links are deleted.
 */
static void __fw_devlink_link_to_consumers(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;
	struct fwnode_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
		u32 dl_flags = fw_devlink_get_flags();
		struct device *con_dev;
		bool own_link = true;
		int ret;

		con_dev = get_dev_from_fwnode(link->consumer);
		/*
		 * If consumer device is not available yet, make a "proxy"
		 * SYNC_STATE_ONLY link from the consumer's parent device to
		 * the supplier device. This is necessary to make sure the
		 * supplier doesn't get a sync_state() callback before the real
		 * consumer can create a device link to the supplier.
		 *
		 * This proxy link step is needed to handle the case where the
		 * consumer's parent device is added before the supplier.
		 */
		if (!con_dev) {
			con_dev = fwnode_get_next_parent_dev(link->consumer);
			/*
			 * However, if the consumer's parent device is also the
			 * parent of the supplier, don't create a
			 * consumer-supplier link from the parent to its child
			 * device. Such a dependency is impossible.
			 */
			if (con_dev &&
			    fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
				put_device(con_dev);
				con_dev = NULL;
			} else {
				own_link = false;
				dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
			}
		}

		if (!con_dev)
			continue;

		ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
		put_device(con_dev);
		if (!own_link || ret == -EAGAIN)
			continue;

		__fwnode_link_del(link);
	}
}

/**
 * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
 * @dev: The consumer device that needs to be linked to its suppliers
 * @fwnode: Root of the fwnode tree that is used to create device links
 *
 * This function looks at all the supplier fwnodes of the fwnode tree rooted
 * at @fwnode and creates device links between @dev (consumer) and all the
 * supplier devices of the entire fwnode tree at @fwnode.
 *
 * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
 * and the real suppliers of @dev. Once these device links are created, the
 * fwnode links are deleted. When such device links are successfully created,
 * this function is called recursively on those supplier devices. This is
 * needed to detect and break some invalid cycles in fwnode links.  See
 * fw_devlink_create_devlink() for more details.
 *
 * In addition, it also looks at all the suppliers of the entire fwnode tree
 * because some of the child devices of @dev that have not been added yet
 * (because @dev hasn't probed) might already have their suppliers added to
 * driver core. So, this function creates SYNC_STATE_ONLY device links between
 * @dev (consumer) and these suppliers to make sure they don't execute their
 * sync_state() callbacks before these child devices have a chance to create
 * their device links and attempt to probe.
 */
static void __fw_devlink_link_to_suppliers(struct device *dev,
					   struct fwnode_handle *fwnode)
{
	bool own_link = (dev->fwnode == fwnode);
	struct fwnode_link *link, *tmp;
	struct fwnode_handle *child = NULL;
	u32 dl_flags;

	if (own_link)
		dl_flags = fw_devlink_get_flags();
	else
		dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;

	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
		int ret;
		struct device *sup_dev;
		struct fwnode_handle *sup = link->supplier;

		ret = fw_devlink_create_devlink(dev, sup, dl_flags);
		if (!own_link || ret == -EAGAIN)
			continue;

		__fwnode_link_del(link);

		/* If no device link was created, nothing more to do. */
		if (ret)
			continue;

		/*
		 * If a device link was successfully created to a supplier, we
		 * now need to try and link the supplier to all its suppliers.
		 *
		 * This is needed to detect and delete false dependencies in
		 * fwnode links that haven't been converted to a device link
		 * yet. See comments in fw_devlink_create_devlink() for more
		 * details on the false dependency.
		 *
		 * Without deleting these false dependencies, some devices will
		 * never probe because they'll keep waiting for their false
		 * dependency fwnode links to be converted to device links.
		 */
		sup_dev = get_dev_from_fwnode(sup);
		__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
		put_device(sup_dev);
	}

	/*
	 * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of
	 * all the fwnodes in the fwnode tree rooted at @fwnode that don't
	 * have a device yet, since child devices of @dev may depend on those
	 * suppliers.
	 */
	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		__fw_devlink_link_to_suppliers(dev, child);
}

static void fw_devlink_link_device(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;

	if (!fw_devlink_flags)
		return;

	fw_devlink_parse_fwtree(fwnode);

	mutex_lock(&fwnode_link_lock);
	__fw_devlink_link_to_consumers(dev);
	__fw_devlink_link_to_suppliers(dev, fwnode);
	mutex_unlock(&fwnode_link_lock);
}

/* Device links support end. */

int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

static DEFINE_MUTEX(device_hotplug_lock);

void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}

#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif

static void device_platform_notify(struct device *dev)
{
	acpi_device_notify(dev);

	software_node_notify(dev);

	if (platform_notify)
		platform_notify(dev);
}

static void device_platform_notify_remove(struct device *dev)
{
	acpi_device_notify_remove(dev);

	software_node_notify_remove(dev);

	if (platform_notify_remove)
		platform_notify_remove(dev);
}

/**
 * dev_driver_string - Return a device's driver name, if at all possible
 * @dev: struct device to get the name of
 *
 * Will return the device's driver's name if it is bound to a device.  If
 * the device is not bound to a driver, it will return the name of the bus
 * it is attached to.  If it is not attached to a bus either, an empty
 * string will be returned.
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/*
	 * dev->driver can change to NULL underneath us because of unbinding,
	 * so be careful about accessing it.  dev->bus->name is immutable, so
	 * don't worry about that.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	if (ret >= (ssize_t)PAGE_SIZE) {
		printk("dev_attr_show: %pS returned bad count\n",
		       dev_attr->show);
	}
	return ret;
}

static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->store)
		ret = dev_attr->store(dev, dev_attr, buf, count);
	return ret;
}

static const struct sysfs_ops dev_sysfs_ops = {
	.show	= dev_attr_show,
	.store	= dev_attr_store,
};

#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)

ssize_t device_store_ulong(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	unsigned long new;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;
	*(unsigned long *)(ea->var) = new;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_ulong);

ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);

ssize_t device_store_int(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	long new;

	ret = kstrtol(buf, 0, &new);
	if (ret)
		return ret;

	if (new > INT_MAX || new < INT_MIN)
		return -EINVAL;
	*(int *)(ea->var) = new;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_int);

ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);

ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);

ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
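
/*
 * Illustrative sketch, not part of this file: the helpers above are normally
 * used through the DEVICE_ULONG_ATTR()/DEVICE_INT_ATTR()/DEVICE_BOOL_ATTR()
 * macros from <linux/device.h>, which wrap a variable in a
 * struct dev_ext_attribute, e.g.
 *
 *	static unsigned long poll_interval;
 *	static DEVICE_ULONG_ATTR(poll_interval, 0644, poll_interval);
 *
 * where "poll_interval" is a hypothetical attribute name.  Reads and writes
 * of the resulting sysfs file then go through device_show_ulong() and
 * device_store_ulong().
 */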

/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object
 * reaches 0. We forward the call to the device's release
 * method, which should handle actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired.  Make sure
	 * all resources are released.
	 *
	 * Drivers still can add resources into device after device
	 * is deleted but alive, so release devres here to avoid
	 * possible memory leak.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}

static const void *device_namespace(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}

static struct kobj_type device_ktype = {
	.release	= device_release,
	.sysfs_ops	= &dev_sysfs_ops,
	.namespace	= device_namespace,
	.get_ownership	= device_get_ownership,
};

static int dev_uevent_filter(struct kobject *kobj)
{
	const struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &device_ktype) {
		struct device *dev = kobj_to_dev(kobj);

		if (dev->bus)
			return 1;
		if (dev->class)
			return 1;
	}
	return 0;
}

static const char *dev_uevent_name(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->bus)
		return dev->bus->name;
	if (dev->class)
		return dev->class->name;
	return NULL;
}

static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}

static const struct kset_uevent_ops device_uevent_ops = {
	.filter		= dev_uevent_filter,
	.name		= dev_uevent_name,
	.uevent		= dev_uevent,
};

static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(&dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(&dev->kobj, env);
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}

static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent\n");
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);

static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}

static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);

static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	const char *loc;

	switch (dev->removable) {
	case DEVICE_REMOVABLE:
		loc = "removable";
		break;
	case DEVICE_FIXED:
		loc = "fixed";
		break;
	default:
		loc = "unknown";
	}
	return sysfs_emit(buf, "%s\n", loc);
}
static DEVICE_ATTR_RO(removable);

int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
2582
2583 union device_attr_group_devres {
2584 const struct attribute_group *group;
2585 const struct attribute_group **groups;
2586 };
2587
2588 static int devm_attr_group_match(struct device *dev, void *res, void *data)
2589 {
2590 return ((union device_attr_group_devres *)res)->group == data;
2591 }
2592
2593 static void devm_attr_group_remove(struct device *dev, void *res)
2594 {
2595 union device_attr_group_devres *devres = res;
2596 const struct attribute_group *group = devres->group;
2597
2598 dev_dbg(dev, "%s: removing group %p\n", __func__, group);
2599 sysfs_remove_group(&dev->kobj, group);
2600 }
2601
2602 static void devm_attr_groups_remove(struct device *dev, void *res)
2603 {
2604 union device_attr_group_devres *devres = res;
2605 const struct attribute_group **groups = devres->groups;
2606
2607 dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
2608 sysfs_remove_groups(&dev->kobj, groups);
2609 }
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621 int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
2622 {
2623 union device_attr_group_devres *devres;
2624 int error;
2625
2626 devres = devres_alloc(devm_attr_group_remove,
2627 sizeof(*devres), GFP_KERNEL);
2628 if (!devres)
2629 return -ENOMEM;
2630
2631 error = sysfs_create_group(&dev->kobj, grp);
2632 if (error) {
2633 devres_free(devres);
2634 return error;
2635 }
2636
2637 devres->group = grp;
2638 devres_add(dev, devres);
2639 return 0;
2640 }
2641 EXPORT_SYMBOL_GPL(devm_device_add_group);
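
/*
 * Usage sketch (hypothetical driver, foo_* names made up): because the
 * group is devres-managed, probe() needs no matching cleanup call; the
 * group is removed automatically when the device is unbound:
 *
 *	static struct attribute *foo_attrs[] = {
 *		&dev_attr_foo.attr,
 *		NULL
 *	};
 *	static const struct attribute_group foo_group = {
 *		.attrs = foo_attrs,
 *	};
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		return devm_device_add_group(dev, &foo_group);
 *	}
 */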
2642
/**
 * devm_device_remove_group - remove a managed group from a device
 * @dev:	device to remove the group from
 * @grp:	group to remove
 *
 * This function removes a group of attributes from a device.  The attributes
 * previously have to have been created for this group, otherwise it will fail.
 */
2651 void devm_device_remove_group(struct device *dev,
2652 const struct attribute_group *grp)
2653 {
2654 WARN_ON(devres_release(dev, devm_attr_group_remove,
2655 devm_attr_group_match,
2656 (void *)grp));
2657 }
2658 EXPORT_SYMBOL_GPL(devm_device_remove_group);
2659
/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev:	The device to create the group for
 * @groups:	The attribute groups to create, NULL terminated
 *
 * This function creates a bunch of managed attribute groups.  If an error
 * occurs when creating a group, all previously created groups will be
 * removed, unwinding everything back to the original state when this
 * function was called.  It will explicitly warn and error if any of the
 * attribute files being created already exist.
 *
 * Returns 0 on success or error code from sysfs_create_group() on failure.
 */
2673 int devm_device_add_groups(struct device *dev,
2674 const struct attribute_group **groups)
2675 {
2676 union device_attr_group_devres *devres;
2677 int error;
2678
2679 devres = devres_alloc(devm_attr_groups_remove,
2680 sizeof(*devres), GFP_KERNEL);
2681 if (!devres)
2682 return -ENOMEM;
2683
2684 error = sysfs_create_groups(&dev->kobj, groups);
2685 if (error) {
2686 devres_free(devres);
2687 return error;
2688 }
2689
2690 devres->groups = groups;
2691 devres_add(dev, devres);
2692 return 0;
2693 }
2694 EXPORT_SYMBOL_GPL(devm_device_add_groups);
2695
/**
 * devm_device_remove_groups - remove a list of managed groups
 * @dev:	The device for the groups to be removed from
 * @groups:	NULL terminated list of groups to be removed
 *
 * If groups is not NULL, remove the specified groups from the device.
 */
2704 void devm_device_remove_groups(struct device *dev,
2705 const struct attribute_group **groups)
2706 {
2707 WARN_ON(devres_release(dev, devm_attr_groups_remove,
2708 devm_attr_group_match,
2709 (void *)groups));
2710 }
2711 EXPORT_SYMBOL_GPL(devm_device_remove_groups);
2712
2713 static int device_add_attrs(struct device *dev)
2714 {
2715 struct class *class = dev->class;
2716 const struct device_type *type = dev->type;
2717 int error;
2718
2719 if (class) {
2720 error = device_add_groups(dev, class->dev_groups);
2721 if (error)
2722 return error;
2723 }
2724
2725 if (type) {
2726 error = device_add_groups(dev, type->groups);
2727 if (error)
2728 goto err_remove_class_groups;
2729 }
2730
2731 error = device_add_groups(dev, dev->groups);
2732 if (error)
2733 goto err_remove_type_groups;
2734
2735 if (device_supports_offline(dev) && !dev->offline_disabled) {
2736 error = device_create_file(dev, &dev_attr_online);
2737 if (error)
2738 goto err_remove_dev_groups;
2739 }
2740
2741 if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
2742 error = device_create_file(dev, &dev_attr_waiting_for_supplier);
2743 if (error)
2744 goto err_remove_dev_online;
2745 }
2746
2747 if (dev_removable_is_valid(dev)) {
2748 error = device_create_file(dev, &dev_attr_removable);
2749 if (error)
2750 goto err_remove_dev_waiting_for_supplier;
2751 }
2752
2753 if (dev_add_physical_location(dev)) {
2754 error = device_add_group(dev,
2755 &dev_attr_physical_location_group);
2756 if (error)
2757 goto err_remove_dev_removable;
2758 }
2759
2760 return 0;
2761
2762 err_remove_dev_removable:
2763 device_remove_file(dev, &dev_attr_removable);
2764 err_remove_dev_waiting_for_supplier:
2765 device_remove_file(dev, &dev_attr_waiting_for_supplier);
2766 err_remove_dev_online:
2767 device_remove_file(dev, &dev_attr_online);
2768 err_remove_dev_groups:
2769 device_remove_groups(dev, dev->groups);
2770 err_remove_type_groups:
2771 if (type)
2772 device_remove_groups(dev, type->groups);
2773 err_remove_class_groups:
2774 if (class)
2775 device_remove_groups(dev, class->dev_groups);
2776
2777 return error;
2778 }
2779
2780 static void device_remove_attrs(struct device *dev)
2781 {
2782 struct class *class = dev->class;
2783 const struct device_type *type = dev->type;
2784
2785 if (dev->physical_location) {
2786 device_remove_group(dev, &dev_attr_physical_location_group);
2787 kfree(dev->physical_location);
2788 }
2789
2790 device_remove_file(dev, &dev_attr_removable);
2791 device_remove_file(dev, &dev_attr_waiting_for_supplier);
2792 device_remove_file(dev, &dev_attr_online);
2793 device_remove_groups(dev, dev->groups);
2794
2795 if (type)
2796 device_remove_groups(dev, type->groups);
2797
2798 if (class)
2799 device_remove_groups(dev, class->dev_groups);
2800 }
2801
2802 static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
2803 char *buf)
2804 {
2805 return print_dev_t(buf, dev->devt);
2806 }
2807 static DEVICE_ATTR_RO(dev);
2808
2809
2810 struct kset *devices_kset;
2811
/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
2817 static void devices_kset_move_before(struct device *deva, struct device *devb)
2818 {
2819 if (!devices_kset)
2820 return;
2821 pr_debug("devices_kset: Moving %s before %s\n",
2822 dev_name(deva), dev_name(devb));
2823 spin_lock(&devices_kset->list_lock);
2824 list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
2825 spin_unlock(&devices_kset->list_lock);
2826 }
2827
/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come after.
 */
2833 static void devices_kset_move_after(struct device *deva, struct device *devb)
2834 {
2835 if (!devices_kset)
2836 return;
2837 pr_debug("devices_kset: Moving %s after %s\n",
2838 dev_name(deva), dev_name(devb));
2839 spin_lock(&devices_kset->list_lock);
2840 list_move(&deva->kobj.entry, &devb->kobj.entry);
2841 spin_unlock(&devices_kset->list_lock);
2842 }
2843
/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
2848 void devices_kset_move_last(struct device *dev)
2849 {
2850 if (!devices_kset)
2851 return;
2852 pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
2853 spin_lock(&devices_kset->list_lock);
2854 list_move_tail(&dev->kobj.entry, &devices_kset->list);
2855 spin_unlock(&devices_kset->list_lock);
2856 }
2857
/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
2863 int device_create_file(struct device *dev,
2864 const struct device_attribute *attr)
2865 {
2866 int error = 0;
2867
2868 if (dev) {
2869 WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
2870 "Attribute %s: write permission without 'store'\n",
2871 attr->attr.name);
2872 WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
2873 "Attribute %s: read permission without 'show'\n",
2874 attr->attr.name);
2875 error = sysfs_create_file(&dev->kobj, &attr->attr);
2876 }
2877
2878 return error;
2879 }
2880 EXPORT_SYMBOL_GPL(device_create_file);
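
/*
 * Usage sketch (hypothetical, dev_attr_foo as in the sketch further
 * above): attributes created this way must be removed explicitly, so
 * callers pair the two calls:
 *
 *	err = device_create_file(dev, &dev_attr_foo);
 *	if (err)
 *		return err;
 *	...
 *	device_remove_file(dev, &dev_attr_foo);
 *
 * Attributes added after device_add() show up only after the KOBJ_ADD
 * uevent has been sent, so userspace may miss them; default attribute
 * groups (dev->groups and the class/type groups) avoid that race and
 * are generally preferred.
 */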
2881
/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
2887 void device_remove_file(struct device *dev,
2888 const struct device_attribute *attr)
2889 {
2890 if (dev)
2891 sysfs_remove_file(&dev->kobj, &attr->attr);
2892 }
2893 EXPORT_SYMBOL_GPL(device_remove_file);
2894
/**
 * device_remove_file_self - remove sysfs attribute file from its own method.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * See kernfs_remove_self() for details.
 */
2902 bool device_remove_file_self(struct device *dev,
2903 const struct device_attribute *attr)
2904 {
2905 if (dev)
2906 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
2907 else
2908 return false;
2909 }
2910 EXPORT_SYMBOL_GPL(device_remove_file_self);
2911
/**
 * device_create_bin_file - create sysfs binary attribute file for device.
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
2917 int device_create_bin_file(struct device *dev,
2918 const struct bin_attribute *attr)
2919 {
2920 int error = -EINVAL;
2921 if (dev)
2922 error = sysfs_create_bin_file(&dev->kobj, attr);
2923 return error;
2924 }
2925 EXPORT_SYMBOL_GPL(device_create_bin_file);
2926
/**
 * device_remove_bin_file - remove sysfs binary attribute file.
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
2932 void device_remove_bin_file(struct device *dev,
2933 const struct bin_attribute *attr)
2934 {
2935 if (dev)
2936 sysfs_remove_bin_file(&dev->kobj, attr);
2937 }
2938 EXPORT_SYMBOL_GPL(device_remove_bin_file);
2939
2940 static void klist_children_get(struct klist_node *n)
2941 {
2942 struct device_private *p = to_device_private_parent(n);
2943 struct device *dev = p->device;
2944
2945 get_device(dev);
2946 }
2947
2948 static void klist_children_put(struct klist_node *n)
2949 {
2950 struct device_private *p = to_device_private_parent(n);
2951 struct device *dev = p->device;
2952
2953 put_device(dev);
2954 }
2955
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields.  In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * All fields in @dev must be initialized by the caller to 0, except
 * for those explicitly set to some other value.  The simplest
 * approach is to use kzalloc() to allocate the structure containing
 * @dev.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
2976 void device_initialize(struct device *dev)
2977 {
2978 dev->kobj.kset = devices_kset;
2979 kobject_init(&dev->kobj, &device_ktype);
2980 INIT_LIST_HEAD(&dev->dma_pools);
2981 mutex_init(&dev->mutex);
2982 lockdep_set_novalidate_class(&dev->mutex);
2983 spin_lock_init(&dev->devres_lock);
2984 INIT_LIST_HEAD(&dev->devres_head);
2985 device_pm_init(dev);
2986 set_dev_node(dev, NUMA_NO_NODE);
2987 INIT_LIST_HEAD(&dev->links.consumers);
2988 INIT_LIST_HEAD(&dev->links.suppliers);
2989 INIT_LIST_HEAD(&dev->links.defer_sync);
2990 dev->links.status = DL_DEV_NO_DRIVER;
2991 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
2992 defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
2993 defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
2994 dev->dma_coherent = dma_default_coherent;
2995 #endif
2996 #ifdef CONFIG_SWIOTLB
2997 dev->dma_io_tlb_mem = &io_tlb_default_mem;
2998 #endif
2999 }
3000 EXPORT_SYMBOL_GPL(device_initialize);
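
/*
 * Typical use (illustrative sketch, foo_* names made up): a subsystem
 * embeds a struct device in its own structure, initializes it, and
 * lets the release() callback free the containing object once the last
 * reference is gone:
 *
 *	struct foo {
 *		struct device dev;
 *	};
 *
 *	static void foo_release(struct device *dev)
 *	{
 *		kfree(container_of(dev, struct foo, dev));
 *	}
 *
 *	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	if (!foo)
 *		return -ENOMEM;
 *	device_initialize(&foo->dev);
 *	foo->dev.release = foo_release;
 *
 * From this point on, foo must only be freed via put_device(&foo->dev),
 * never by calling kfree(foo) directly.
 */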
3001
3002 struct kobject *virtual_device_parent(struct device *dev)
3003 {
3004 static struct kobject *virtual_dir = NULL;
3005
3006 if (!virtual_dir)
3007 virtual_dir = kobject_create_and_add("virtual",
3008 &devices_kset->kobj);
3009
3010 return virtual_dir;
3011 }
3012
3013 struct class_dir {
3014 struct kobject kobj;
3015 struct class *class;
3016 };
3017
3018 #define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
3019
3020 static void class_dir_release(struct kobject *kobj)
3021 {
3022 struct class_dir *dir = to_class_dir(kobj);
3023 kfree(dir);
3024 }
3025
3026 static const
3027 struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
3028 {
3029 struct class_dir *dir = to_class_dir(kobj);
3030 return dir->class->ns_type;
3031 }
3032
3033 static struct kobj_type class_dir_ktype = {
3034 .release = class_dir_release,
3035 .sysfs_ops = &kobj_sysfs_ops,
3036 .child_ns_type = class_dir_child_ns_type
3037 };
3038
3039 static struct kobject *
3040 class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
3041 {
3042 struct class_dir *dir;
3043 int retval;
3044
3045 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
3046 if (!dir)
3047 return ERR_PTR(-ENOMEM);
3048
3049 dir->class = class;
3050 kobject_init(&dir->kobj, &class_dir_ktype);
3051
3052 dir->kobj.kset = &class->p->glue_dirs;
3053
3054 retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
3055 if (retval < 0) {
3056 kobject_put(&dir->kobj);
3057 return ERR_PTR(retval);
3058 }
3059 return &dir->kobj;
3060 }
3061
3062 static DEFINE_MUTEX(gdp_mutex);
3063
3064 static struct kobject *get_device_parent(struct device *dev,
3065 struct device *parent)
3066 {
3067 if (dev->class) {
3068 struct kobject *kobj = NULL;
3069 struct kobject *parent_kobj;
3070 struct kobject *k;
3071
3072 #ifdef CONFIG_BLOCK
3073
3074 if (sysfs_deprecated && dev->class == &block_class) {
3075 if (parent && parent->class == &block_class)
3076 return &parent->kobj;
3077 return &block_class.p->subsys.kobj;
3078 }
3079 #endif
3080
3081
3082
3083
3084
3085
3086 if (parent == NULL)
3087 parent_kobj = virtual_device_parent(dev);
3088 else if (parent->class && !dev->class->ns_type)
3089 return &parent->kobj;
3090 else
3091 parent_kobj = &parent->kobj;
3092
3093 mutex_lock(&gdp_mutex);
3094
3095
3096 spin_lock(&dev->class->p->glue_dirs.list_lock);
3097 list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
3098 if (k->parent == parent_kobj) {
3099 kobj = kobject_get(k);
3100 break;
3101 }
3102 spin_unlock(&dev->class->p->glue_dirs.list_lock);
3103 if (kobj) {
3104 mutex_unlock(&gdp_mutex);
3105 return kobj;
3106 }
3107
3108
3109 k = class_dir_create_and_add(dev->class, parent_kobj);
3110
3111 mutex_unlock(&gdp_mutex);
3112 return k;
3113 }
3114
3115
3116 if (!parent && dev->bus && dev->bus->dev_root)
3117 return &dev->bus->dev_root->kobj;
3118
3119 if (parent)
3120 return &parent->kobj;
3121 return NULL;
3122 }
3123
3124 static inline bool live_in_glue_dir(struct kobject *kobj,
3125 struct device *dev)
3126 {
3127 if (!kobj || !dev->class ||
3128 kobj->kset != &dev->class->p->glue_dirs)
3129 return false;
3130 return true;
3131 }
3132
3133 static inline struct kobject *get_glue_dir(struct device *dev)
3134 {
3135 return dev->kobj.parent;
3136 }
3137
3138
/**
 * kobject_has_children - Returns whether a kobject has children.
 * @kobj: the object to test
 *
 * This will return whether a kobject has other kobjects as children.
 *
 * It does NOT account for the presence of attribute files, only sub
 * directories.  It also assumes there is no concurrent addition or
 * removal of such children, and thus relies on external locking.
 */
3148 static inline bool kobject_has_children(struct kobject *kobj)
3149 {
3150 WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
3151
3152 return kobj->sd && kobj->sd->dir.subdirs;
3153 }
3154
3155
3156
3157
3158
3159
3160 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
3161 {
3162 unsigned int ref;
3163
3164
3165 if (!live_in_glue_dir(glue_dir, dev))
3166 return;
3167
3168 mutex_lock(&gdp_mutex);
	/*
	 * There is a race condition between removing a glue directory and
	 * adding a new device under it.  Another CPU can grab a reference
	 * to the glue dir in get_device_parent() and then race with this
	 * kobject_del(): sysfs_remove_dir() frees glue_dir->sd while the
	 * other CPU is still creating a child directory in it, leaving it
	 * operating on a stale kernfs node.
	 *
	 * Taking gdp_mutex here (the same lock get_device_parent() holds
	 * while looking up or creating glue dirs) and only deleting the
	 * glue dir when no children remain and our reference is the last
	 * one closes that window: a concurrent device_add() either finds
	 * the glue dir still alive, or creates a fresh one.
	 */
3217 ref = kref_read(&glue_dir->kref);
3218 if (!kobject_has_children(glue_dir) && !--ref)
3219 kobject_del(glue_dir);
3220 kobject_put(glue_dir);
3221 mutex_unlock(&gdp_mutex);
3222 }
3223
3224 static int device_add_class_symlinks(struct device *dev)
3225 {
3226 struct device_node *of_node = dev_of_node(dev);
3227 int error;
3228
3229 if (of_node) {
3230 error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
3231 if (error)
			dev_warn(dev, "Error %d creating of_node link\n", error);
		/* An error here doesn't warrant bringing down the device */
3234 }
3235
3236 if (!dev->class)
3237 return 0;
3238
3239 error = sysfs_create_link(&dev->kobj,
3240 &dev->class->p->subsys.kobj,
3241 "subsystem");
3242 if (error)
3243 goto out_devnode;
3244
3245 if (dev->parent && device_is_not_partition(dev)) {
3246 error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
3247 "device");
3248 if (error)
3249 goto out_subsys;
3250 }
3251
3252 #ifdef CONFIG_BLOCK
3253
3254 if (sysfs_deprecated && dev->class == &block_class)
3255 return 0;
3256 #endif
3257
3258
3259 error = sysfs_create_link(&dev->class->p->subsys.kobj,
3260 &dev->kobj, dev_name(dev));
3261 if (error)
3262 goto out_device;
3263
3264 return 0;
3265
3266 out_device:
3267 sysfs_remove_link(&dev->kobj, "device");
3268
3269 out_subsys:
3270 sysfs_remove_link(&dev->kobj, "subsystem");
3271 out_devnode:
3272 sysfs_remove_link(&dev->kobj, "of_node");
3273 return error;
3274 }
3275
3276 static void device_remove_class_symlinks(struct device *dev)
3277 {
3278 if (dev_of_node(dev))
3279 sysfs_remove_link(&dev->kobj, "of_node");
3280
3281 if (!dev->class)
3282 return;
3283
3284 if (dev->parent && device_is_not_partition(dev))
3285 sysfs_remove_link(&dev->kobj, "device");
3286 sysfs_remove_link(&dev->kobj, "subsystem");
3287 #ifdef CONFIG_BLOCK
3288 if (sysfs_deprecated && dev->class == &block_class)
3289 return;
3290 #endif
3291 sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
3292 }
3293
/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 */
3299 int dev_set_name(struct device *dev, const char *fmt, ...)
3300 {
3301 va_list vargs;
3302 int err;
3303
3304 va_start(vargs, fmt);
3305 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
3306 va_end(vargs);
3307 return err;
3308 }
3309 EXPORT_SYMBOL_GPL(dev_set_name);
3310
/**
 * device_to_dev_kobj - select a /sys/dev/ directory for the device
 * @dev: device
 *
 * By default we select char/ for new entries.  Setting class->dev_kobj
 * to NULL prevents an entry from being created.  class->dev_kobj must
 * be set (or cleared) before any devices are registered to the class
 * otherwise device_create_sys_dev_entry() and
 * device_remove_sys_dev_entry() will disagree about the presence of
 * the link.
 */
3322 static struct kobject *device_to_dev_kobj(struct device *dev)
3323 {
3324 struct kobject *kobj;
3325
3326 if (dev->class)
3327 kobj = dev->class->dev_kobj;
3328 else
3329 kobj = sysfs_dev_char_kobj;
3330
3331 return kobj;
3332 }
3333
3334 static int device_create_sys_dev_entry(struct device *dev)
3335 {
3336 struct kobject *kobj = device_to_dev_kobj(dev);
3337 int error = 0;
3338 char devt_str[15];
3339
3340 if (kobj) {
3341 format_dev_t(devt_str, dev->devt);
3342 error = sysfs_create_link(kobj, &dev->kobj, devt_str);
3343 }
3344
3345 return error;
3346 }
3347
3348 static void device_remove_sys_dev_entry(struct device *dev)
3349 {
3350 struct kobject *kobj = device_to_dev_kobj(dev);
3351 char devt_str[15];
3352
3353 if (kobj) {
3354 format_dev_t(devt_str, dev->devt);
3355 sysfs_remove_link(kobj, devt_str);
3356 }
3357 }
3358
3359 static int device_private_init(struct device *dev)
3360 {
3361 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
3362 if (!dev->p)
3363 return -ENOMEM;
3364 dev->p->device = dev;
3365 klist_init(&dev->p->klist_children, klist_children_get,
3366 klist_children_put);
3367 INIT_LIST_HEAD(&dev->p->deferred_probe);
3368 return 0;
3369 }
3370
/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(), though may be called
 * separately _iff_ device_initialize() has been called separately.
 *
 * This adds @dev to the kobject hierarchy via kobject_add(), adds it
 * to the global and sibling lists for the device, then
 * adds it to the other relevant subsystems of the driver model.
 *
 * Do not call this routine or device_register() more than once for
 * any device structure.  The driver model core is not designed to work
 * with devices that get unregistered and then spring back to life.
 * (Among other things, it's very hard to guarantee that all references
 * to the previous incarnation of @dev have been dropped.)  Allocate
 * and register a fresh new struct device instead.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up your
 * reference instead.
 *
 * Rule of thumb is: if device_add() succeeds, you should call
 * device_del() when you want to get rid of it.  If device_add() has
 * *not* succeeded, use *only* put_device() to drop the reference
 * count.
 */
3398 int device_add(struct device *dev)
3399 {
3400 struct device *parent;
3401 struct kobject *kobj;
3402 struct class_interface *class_intf;
3403 int error = -EINVAL;
3404 struct kobject *glue_dir = NULL;
3405
3406 dev = get_device(dev);
3407 if (!dev)
3408 goto done;
3409
3410 if (!dev->p) {
3411 error = device_private_init(dev);
3412 if (error)
3413 goto done;
3414 }
3415
	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name.  We prevent reading back
	 * the name, and force the use of dev_name().
	 */
3421 if (dev->init_name) {
3422 dev_set_name(dev, "%s", dev->init_name);
3423 dev->init_name = NULL;
3424 }
3425
3426
3427 if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
3428 dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
3429
3430 if (!dev_name(dev)) {
3431 error = -EINVAL;
3432 goto name_error;
3433 }
3434
3435 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
3436
3437 parent = get_device(dev->parent);
3438 kobj = get_device_parent(dev, parent);
3439 if (IS_ERR(kobj)) {
3440 error = PTR_ERR(kobj);
3441 goto parent_error;
3442 }
3443 if (kobj)
3444 dev->kobj.parent = kobj;
3445
3446
3447 if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
3448 set_dev_node(dev, dev_to_node(parent));
3449
3450
3451
3452 error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
3453 if (error) {
3454 glue_dir = get_glue_dir(dev);
3455 goto Error;
3456 }
3457
3458
3459 device_platform_notify(dev);
3460
3461 error = device_create_file(dev, &dev_attr_uevent);
3462 if (error)
3463 goto attrError;
3464
3465 error = device_add_class_symlinks(dev);
3466 if (error)
3467 goto SymlinkError;
3468 error = device_add_attrs(dev);
3469 if (error)
3470 goto AttrsError;
3471 error = bus_add_device(dev);
3472 if (error)
3473 goto BusError;
3474 error = dpm_sysfs_add(dev);
3475 if (error)
3476 goto DPMError;
3477 device_pm_add(dev);
3478
3479 if (MAJOR(dev->devt)) {
3480 error = device_create_file(dev, &dev_attr_dev);
3481 if (error)
3482 goto DevAttrError;
3483
3484 error = device_create_sys_dev_entry(dev);
3485 if (error)
3486 goto SysEntryError;
3487
3488 devtmpfs_create_node(dev);
3489 }
3490
	/*
	 * Notify clients of device addition.  This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
3494 if (dev->bus)
3495 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
3496 BUS_NOTIFY_ADD_DEVICE, dev);
3497
3498 kobject_uevent(&dev->kobj, KOBJ_ADD);
3499
	/*
	 * Check if any of the other devices (consumers) have been waiting for
	 * this device (supplier) to be added so that they can create a device
	 * link to it.
	 *
	 * This needs to happen after device_pm_add() because device_link_add()
	 * requires the supplier be registered before it's called.
	 *
	 * But this also needs to happen before bus_probe_device() to make sure
	 * waiting consumers can link to it before the driver is bound to the
	 * device and the driver sync_state callback is called for this device.
	 */
3512 if (dev->fwnode && !dev->fwnode->dev) {
3513 dev->fwnode->dev = dev;
3514 fw_devlink_link_device(dev);
3515 }
3516
3517 bus_probe_device(dev);
3518
	/*
	 * If all driver registration is done and a newly added device doesn't
	 * match with any driver, don't block its consumers from probing in
	 * case the consumer device is able to operate without this supplier.
	 */
3524 if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
3525 fw_devlink_unblock_consumers(dev);
3526
3527 if (parent)
3528 klist_add_tail(&dev->p->knode_parent,
3529 &parent->p->klist_children);
3530
3531 if (dev->class) {
3532 mutex_lock(&dev->class->p->mutex);
3533
3534 klist_add_tail(&dev->p->knode_class,
3535 &dev->class->p->klist_devices);
3536
3537
3538 list_for_each_entry(class_intf,
3539 &dev->class->p->interfaces, node)
3540 if (class_intf->add_dev)
3541 class_intf->add_dev(dev, class_intf);
3542 mutex_unlock(&dev->class->p->mutex);
3543 }
3544 done:
3545 put_device(dev);
3546 return error;
3547 SysEntryError:
3548 if (MAJOR(dev->devt))
3549 device_remove_file(dev, &dev_attr_dev);
3550 DevAttrError:
3551 device_pm_remove(dev);
3552 dpm_sysfs_remove(dev);
3553 DPMError:
3554 bus_remove_device(dev);
3555 BusError:
3556 device_remove_attrs(dev);
3557 AttrsError:
3558 device_remove_class_symlinks(dev);
3559 SymlinkError:
3560 device_remove_file(dev, &dev_attr_uevent);
3561 attrError:
3562 device_platform_notify_remove(dev);
3563 kobject_uevent(&dev->kobj, KOBJ_REMOVE);
3564 glue_dir = get_glue_dir(dev);
3565 kobject_del(&dev->kobj);
3566 Error:
3567 cleanup_glue_dir(dev, glue_dir);
3568 parent_error:
3569 put_device(parent);
3570 name_error:
3571 kfree(dev->p);
3572 dev->p = NULL;
3573 goto done;
3574 }
3575 EXPORT_SYMBOL_GPL(device_add);
3576
/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system.  The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if
 * you have a clearly defined need to use and refcount the
 * device before it is added to the hierarchy.
 *
 * For more information, see the kerneldoc for device_add().
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
3595 int device_register(struct device *dev)
3596 {
3597 device_initialize(dev);
3598 return device_add(dev);
3599 }
3600 EXPORT_SYMBOL_GPL(device_register);
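
/*
 * Error-handling sketch (illustrative, continuing the foo example
 * above): on failure the reference taken by device_initialize() must
 * be dropped rather than the memory freed:
 *
 *	err = device_register(&foo->dev);
 *	if (err) {
 *		put_device(&foo->dev);
 *		return err;
 *	}
 */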
3601
/**
 * get_device - increment reference count for device.
 * @dev: device.
 *
 * This simply forwards the call to kobject_get(), though
 * we do take care to provide for the case that we get a NULL
 * pointer passed in.
 */
3610 struct device *get_device(struct device *dev)
3611 {
3612 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
3613 }
3614 EXPORT_SYMBOL_GPL(get_device);
3615
/**
 * put_device - decrement reference count.
 * @dev: device in question.
 */
3620 void put_device(struct device *dev)
3621 {
3622
3623 if (dev)
3624 kobject_put(&dev->kobj);
3625 }
3626 EXPORT_SYMBOL_GPL(put_device);
3627
3628 bool kill_device(struct device *dev)
3629 {
	/*
	 * Require the device lock and set the "dead" flag to guarantee that
	 * the update behavior is consistent with the other bitfields near
	 * it and that we cannot have an asynchronous probe routine trying
	 * to run while we are tearing out the bus/class/sysfs from
	 * underneath the device.
	 */
3637 device_lock_assert(dev);
3638
3639 if (dev->p->dead)
3640 return false;
3641 dev->p->dead = true;
3642 return true;
3643 }
3644 EXPORT_SYMBOL_GPL(kill_device);
3645
/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * This is the first part of the device unregistration
 * sequence.  This removes the device from the lists we control
 * from here, has it removed from the other driver model
 * subsystems it was added to in device_add(), and removes it
 * from the kobject hierarchy.
 *
 * NOTE: this should be called manually _iff_ device_add() was
 * also called manually.
 */
3659 void device_del(struct device *dev)
3660 {
3661 struct device *parent = dev->parent;
3662 struct kobject *glue_dir = NULL;
3663 struct class_interface *class_intf;
3664 unsigned int noio_flag;
3665
3666 device_lock(dev);
3667 kill_device(dev);
3668 device_unlock(dev);
3669
3670 if (dev->fwnode && dev->fwnode->dev == dev)
3671 dev->fwnode->dev = NULL;
3672
	/*
	 * Notify clients of device removal.  This call must come
	 * before dpm_sysfs_remove().
	 */
3676 noio_flag = memalloc_noio_save();
3677 if (dev->bus)
3678 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
3679 BUS_NOTIFY_DEL_DEVICE, dev);
3680
3681 dpm_sysfs_remove(dev);
3682 if (parent)
3683 klist_del(&dev->p->knode_parent);
3684 if (MAJOR(dev->devt)) {
3685 devtmpfs_delete_node(dev);
3686 device_remove_sys_dev_entry(dev);
3687 device_remove_file(dev, &dev_attr_dev);
3688 }
3689 if (dev->class) {
3690 device_remove_class_symlinks(dev);
3691
3692 mutex_lock(&dev->class->p->mutex);
3693
3694 list_for_each_entry(class_intf,
3695 &dev->class->p->interfaces, node)
3696 if (class_intf->remove_dev)
3697 class_intf->remove_dev(dev, class_intf);
3698
3699 klist_del(&dev->p->knode_class);
3700 mutex_unlock(&dev->class->p->mutex);
3701 }
3702 device_remove_file(dev, &dev_attr_uevent);
3703 device_remove_attrs(dev);
3704 bus_remove_device(dev);
3705 device_pm_remove(dev);
3706 driver_deferred_probe_del(dev);
3707 device_platform_notify_remove(dev);
3708 device_links_purge(dev);
3709
3710 if (dev->bus)
3711 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
3712 BUS_NOTIFY_REMOVED_DEVICE, dev);
3713 kobject_uevent(&dev->kobj, KOBJ_REMOVE);
3714 glue_dir = get_glue_dir(dev);
3715 kobject_del(&dev->kobj);
3716 cleanup_glue_dir(dev, glue_dir);
3717 memalloc_noio_restore(noio_flag);
3718 put_device(parent);
3719 }
3720 EXPORT_SYMBOL_GPL(device_del);
3721
/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register().  First,
 * we remove it from all the subsystems with device_del(), then we
 * decrement the reference count via put_device().  If that
 * is the final reference count, the device will be cleaned up
 * via its release() callback.  Otherwise, the structure will
 * stick around until the final reference to the device is dropped.
 */
3733 void device_unregister(struct device *dev)
3734 {
3735 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
3736 device_del(dev);
3737 put_device(dev);
3738 }
3739 EXPORT_SYMBOL_GPL(device_unregister);
3740
3741 static struct device *prev_device(struct klist_iter *i)
3742 {
3743 struct klist_node *n = klist_prev(i);
3744 struct device *dev = NULL;
3745 struct device_private *p;
3746
3747 if (n) {
3748 p = to_device_private_parent(n);
3749 dev = p->device;
3750 }
3751 return dev;
3752 }
3753
3754 static struct device *next_device(struct klist_iter *i)
3755 {
3756 struct klist_node *n = klist_next(i);
3757 struct device *dev = NULL;
3758 struct device_private *p;
3759
3760 if (n) {
3761 p = to_device_private_parent(n);
3762 dev = p->device;
3763 }
3764 return dev;
3765 }
3766
/**
 * device_get_devnode - path of device node file
 * @dev: device
 * @mode: returned file access mode
 * @uid: returned file owner
 * @gid: returned file group
 * @tmp: possibly allocated string
 *
 * Return the relative path of a possible device node.
 * Non-default names may need to allocate a memory to compose
 * a name.  This memory is returned in tmp and needs to be
 * freed by the caller.
 */
3780 const char *device_get_devnode(struct device *dev,
3781 umode_t *mode, kuid_t *uid, kgid_t *gid,
3782 const char **tmp)
3783 {
3784 char *s;
3785
3786 *tmp = NULL;
3787
3788
3789 if (dev->type && dev->type->devnode)
3790 *tmp = dev->type->devnode(dev, mode, uid, gid);
3791 if (*tmp)
3792 return *tmp;
3793
3794
3795 if (dev->class && dev->class->devnode)
3796 *tmp = dev->class->devnode(dev, mode);
3797 if (*tmp)
3798 return *tmp;
3799
3800
3801 if (strchr(dev_name(dev), '!') == NULL)
3802 return dev_name(dev);
3803
3804
3805 s = kstrdup(dev_name(dev), GFP_KERNEL);
3806 if (!s)
3807 return NULL;
3808 strreplace(s, '!', '/');
3809 return *tmp = s;
3810 }
3811
/**
 * device_for_each_child - device child iterator.
 * @parent: parent struct device.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over @parent's child devices, and call @fn for each,
 * passing it @data.
 *
 * We check the return of @fn each time.  If it returns anything
 * other than 0, we break out and return that value.
 */
3824 int device_for_each_child(struct device *parent, void *data,
3825 int (*fn)(struct device *dev, void *data))
3826 {
3827 struct klist_iter i;
3828 struct device *child;
3829 int error = 0;
3830
3831 if (!parent->p)
3832 return 0;
3833
3834 klist_iter_init(&parent->p->klist_children, &i);
3835 while (!error && (child = next_device(&i)))
3836 error = fn(child, data);
3837 klist_iter_exit(&i);
3838 return error;
3839 }
3840 EXPORT_SYMBOL_GPL(device_for_each_child);
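
/*
 * Usage sketch (hypothetical): state is threaded through @data, and
 * returning 0 from the callback keeps the iteration going:
 *
 *	static int foo_count_child(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	device_for_each_child(parent, &count, foo_count_child);
 */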
3841
/**
 * device_for_each_child_reverse - device child iterator in reversed order.
 * @parent: parent struct device.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over @parent's child devices in reverse order, and call @fn
 * for each, passing it @data.
 *
 * We check the return of @fn each time.  If it returns anything
 * other than 0, we break out and return that value.
 */
3854 int device_for_each_child_reverse(struct device *parent, void *data,
3855 int (*fn)(struct device *dev, void *data))
3856 {
3857 struct klist_iter i;
3858 struct device *child;
3859 int error = 0;
3860
3861 if (!parent->p)
3862 return 0;
3863
3864 klist_iter_init(&parent->p->klist_children, &i);
3865 while ((child = prev_device(&i)) && !error)
3866 error = fn(child, data);
3867 klist_iter_exit(&i);
3868 return error;
3869 }
3870 EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
3871
/**
 * device_find_child - device iterator for locating a particular device.
 * @parent: parent struct device
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * This is similar to the device_for_each_child() function above, but it
 * returns a reference to a device that is 'found' for later use, as
 * determined by the @match callback.
 *
 * The callback should return 0 if the device doesn't match and non-zero
 * if it does.  If the callback returns non-zero and a reference to the
 * current device can be obtained, this function will return to the caller
 * and not iterate over any more devices.
 *
 * NOTE: you will need to drop the reference with put_device() after use.
 */
3889 struct device *device_find_child(struct device *parent, void *data,
3890 int (*match)(struct device *dev, void *data))
3891 {
3892 struct klist_iter i;
3893 struct device *child;
3894
3895 if (!parent)
3896 return NULL;
3897
3898 klist_iter_init(&parent->p->klist_children, &i);
3899 while ((child = next_device(&i)))
3900 if (match(child, data) && get_device(child))
3901 break;
3902 klist_iter_exit(&i);
3903 return child;
3904 }
3905 EXPORT_SYMBOL_GPL(device_find_child);
3906
/**
 * device_find_child_by_name - device iterator for locating a child device.
 * @parent: parent struct device
 * @name: name of the child device
 *
 * This is similar to the device_find_child() function above, but it
 * returns a reference to a device that has the name @name.
 *
 * NOTE: you will need to drop the reference with put_device() after use.
 */
3917 struct device *device_find_child_by_name(struct device *parent,
3918 const char *name)
3919 {
3920 struct klist_iter i;
3921 struct device *child;
3922
3923 if (!parent)
3924 return NULL;
3925
3926 klist_iter_init(&parent->p->klist_children, &i);
3927 while ((child = next_device(&i)))
3928 if (sysfs_streq(dev_name(child), name) && get_device(child))
3929 break;
3930 klist_iter_exit(&i);
3931 return child;
3932 }
3933 EXPORT_SYMBOL_GPL(device_find_child_by_name);
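
/*
 * Usage sketch (hypothetical child name "foo0"): the returned device
 * carries a reference that the caller must drop:
 *
 *	struct device *child;
 *
 *	child = device_find_child_by_name(parent, "foo0");
 *	if (child) {
 *		...
 *		put_device(child);
 *	}
 */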
3934
3935 static int match_any(struct device *dev, void *unused)
3936 {
3937 return 1;
3938 }
3939
/**
 * device_find_any_child - device iterator for locating a child device, if any.
 * @parent: parent struct device
 *
 * This is similar to the device_find_child() function above, but it
 * returns a reference to a child device, if any.
 *
 * NOTE: you will need to drop the reference with put_device() after use.
 */
3949 struct device *device_find_any_child(struct device *parent)
3950 {
3951 return device_find_child(parent, NULL, match_any);
3952 }
3953 EXPORT_SYMBOL_GPL(device_find_any_child);
3954
3955 int __init devices_init(void)
3956 {
3957 devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
3958 if (!devices_kset)
3959 return -ENOMEM;
3960 dev_kobj = kobject_create_and_add("dev", NULL);
3961 if (!dev_kobj)
3962 goto dev_kobj_err;
3963 sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
3964 if (!sysfs_dev_block_kobj)
3965 goto block_kobj_err;
3966 sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
3967 if (!sysfs_dev_char_kobj)
3968 goto char_kobj_err;
3969
3970 return 0;
3971
3972 char_kobj_err:
3973 kobject_put(sysfs_dev_block_kobj);
3974 block_kobj_err:
3975 kobject_put(dev_kobj);
3976 dev_kobj_err:
3977 kset_unregister(devices_kset);
3978 return -ENOMEM;
3979 }
3980
3981 static int device_check_offline(struct device *dev, void *not_used)
3982 {
3983 int ret;
3984
3985 ret = device_for_each_child(dev, NULL, device_check_offline);
3986 if (ret)
3987 return ret;
3988
3989 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
3990 }
3991
/**
 * device_offline - Prepare the device for hot-removal.
 * @dev: Device to be put offline.
 *
 * Execute the device bus type's .offline() callback, if present, to prepare
 * the device for a subsequent hot-removal.  If that succeeds, the device must
 * not be used until either it is removed or its bus type's .online() callback
 * is executed.
 *
 * Call under device_hotplug_lock.
 */
4003 int device_offline(struct device *dev)
4004 {
4005 int ret;
4006
4007 if (dev->offline_disabled)
4008 return -EPERM;
4009
4010 ret = device_for_each_child(dev, NULL, device_check_offline);
4011 if (ret)
4012 return ret;
4013
4014 device_lock(dev);
4015 if (device_supports_offline(dev)) {
4016 if (dev->offline) {
4017 ret = 1;
4018 } else {
4019 ret = dev->bus->offline(dev);
4020 if (!ret) {
4021 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
4022 dev->offline = true;
4023 }
4024 }
4025 }
4026 device_unlock(dev);
4027
4028 return ret;
4029 }
4030
/**
 * device_online - Put the device back online after successful device_offline().
 * @dev: Device to be put back online.
 *
 * If device_offline() has been successfully executed for @dev, but the device
 * has not been removed subsequently, execute its bus type's .online() callback
 * to indicate that the device can be used again.
 *
 * Call under device_hotplug_lock.
 */
4041 int device_online(struct device *dev)
4042 {
4043 int ret = 0;
4044
4045 device_lock(dev);
4046 if (device_supports_offline(dev)) {
4047 if (dev->offline) {
4048 ret = dev->bus->online(dev);
4049 if (!ret) {
4050 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
4051 dev->offline = false;
4052 }
4053 } else {
4054 ret = 1;
4055 }
4056 }
4057 device_unlock(dev);
4058
4059 return ret;
4060 }
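
/*
 * Usage sketch (illustrative): kernel-side callers take the hotplug
 * lock themselves, mirroring what online_store() above does via
 * lock_device_hotplug_sysfs():
 *
 *	lock_device_hotplug();
 *	ret = device_offline(dev);
 *	unlock_device_hotplug();
 *
 * A return value of 1 means the device was already offline (or already
 * online, for device_online()); negative values are errors from the
 * bus type's callback.
 */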
4061
4062 struct root_device {
4063 struct device dev;
4064 struct module *owner;
4065 };
4066
4067 static inline struct root_device *to_root_device(struct device *d)
4068 {
4069 return container_of(d, struct root_device, dev);
4070 }
4071
4072 static void root_device_release(struct device *dev)
4073 {
4074 kfree(to_root_device(dev));
4075 }
4076
/**
 * __root_device_register - allocate and register a root device
 * @name: root device name
 * @owner: owner module of the root device, usually THIS_MODULE
 *
 * This function allocates a root device and registers it
 * using device_register().  In order to free the returned
 * device, use root_device_unregister().
 *
 * Root devices are dummy devices which allow other devices
 * to be grouped under /sys/devices.  Use this function to
 * allocate a root device and then use it as the parent of
 * any device which should appear under /sys/devices/{name}.
 *
 * The /sys/devices/{name} directory will also contain a
 * 'module' symlink which points to the @owner directory
 * in sysfs.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: You probably want to use root_device_register().
 */
4099 struct device *__root_device_register(const char *name, struct module *owner)
4100 {
4101 struct root_device *root;
4102 int err = -ENOMEM;
4103
4104 root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
4105 if (!root)
4106 return ERR_PTR(err);
4107
4108 err = dev_set_name(&root->dev, "%s", name);
4109 if (err) {
4110 kfree(root);
4111 return ERR_PTR(err);
4112 }
4113
4114 root->dev.release = root_device_release;
4115
4116 err = device_register(&root->dev);
4117 if (err) {
4118 put_device(&root->dev);
4119 return ERR_PTR(err);
4120 }
4121
4122 #ifdef CONFIG_MODULES
4123 if (owner) {
4124 struct module_kobject *mk = &owner->mkobj;
4125
4126 err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
4127 if (err) {
4128 device_unregister(&root->dev);
4129 return ERR_PTR(err);
4130 }
4131 root->owner = owner;
4132 }
4133 #endif
4134
4135 return &root->dev;
4136 }
4137 EXPORT_SYMBOL_GPL(__root_device_register);
4138
/**
 * root_device_unregister - unregister and free a root device
 * @dev: device going away
 *
 * This function unregisters and cleans up a device that was created by
 * root_device_register().
 */
4146 void root_device_unregister(struct device *dev)
4147 {
4148 struct root_device *root = to_root_device(dev);
4149
4150 if (root->owner)
4151 sysfs_remove_link(&root->dev.kobj, "module");
4152
4153 device_unregister(dev);
4154 }
4155 EXPORT_SYMBOL_GPL(root_device_unregister);
4156
4157
4158 static void device_create_release(struct device *dev)
4159 {
4160 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
4161 kfree(dev);
4162 }
4163
4164 static __printf(6, 0) struct device *
4165 device_create_groups_vargs(struct class *class, struct device *parent,
4166 dev_t devt, void *drvdata,
4167 const struct attribute_group **groups,
4168 const char *fmt, va_list args)
4169 {
4170 struct device *dev = NULL;
4171 int retval = -ENODEV;
4172
4173 if (class == NULL || IS_ERR(class))
4174 goto error;
4175
4176 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
4177 if (!dev) {
4178 retval = -ENOMEM;
4179 goto error;
4180 }
4181
4182 device_initialize(dev);
4183 dev->devt = devt;
4184 dev->class = class;
4185 dev->parent = parent;
4186 dev->groups = groups;
4187 dev->release = device_create_release;
4188 dev_set_drvdata(dev, drvdata);
4189
4190 retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
4191 if (retval)
4192 goto error;
4193
4194 retval = device_add(dev);
4195 if (retval)
4196 goto error;
4197
4198 return dev;
4199
4200 error:
4201 put_device(dev);
4202 return ERR_PTR(retval);
4203 }
4204
/**
 * device_create - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @fmt: string for the device's name
 *
 * This function can be used by char device classes.  A struct device
 * will be created in sysfs, registered to the specified class.
 *
 * A "dev" file will be created, showing the dev_t for the device, if
 * the dev_t is not 0,0.
 * If a pointer to a parent struct device is passed in, the newly created
 * struct device will be a child of that device in sysfs.
 * The pointer to the struct device will be returned from the call.
 * Any further sysfs files that might be required can be created using this
 * pointer.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
4229 struct device *device_create(struct class *class, struct device *parent,
4230 dev_t devt, void *drvdata, const char *fmt, ...)
4231 {
4232 va_list vargs;
4233 struct device *dev;
4234
4235 va_start(vargs, fmt);
4236 dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
4237 fmt, vargs);
4238 va_end(vargs);
4239 return dev;
4240 }
4241 EXPORT_SYMBOL_GPL(device_create);
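
/*
 * Usage sketch (hypothetical char driver; foo names made up, and the
 * two-argument class_create() signature of this kernel generation
 * assumed): creating /dev/foo0 for an already-allocated char major:
 *
 *	foo_class = class_create(THIS_MODULE, "foo");
 *	if (IS_ERR(foo_class))
 *		return PTR_ERR(foo_class);
 *
 *	dev = device_create(foo_class, NULL, MKDEV(foo_major, 0),
 *			    NULL, "foo%d", 0);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *
 * Teardown is the mirror image: device_destroy(foo_class,
 * MKDEV(foo_major, 0)) followed by class_destroy(foo_class).
 */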
4242
/**
 * device_create_with_groups - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @groups: NULL-terminated list of attribute groups to be created
 * @fmt: string for the device's name
 *
 * This function can be used by char device classes.  A struct device
 * will be created in sysfs, registered to the specified class.
 * Additional attributes specified in the groups parameter will also
 * be created automatically.
 *
 * A "dev" file will be created, showing the dev_t for the device, if
 * the dev_t is not 0,0.
 * If a pointer to a parent struct device is passed in, the newly created
 * struct device will be a child of that device in sysfs.
 * The pointer to the struct device will be returned from the call.
 * Any further sysfs files that might be required can be created using this
 * pointer.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
4270 struct device *device_create_with_groups(struct class *class,
4271 struct device *parent, dev_t devt,
4272 void *drvdata,
4273 const struct attribute_group **groups,
4274 const char *fmt, ...)
4275 {
4276 va_list vargs;
4277 struct device *dev;
4278
4279 va_start(vargs, fmt);
4280 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
4281 fmt, vargs);
4282 va_end(vargs);
4283 return dev;
4284 }
4285 EXPORT_SYMBOL_GPL(device_create_with_groups);
4286
/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 *
 * This call unregisters and cleans up a device that was created with a
 * call to device_create().
 */
4295 void device_destroy(struct class *class, dev_t devt)
4296 {
4297 struct device *dev;
4298
4299 dev = class_find_device_by_devt(class, devt);
4300 if (dev) {
4301 put_device(dev);
4302 device_unregister(dev);
4303 }
4304 }
4305 EXPORT_SYMBOL_GPL(device_destroy);
4306
/**
 * device_rename - renames a device
 * @dev: the pointer to the struct device to be renamed
 * @new_name: the new name of the device
 *
 * It is the responsibility of the caller to provide mutual
 * exclusion between two different calls of device_rename
 * on the same device to ensure that new_name is valid and
 * won't conflict with other devices.
 *
 * Note: avoid this function in new code.  Renaming sysfs entries
 * confuses userspace, which may already have seen the original name in
 * a uevent and cached it.  Some existing subsystems (networking in
 * particular) depend on it, so it cannot be removed, but don't add new
 * callers.
 */
4346 int device_rename(struct device *dev, const char *new_name)
4347 {
4348 struct kobject *kobj = &dev->kobj;
4349 char *old_device_name = NULL;
4350 int error;
4351
4352 dev = get_device(dev);
4353 if (!dev)
4354 return -EINVAL;
4355
4356 dev_dbg(dev, "renaming to %s\n", new_name);
4357
4358 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
4359 if (!old_device_name) {
4360 error = -ENOMEM;
4361 goto out;
4362 }
4363
4364 if (dev->class) {
4365 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
4366 kobj, old_device_name,
4367 new_name, kobject_namespace(kobj));
4368 if (error)
4369 goto out;
4370 }
4371
4372 error = kobject_rename(kobj, new_name);
4373 if (error)
4374 goto out;
4375
4376 out:
4377 put_device(dev);
4378
4379 kfree(old_device_name);
4380
4381 return error;
4382 }
4383 EXPORT_SYMBOL_GPL(device_rename);
4384
4385 static int device_move_class_links(struct device *dev,
4386 struct device *old_parent,
4387 struct device *new_parent)
4388 {
4389 int error = 0;
4390
4391 if (old_parent)
4392 sysfs_remove_link(&dev->kobj, "device");
4393 if (new_parent)
4394 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
4395 "device");
4396 return error;
4397 }
4398
/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm_list
 */
4405 int device_move(struct device *dev, struct device *new_parent,
4406 enum dpm_order dpm_order)
4407 {
4408 int error;
4409 struct device *old_parent;
4410 struct kobject *new_parent_kobj;
4411
4412 dev = get_device(dev);
4413 if (!dev)
4414 return -EINVAL;
4415
4416 device_pm_lock();
4417 new_parent = get_device(new_parent);
4418 new_parent_kobj = get_device_parent(dev, new_parent);
4419 if (IS_ERR(new_parent_kobj)) {
4420 error = PTR_ERR(new_parent_kobj);
4421 put_device(new_parent);
4422 goto out;
4423 }
4424
4425 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
4426 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
4427 error = kobject_move(&dev->kobj, new_parent_kobj);
4428 if (error) {
4429 cleanup_glue_dir(dev, new_parent_kobj);
4430 put_device(new_parent);
4431 goto out;
4432 }
4433 old_parent = dev->parent;
4434 dev->parent = new_parent;
4435 if (old_parent)
4436 klist_remove(&dev->p->knode_parent);
4437 if (new_parent) {
4438 klist_add_tail(&dev->p->knode_parent,
4439 &new_parent->p->klist_children);
4440 set_dev_node(dev, dev_to_node(new_parent));
4441 }
4442
4443 if (dev->class) {
4444 error = device_move_class_links(dev, old_parent, new_parent);
4445 if (error) {
4446
4447 device_move_class_links(dev, new_parent, old_parent);
4448 if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
4449 if (new_parent)
4450 klist_remove(&dev->p->knode_parent);
4451 dev->parent = old_parent;
4452 if (old_parent) {
4453 klist_add_tail(&dev->p->knode_parent,
4454 &old_parent->p->klist_children);
4455 set_dev_node(dev, dev_to_node(old_parent));
4456 }
4457 }
4458 cleanup_glue_dir(dev, new_parent_kobj);
4459 put_device(new_parent);
4460 goto out;
4461 }
4462 }
4463 switch (dpm_order) {
4464 case DPM_ORDER_NONE:
4465 break;
4466 case DPM_ORDER_DEV_AFTER_PARENT:
4467 device_pm_move_after(dev, new_parent);
4468 devices_kset_move_after(dev, new_parent);
4469 break;
4470 case DPM_ORDER_PARENT_BEFORE_DEV:
4471 device_pm_move_before(new_parent, dev);
4472 devices_kset_move_before(new_parent, dev);
4473 break;
4474 case DPM_ORDER_DEV_LAST:
4475 device_pm_move_last(dev);
4476 devices_kset_move_last(dev);
4477 break;
4478 }
4479
4480 put_device(old_parent);
4481 out:
4482 device_pm_unlock();
4483 put_device(dev);
4484 return error;
4485 }
4486 EXPORT_SYMBOL_GPL(device_move);
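
/*
 * Usage sketch (illustrative): reparenting a device that was discovered
 * under one parent but logically belongs under another, while keeping
 * the PM ordering consistent with the new topology:
 *
 *	err = device_move(dev, new_parent, DPM_ORDER_DEV_AFTER_PARENT);
 *	if (err)
 *		return err;
 *
 * DPM_ORDER_NONE leaves the dpm_list untouched; the other orders
 * reorder it so that suspend/resume still walk parents and children
 * in the right sequence.
 */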
4487
4488 static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
4489 kgid_t kgid)
4490 {
4491 struct kobject *kobj = &dev->kobj;
4492 struct class *class = dev->class;
4493 const struct device_type *type = dev->type;
4494 int error;
4495
4496 if (class) {
4497
4498
4499
4500
4501 error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
4502 kgid);
4503 if (error)
4504 return error;
4505 }
4506
4507 if (type) {
4508
4509
4510
4511
4512 error = sysfs_groups_change_owner(kobj, type->groups, kuid,
4513 kgid);
4514 if (error)
4515 return error;
4516 }
4517
4518
4519 error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
4520 if (error)
4521 return error;
4522
4523 if (device_supports_offline(dev) && !dev->offline_disabled) {
4524
4525 error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
4526 kuid, kgid);
4527 if (error)
4528 return error;
4529 }
4530
4531 return 0;
4532 }
4533
/**
 * device_change_owner - change the owner of an existing device.
 * @dev: device.
 * @kuid: new owner's kuid
 * @kgid: new owner's kgid
 *
 * This changes the owner of @dev and its corresponding sysfs entries to
 * @kuid/@kgid.  This function closely mirrors how @dev was added via
 * the driver core.
 *
 * Returns 0 on success or error code on failure.
 */
4546 int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
4547 {
4548 int error;
4549 struct kobject *kobj = &dev->kobj;
4550
4551 dev = get_device(dev);
4552 if (!dev)
4553 return -EINVAL;
4554
4555
4556
4557
4558
4559 error = sysfs_change_owner(kobj, kuid, kgid);
4560 if (error)
4561 goto out;
4562
4563
4564
4565
4566
4567
4568 error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
4569 kgid);
4570 if (error)
4571 goto out;
4572
4573
4574
4575
4576
4577
4578 error = device_attrs_change_owner(dev, kuid, kgid);
4579 if (error)
4580 goto out;
4581
4582 error = dpm_sysfs_change_owner(dev, kuid, kgid);
4583 if (error)
4584 goto out;
4585
4586 #ifdef CONFIG_BLOCK
4587 if (sysfs_deprecated && dev->class == &block_class)
4588 goto out;
4589 #endif
4590
4591
4592
4593
4594
4595
4596
4597 error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
4598 dev_name(dev), kuid, kgid);
4599 if (error)
4600 goto out;
4601
4602 out:
4603 put_device(dev);
4604 return error;
4605 }
4606 EXPORT_SYMBOL_GPL(device_change_owner);
4607
/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 */
4611 void device_shutdown(void)
4612 {
4613 struct device *dev, *parent;
4614
4615 wait_for_device_probe();
4616 device_block_probing();
4617
4618 cpufreq_suspend();
4619
4620 spin_lock(&devices_kset->list_lock);
4621
4622
4623
4624
4625
4626 while (!list_empty(&devices_kset->list)) {
4627 dev = list_entry(devices_kset->list.prev, struct device,
4628 kobj.entry);
4629
4630
4631
4632
4633
4634
4635 parent = get_device(dev->parent);
4636 get_device(dev);
4637
4638
4639
4640
4641 list_del_init(&dev->kobj.entry);
4642 spin_unlock(&devices_kset->list_lock);
4643
4644
4645 if (parent)
4646 device_lock(parent);
4647 device_lock(dev);
4648
4649
4650 pm_runtime_get_noresume(dev);
4651 pm_runtime_barrier(dev);
4652
4653 if (dev->class && dev->class->shutdown_pre) {
4654 if (initcall_debug)
4655 dev_info(dev, "shutdown_pre\n");
4656 dev->class->shutdown_pre(dev);
4657 }
4658 if (dev->bus && dev->bus->shutdown) {
4659 if (initcall_debug)
4660 dev_info(dev, "shutdown\n");
4661 dev->bus->shutdown(dev);
4662 } else if (dev->driver && dev->driver->shutdown) {
4663 if (initcall_debug)
4664 dev_info(dev, "shutdown\n");
4665 dev->driver->shutdown(dev);
4666 }
4667
4668 device_unlock(dev);
4669 if (parent)
4670 device_unlock(parent);
4671
4672 put_device(dev);
4673 put_device(parent);
4674
4675 spin_lock(&devices_kset->list_lock);
4676 }
4677 spin_unlock(&devices_kset->list_lock);
4678 }
4679
4680
/*
 * Device logging functions
 */
4684 #ifdef CONFIG_PRINTK
4685 static void
4686 set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
4687 {
4688 const char *subsys;
4689
4690 memset(dev_info, 0, sizeof(*dev_info));
4691
4692 if (dev->class)
4693 subsys = dev->class->name;
4694 else if (dev->bus)
4695 subsys = dev->bus->name;
4696 else
4697 return;
4698
4699 strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
4700
4701
4702
4703
4704
4705
4706
4707
4708 if (MAJOR(dev->devt)) {
4709 char c;
4710
4711 if (strcmp(subsys, "block") == 0)
4712 c = 'b';
4713 else
4714 c = 'c';
4715
4716 snprintf(dev_info->device, sizeof(dev_info->device),
4717 "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
4718 } else if (strcmp(subsys, "net") == 0) {
4719 struct net_device *net = to_net_dev(dev);
4720
4721 snprintf(dev_info->device, sizeof(dev_info->device),
4722 "n%u", net->ifindex);
4723 } else {
4724 snprintf(dev_info->device, sizeof(dev_info->device),
4725 "+%s:%s", subsys, dev_name(dev));
4726 }
4727 }
4728
4729 int dev_vprintk_emit(int level, const struct device *dev,
4730 const char *fmt, va_list args)
4731 {
4732 struct dev_printk_info dev_info;
4733
4734 set_dev_info(dev, &dev_info);
4735
4736 return vprintk_emit(0, level, &dev_info, fmt, args);
4737 }
4738 EXPORT_SYMBOL(dev_vprintk_emit);
4739
4740 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
4741 {
4742 va_list args;
4743 int r;
4744
4745 va_start(args, fmt);
4746
4747 r = dev_vprintk_emit(level, dev, fmt, args);
4748
4749 va_end(args);
4750
4751 return r;
4752 }
4753 EXPORT_SYMBOL(dev_printk_emit);
4754
4755 static void __dev_printk(const char *level, const struct device *dev,
4756 struct va_format *vaf)
4757 {
4758 if (dev)
4759 dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
4760 dev_driver_string(dev), dev_name(dev), vaf);
4761 else
4762 printk("%s(NULL device *): %pV", level, vaf);
4763 }
4764
4765 void _dev_printk(const char *level, const struct device *dev,
4766 const char *fmt, ...)
4767 {
4768 struct va_format vaf;
4769 va_list args;
4770
4771 va_start(args, fmt);
4772
4773 vaf.fmt = fmt;
4774 vaf.va = &args;
4775
4776 __dev_printk(level, dev, &vaf);
4777
4778 va_end(args);
4779 }
4780 EXPORT_SYMBOL(_dev_printk);
4781
4782 #define define_dev_printk_level(func, kern_level) \
4783 void func(const struct device *dev, const char *fmt, ...) \
4784 { \
4785 struct va_format vaf; \
4786 va_list args; \
4787 \
4788 va_start(args, fmt); \
4789 \
4790 vaf.fmt = fmt; \
4791 vaf.va = &args; \
4792 \
4793 __dev_printk(kern_level, dev, &vaf); \
4794 \
4795 va_end(args); \
4796 } \
4797 EXPORT_SYMBOL(func);
4798
4799 define_dev_printk_level(_dev_emerg, KERN_EMERG);
4800 define_dev_printk_level(_dev_alert, KERN_ALERT);
4801 define_dev_printk_level(_dev_crit, KERN_CRIT);
4802 define_dev_printk_level(_dev_err, KERN_ERR);
4803 define_dev_printk_level(_dev_warn, KERN_WARNING);
4804 define_dev_printk_level(_dev_notice, KERN_NOTICE);
4805 define_dev_printk_level(_dev_info, KERN_INFO);
4806
4807 #endif
4808
/**
 * dev_err_probe - probe error check and log helper
 * @dev: the pointer to the struct device
 * @err: error value to test
 * @fmt: printf-style format string
 * @...: arguments as specified in the format string
 *
 * This helper implements common pattern present in probe functions for error
 * checking: print debug or error message depending if the error value is
 * -EPROBE_DEFER and propagate error upwards.
 * In case of -EPROBE_DEFER it sets also defer probe reason, which can be
 * checked later by reading devices_deferred debugfs attribute.
 * It replaces code sequence::
 *
 *	if (err != -EPROBE_DEFER)
 *		dev_err(dev, ...);
 *	else
 *		dev_dbg(dev, ...);
 *	return err;
 *
 * with::
 *
 *	return dev_err_probe(dev, err, ...);
 *
 * Note that it is deemed acceptable to use this function for error
 * prints during probe even if the @err is known to never be -EPROBE_DEFER.
 * The benefit compared to a normal dev_err() is the standardized format
 * of the error code and the fact that the error code is returned.
 *
 * Returns @err.
 */
4841 int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
4842 {
4843 struct va_format vaf;
4844 va_list args;
4845
4846 va_start(args, fmt);
4847 vaf.fmt = fmt;
4848 vaf.va = &args;
4849
4850 if (err != -EPROBE_DEFER) {
4851 dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
4852 } else {
4853 device_set_deferred_probe_reason(dev, &vaf);
4854 dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
4855 }
4856
4857 va_end(args);
4858
4859 return err;
4860 }
4861 EXPORT_SYMBOL_GPL(dev_err_probe);
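
/*
 * Usage sketch (hypothetical probe; devm_clk_get() chosen only as a
 * familiar resource getter): one statement replaces the usual
 * print-and-return dance, with -EPROBE_DEFER demoted to dev_dbg() and
 * recorded as the deferral reason:
 *
 *	clk = devm_clk_get(dev, NULL);
 *	if (IS_ERR(clk))
 *		return dev_err_probe(dev, PTR_ERR(clk),
 *				     "failed to get clock\n");
 */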
4862
4863 static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
4864 {
	return fwnode && !IS_ERR_OR_NULL(fwnode->secondary);
4866 }
4867
/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device.
 *
 * Set the device's firmware node pointer to @fwnode, but if a secondary
 * firmware node of the device is present, preserve it.
 *
 * Valid fwnode cases are:
 *  - primary --> secondary --> -ENODEV
 *  - primary --> NULL
 *  - secondary --> -ENODEV
 *  - NULL
 */
4882 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
4883 {
4884 struct device *parent = dev->parent;
4885 struct fwnode_handle *fn = dev->fwnode;
4886
4887 if (fwnode) {
4888 if (fwnode_is_primary(fn))
4889 fn = fn->secondary;
4890
4891 if (fn) {
4892 WARN_ON(fwnode->secondary);
4893 fwnode->secondary = fn;
4894 }
4895 dev->fwnode = fwnode;
4896 } else {
4897 if (fwnode_is_primary(fn)) {
4898 dev->fwnode = fn->secondary;
4899
4900 if (!(parent && fn == parent->fwnode))
4901 fn->secondary = NULL;
4902 } else {
4903 dev->fwnode = NULL;
4904 }
4905 }
4906 }
4907 EXPORT_SYMBOL_GPL(set_primary_fwnode);
4908
/**
 * set_secondary_fwnode - Change the secondary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New secondary firmware node of the device.
 *
 * If a primary firmware node of the device is present, set its secondary
 * pointer to @fwnode.  Otherwise, set the device's firmware node pointer
 * to @fwnode.
 */
4918 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
4919 {
4920 if (fwnode)
4921 fwnode->secondary = ERR_PTR(-ENODEV);
4922
4923 if (fwnode_is_primary(dev->fwnode))
4924 dev->fwnode->secondary = fwnode;
4925 else
4926 dev->fwnode = fwnode;
4927 }
4928 EXPORT_SYMBOL_GPL(set_secondary_fwnode);
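
/*
 * Usage sketch (illustrative, assuming the software node API from
 * <linux/property.h>): attaching board-file properties as a secondary
 * fwnode, so that firmware-provided (primary) properties keep
 * precedence during property lookups:
 *
 *	static const struct property_entry foo_props[] = {
 *		PROPERTY_ENTRY_U32("foo-rate", 100000),
 *		{ }
 *	};
 *
 *	fw = fwnode_create_software_node(foo_props, NULL);
 *	if (!IS_ERR(fw))
 *		set_secondary_fwnode(dev, fw);
 */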
4929
/**
 * device_set_of_node_from_dev - reuse the device-tree node of another device
 * @dev: device whose device-tree node is being set
 * @dev2: device whose device-tree node is being reused
 *
 * Takes another reference to the new device-tree node after first dropping
 * any reference held to the old node.
 */
4938 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
4939 {
4940 of_node_put(dev->of_node);
4941 dev->of_node = of_node_get(dev2->of_node);
4942 dev->of_node_reused = true;
4943 }
4944 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
4945
4946 void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
4947 {
4948 dev->fwnode = fwnode;
4949 dev->of_node = to_of_node(fwnode);
4950 }
4951 EXPORT_SYMBOL_GPL(device_set_node);
4952
4953 int device_match_name(struct device *dev, const void *name)
4954 {
4955 return sysfs_streq(dev_name(dev), name);
4956 }
4957 EXPORT_SYMBOL_GPL(device_match_name);
4958
4959 int device_match_of_node(struct device *dev, const void *np)
4960 {
4961 return dev->of_node == np;
4962 }
4963 EXPORT_SYMBOL_GPL(device_match_of_node);
4964
4965 int device_match_fwnode(struct device *dev, const void *fwnode)
4966 {
4967 return dev_fwnode(dev) == fwnode;
4968 }
4969 EXPORT_SYMBOL_GPL(device_match_fwnode);
4970
4971 int device_match_devt(struct device *dev, const void *pdevt)
4972 {
4973 return dev->devt == *(dev_t *)pdevt;
4974 }
4975 EXPORT_SYMBOL_GPL(device_match_devt);
4976
4977 int device_match_acpi_dev(struct device *dev, const void *adev)
4978 {
4979 return ACPI_COMPANION(dev) == adev;
4980 }
4981 EXPORT_SYMBOL(device_match_acpi_dev);
4982
4983 int device_match_acpi_handle(struct device *dev, const void *handle)
4984 {
4985 return ACPI_HANDLE(dev) == handle;
4986 }
4987 EXPORT_SYMBOL(device_match_acpi_handle);
4988
4989 int device_match_any(struct device *dev, const void *unused)
4990 {
4991 return 1;
4992 }
4993 EXPORT_SYMBOL_GPL(device_match_any);