0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/kernel.h>
0011 #include <linux/export.h>
0012 #include <linux/module.h>
0013 #include <linux/err.h>
0014 #include <linux/device.h>
0015 #include <linux/slab.h>
0016 #include <linux/of.h>
0017 #include <linux/phy/phy.h>
0018 #include <linux/idr.h>
0019 #include <linux/pm_runtime.h>
0020 #include <linux/regulator/consumer.h>
0021
0022 static struct class *phy_class;
0023 static DEFINE_MUTEX(phy_provider_mutex);
0024 static LIST_HEAD(phy_provider_list);
0025 static LIST_HEAD(phys);
0026 static DEFINE_IDA(phy_ida);
0027
/* devres destructor: drop the consumer reference taken by devm_phy_get(). */
static void devm_phy_release(struct device *dev, void *res)
{
	phy_put(dev, *(struct phy **)res);
}
0034
/* devres destructor: unregister a provider added by the devm_ variant. */
static void devm_phy_provider_release(struct device *dev, void *res)
{
	of_phy_provider_unregister(*(struct phy_provider **)res);
}
0041
/* devres destructor: destroy a phy created by devm_phy_create(). */
static void devm_phy_consume(struct device *dev, void *res)
{
	phy_destroy(*(struct phy **)res);
}
0048
/* devres match callback: true when the managed pointer equals @match_data. */
static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
	return *(struct phy **)res == match_data;
}
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064 int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
0065 {
0066 struct phy_lookup *pl;
0067
0068 if (!phy || !dev_id || !con_id)
0069 return -EINVAL;
0070
0071 pl = kzalloc(sizeof(*pl), GFP_KERNEL);
0072 if (!pl)
0073 return -ENOMEM;
0074
0075 pl->dev_id = dev_id;
0076 pl->con_id = con_id;
0077 pl->phy = phy;
0078
0079 mutex_lock(&phy_provider_mutex);
0080 list_add_tail(&pl->node, &phys);
0081 mutex_unlock(&phy_provider_mutex);
0082
0083 return 0;
0084 }
0085 EXPORT_SYMBOL_GPL(phy_create_lookup);
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096 void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
0097 {
0098 struct phy_lookup *pl;
0099
0100 if (!phy || !dev_id || !con_id)
0101 return;
0102
0103 mutex_lock(&phy_provider_mutex);
0104 list_for_each_entry(pl, &phys, node)
0105 if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
0106 !strcmp(pl->con_id, con_id)) {
0107 list_del(&pl->node);
0108 kfree(pl);
0109 break;
0110 }
0111 mutex_unlock(&phy_provider_mutex);
0112 }
0113 EXPORT_SYMBOL_GPL(phy_remove_lookup);
0114
0115 static struct phy *phy_find(struct device *dev, const char *con_id)
0116 {
0117 const char *dev_id = dev_name(dev);
0118 struct phy_lookup *p, *pl = NULL;
0119
0120 mutex_lock(&phy_provider_mutex);
0121 list_for_each_entry(p, &phys, node)
0122 if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
0123 pl = p;
0124 break;
0125 }
0126 mutex_unlock(&phy_provider_mutex);
0127
0128 return pl ? pl->phy : ERR_PTR(-ENODEV);
0129 }
0130
0131 static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
0132 {
0133 struct phy_provider *phy_provider;
0134 struct device_node *child;
0135
0136 list_for_each_entry(phy_provider, &phy_provider_list, list) {
0137 if (phy_provider->dev->of_node == node)
0138 return phy_provider;
0139
0140 for_each_child_of_node(phy_provider->children, child)
0141 if (child == node)
0142 return phy_provider;
0143 }
0144
0145 return ERR_PTR(-EPROBE_DEFER);
0146 }
0147
0148 int phy_pm_runtime_get(struct phy *phy)
0149 {
0150 int ret;
0151
0152 if (!phy)
0153 return 0;
0154
0155 if (!pm_runtime_enabled(&phy->dev))
0156 return -ENOTSUPP;
0157
0158 ret = pm_runtime_get(&phy->dev);
0159 if (ret < 0 && ret != -EINPROGRESS)
0160 pm_runtime_put_noidle(&phy->dev);
0161
0162 return ret;
0163 }
0164 EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
0165
0166 int phy_pm_runtime_get_sync(struct phy *phy)
0167 {
0168 int ret;
0169
0170 if (!phy)
0171 return 0;
0172
0173 if (!pm_runtime_enabled(&phy->dev))
0174 return -ENOTSUPP;
0175
0176 ret = pm_runtime_get_sync(&phy->dev);
0177 if (ret < 0)
0178 pm_runtime_put_sync(&phy->dev);
0179
0180 return ret;
0181 }
0182 EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
0183
0184 int phy_pm_runtime_put(struct phy *phy)
0185 {
0186 if (!phy)
0187 return 0;
0188
0189 if (!pm_runtime_enabled(&phy->dev))
0190 return -ENOTSUPP;
0191
0192 return pm_runtime_put(&phy->dev);
0193 }
0194 EXPORT_SYMBOL_GPL(phy_pm_runtime_put);
0195
0196 int phy_pm_runtime_put_sync(struct phy *phy)
0197 {
0198 if (!phy)
0199 return 0;
0200
0201 if (!pm_runtime_enabled(&phy->dev))
0202 return -ENOTSUPP;
0203
0204 return pm_runtime_put_sync(&phy->dev);
0205 }
0206 EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
0207
0208 void phy_pm_runtime_allow(struct phy *phy)
0209 {
0210 if (!phy)
0211 return;
0212
0213 if (!pm_runtime_enabled(&phy->dev))
0214 return;
0215
0216 pm_runtime_allow(&phy->dev);
0217 }
0218 EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);
0219
0220 void phy_pm_runtime_forbid(struct phy *phy)
0221 {
0222 if (!phy)
0223 return;
0224
0225 if (!pm_runtime_enabled(&phy->dev))
0226 return;
0227
0228 pm_runtime_forbid(&phy->dev);
0229 }
0230 EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
/**
 * phy_init - phy internal initialization before phy operation
 * @phy: the phy returned by phy_get()
 *
 * Used to let the phy driver perform internal initialization such as PLL
 * or clock setup required before the phy starts operating. The ->init()
 * op is only invoked on the first nested call (init_count 0 -> 1).
 * A NULL @phy is treated as an optional phy and succeeds as a no-op.
 *
 * Return: %0 if successful, a negative error code otherwise.
 */
int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	/* -ENOTSUPP only means runtime PM is not enabled for this phy */
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* Counts diverging this way indicates a misordered consumer */
	if (phy->power_count > phy->init_count)
		dev_warn(&phy->dev, "phy_power_on was called before phy_init\n");

	/* Only the outermost init call reaches the driver */
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			goto out;
		}
	}
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);
0274
0275
0276
0277
0278
0279
0280
0281
0282
/**
 * phy_exit - phy internal un-initialization
 * @phy: the phy returned by phy_get()
 *
 * Counterpart of phy_init(): the driver's ->exit() op is only invoked on
 * the last nested call (init_count 1 -> 0). A NULL @phy succeeds as a
 * no-op.
 *
 * Return: %0 if successful, a negative error code otherwise.
 */
int phy_exit(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	/* -ENOTSUPP only means runtime PM is not enabled for this phy */
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* Only the final exit call reaches the driver */
	if (phy->init_count == 1 && phy->ops->exit) {
		ret = phy->ops->exit(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
			/* on failure the count is deliberately left as-is */
			goto out;
		}
	}
	--phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);
0311
0312
0313
0314
0315
0316
0317
0318
0319
/**
 * phy_power_on - powers on the phy
 * @phy: the phy returned by phy_get()
 *
 * Enables the optional "phy" supply regulator, takes a runtime-PM
 * reference, and invokes the driver's ->power_on() op on the first
 * nested call (power_count 0 -> 1). On failure every step already taken
 * is unwound in reverse order. A NULL @phy succeeds as a no-op.
 *
 * Return: %0 if successful, a negative error code otherwise.
 */
int phy_power_on(struct phy *phy)
{
	int ret = 0;

	if (!phy)
		goto out;

	/* phy->pwr is NULL when no "phy" supply was found in phy_create() */
	if (phy->pwr) {
		ret = regulator_enable(phy->pwr);
		if (ret)
			goto out;
	}

	ret = phy_pm_runtime_get_sync(phy);
	/* -ENOTSUPP only means runtime PM is not enabled for this phy */
	if (ret < 0 && ret != -ENOTSUPP)
		goto err_pm_sync;

	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* Only the first power-on call reaches the driver */
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto err_pwr_on;
		}
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	return 0;

err_pwr_on:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);
err_pm_sync:
	if (phy->pwr)
		regulator_disable(phy->pwr);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);
0361
0362
0363
0364
0365
0366
0367
0368
0369
/**
 * phy_power_off - powers off the phy
 * @phy: the phy returned by phy_get()
 *
 * Counterpart of phy_power_on(): the driver's ->power_off() op is only
 * invoked on the last nested call (power_count 1 -> 0), after which the
 * runtime-PM reference and the optional supply regulator taken in
 * phy_power_on() are released. A NULL @phy succeeds as a no-op.
 *
 * Return: %0 if successful, a negative error code otherwise.
 */
int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	/* Only the final power-off call reaches the driver */
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret = phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			/* leave the count and references intact on failure */
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);

	if (phy->pwr)
		regulator_disable(phy->pwr);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);
0396
0397 int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
0398 {
0399 int ret;
0400
0401 if (!phy || !phy->ops->set_mode)
0402 return 0;
0403
0404 mutex_lock(&phy->mutex);
0405 ret = phy->ops->set_mode(phy, mode, submode);
0406 if (!ret)
0407 phy->attrs.mode = mode;
0408 mutex_unlock(&phy->mutex);
0409
0410 return ret;
0411 }
0412 EXPORT_SYMBOL_GPL(phy_set_mode_ext);
0413
0414 int phy_set_media(struct phy *phy, enum phy_media media)
0415 {
0416 int ret;
0417
0418 if (!phy || !phy->ops->set_media)
0419 return 0;
0420
0421 mutex_lock(&phy->mutex);
0422 ret = phy->ops->set_media(phy, media);
0423 mutex_unlock(&phy->mutex);
0424
0425 return ret;
0426 }
0427 EXPORT_SYMBOL_GPL(phy_set_media);
0428
0429 int phy_set_speed(struct phy *phy, int speed)
0430 {
0431 int ret;
0432
0433 if (!phy || !phy->ops->set_speed)
0434 return 0;
0435
0436 mutex_lock(&phy->mutex);
0437 ret = phy->ops->set_speed(phy, speed);
0438 mutex_unlock(&phy->mutex);
0439
0440 return ret;
0441 }
0442 EXPORT_SYMBOL_GPL(phy_set_speed);
0443
0444 int phy_reset(struct phy *phy)
0445 {
0446 int ret;
0447
0448 if (!phy || !phy->ops->reset)
0449 return 0;
0450
0451 ret = phy_pm_runtime_get_sync(phy);
0452 if (ret < 0 && ret != -ENOTSUPP)
0453 return ret;
0454
0455 mutex_lock(&phy->mutex);
0456 ret = phy->ops->reset(phy);
0457 mutex_unlock(&phy->mutex);
0458
0459 phy_pm_runtime_put(phy);
0460
0461 return ret;
0462 }
0463 EXPORT_SYMBOL_GPL(phy_reset);
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
0475 int phy_calibrate(struct phy *phy)
0476 {
0477 int ret;
0478
0479 if (!phy || !phy->ops->calibrate)
0480 return 0;
0481
0482 mutex_lock(&phy->mutex);
0483 ret = phy->ops->calibrate(phy);
0484 mutex_unlock(&phy->mutex);
0485
0486 return ret;
0487 }
0488 EXPORT_SYMBOL_GPL(phy_calibrate);
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501 int phy_configure(struct phy *phy, union phy_configure_opts *opts)
0502 {
0503 int ret;
0504
0505 if (!phy)
0506 return -EINVAL;
0507
0508 if (!phy->ops->configure)
0509 return -EOPNOTSUPP;
0510
0511 mutex_lock(&phy->mutex);
0512 ret = phy->ops->configure(phy, opts);
0513 mutex_unlock(&phy->mutex);
0514
0515 return ret;
0516 }
0517 EXPORT_SYMBOL_GPL(phy_configure);
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531
0532
0533
0534
0535 int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
0536 union phy_configure_opts *opts)
0537 {
0538 int ret;
0539
0540 if (!phy)
0541 return -EINVAL;
0542
0543 if (!phy->ops->validate)
0544 return -EOPNOTSUPP;
0545
0546 mutex_lock(&phy->mutex);
0547 ret = phy->ops->validate(phy, mode, submode, opts);
0548 mutex_unlock(&phy->mutex);
0549
0550 return ret;
0551 }
0552 EXPORT_SYMBOL_GPL(phy_validate);
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565 static struct phy *_of_phy_get(struct device_node *np, int index)
0566 {
0567 int ret;
0568 struct phy_provider *phy_provider;
0569 struct phy *phy = NULL;
0570 struct of_phandle_args args;
0571
0572 ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
0573 index, &args);
0574 if (ret)
0575 return ERR_PTR(-ENODEV);
0576
0577
0578 if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
0579 return ERR_PTR(-ENODEV);
0580
0581 mutex_lock(&phy_provider_mutex);
0582 phy_provider = of_phy_provider_lookup(args.np);
0583 if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
0584 phy = ERR_PTR(-EPROBE_DEFER);
0585 goto out_unlock;
0586 }
0587
0588 if (!of_device_is_available(args.np)) {
0589 dev_warn(phy_provider->dev, "Requested PHY is disabled\n");
0590 phy = ERR_PTR(-ENODEV);
0591 goto out_put_module;
0592 }
0593
0594 phy = phy_provider->of_xlate(phy_provider->dev, &args);
0595
0596 out_put_module:
0597 module_put(phy_provider->owner);
0598
0599 out_unlock:
0600 mutex_unlock(&phy_provider_mutex);
0601 of_node_put(args.np);
0602
0603 return phy;
0604 }
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615 struct phy *of_phy_get(struct device_node *np, const char *con_id)
0616 {
0617 struct phy *phy = NULL;
0618 int index = 0;
0619
0620 if (con_id)
0621 index = of_property_match_string(np, "phy-names", con_id);
0622
0623 phy = _of_phy_get(np, index);
0624 if (IS_ERR(phy))
0625 return phy;
0626
0627 if (!try_module_get(phy->ops->owner))
0628 return ERR_PTR(-EPROBE_DEFER);
0629
0630 get_device(&phy->dev);
0631
0632 return phy;
0633 }
0634 EXPORT_SYMBOL_GPL(of_phy_get);
0635
0636
0637
0638
0639
0640
0641
0642 void of_phy_put(struct phy *phy)
0643 {
0644 if (!phy || IS_ERR(phy))
0645 return;
0646
0647 mutex_lock(&phy->mutex);
0648 if (phy->ops->release)
0649 phy->ops->release(phy);
0650 mutex_unlock(&phy->mutex);
0651
0652 module_put(phy->ops->owner);
0653 put_device(&phy->dev);
0654 }
0655 EXPORT_SYMBOL_GPL(of_phy_put);
0656
0657
0658
0659
0660
0661
0662
0663
0664 void phy_put(struct device *dev, struct phy *phy)
0665 {
0666 device_link_remove(dev, &phy->dev);
0667 of_phy_put(phy);
0668 }
0669 EXPORT_SYMBOL_GPL(phy_put);
0670
0671
0672
0673
0674
0675
0676
0677
0678
0679 void devm_phy_put(struct device *dev, struct phy *phy)
0680 {
0681 int r;
0682
0683 if (!phy)
0684 return;
0685
0686 r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
0687 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
0688 }
0689 EXPORT_SYMBOL_GPL(devm_phy_put);
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
0700
0701 struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
0702 *args)
0703 {
0704 struct phy *phy;
0705 struct class_dev_iter iter;
0706
0707 class_dev_iter_init(&iter, phy_class, NULL, NULL);
0708 while ((dev = class_dev_iter_next(&iter))) {
0709 phy = to_phy(dev);
0710 if (args->np != phy->dev.of_node)
0711 continue;
0712
0713 class_dev_iter_exit(&iter);
0714 return phy;
0715 }
0716
0717 class_dev_iter_exit(&iter);
0718 return ERR_PTR(-ENODEV);
0719 }
0720 EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
0721
0722
0723
0724
0725
0726
0727
0728
0729
0730
0731
0732 struct phy *phy_get(struct device *dev, const char *string)
0733 {
0734 int index = 0;
0735 struct phy *phy;
0736 struct device_link *link;
0737
0738 if (dev->of_node) {
0739 if (string)
0740 index = of_property_match_string(dev->of_node, "phy-names",
0741 string);
0742 else
0743 index = 0;
0744 phy = _of_phy_get(dev->of_node, index);
0745 } else {
0746 if (string == NULL) {
0747 dev_WARN(dev, "missing string\n");
0748 return ERR_PTR(-EINVAL);
0749 }
0750 phy = phy_find(dev, string);
0751 }
0752 if (IS_ERR(phy))
0753 return phy;
0754
0755 if (!try_module_get(phy->ops->owner))
0756 return ERR_PTR(-EPROBE_DEFER);
0757
0758 get_device(&phy->dev);
0759
0760 link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
0761 if (!link)
0762 dev_dbg(dev, "failed to create device link to %s\n",
0763 dev_name(phy->dev.parent));
0764
0765 return phy;
0766 }
0767 EXPORT_SYMBOL_GPL(phy_get);
0768
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779 struct phy *phy_optional_get(struct device *dev, const char *string)
0780 {
0781 struct phy *phy = phy_get(dev, string);
0782
0783 if (PTR_ERR(phy) == -ENODEV)
0784 phy = NULL;
0785
0786 return phy;
0787 }
0788 EXPORT_SYMBOL_GPL(phy_optional_get);
0789
0790
0791
0792
0793
0794
0795
0796
0797
0798
0799
0800 struct phy *devm_phy_get(struct device *dev, const char *string)
0801 {
0802 struct phy **ptr, *phy;
0803
0804 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
0805 if (!ptr)
0806 return ERR_PTR(-ENOMEM);
0807
0808 phy = phy_get(dev, string);
0809 if (!IS_ERR(phy)) {
0810 *ptr = phy;
0811 devres_add(dev, ptr);
0812 } else {
0813 devres_free(ptr);
0814 }
0815
0816 return phy;
0817 }
0818 EXPORT_SYMBOL_GPL(devm_phy_get);
0819
0820
0821
0822
0823
0824
0825
0826
0827
0828
0829
0830
0831
0832
0833 struct phy *devm_phy_optional_get(struct device *dev, const char *string)
0834 {
0835 struct phy *phy = devm_phy_get(dev, string);
0836
0837 if (PTR_ERR(phy) == -ENODEV)
0838 phy = NULL;
0839
0840 return phy;
0841 }
0842 EXPORT_SYMBOL_GPL(devm_phy_optional_get);
0843
0844
0845
0846
0847
0848
0849
0850
0851
0852
0853
0854 struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
0855 const char *con_id)
0856 {
0857 struct phy **ptr, *phy;
0858 struct device_link *link;
0859
0860 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
0861 if (!ptr)
0862 return ERR_PTR(-ENOMEM);
0863
0864 phy = of_phy_get(np, con_id);
0865 if (!IS_ERR(phy)) {
0866 *ptr = phy;
0867 devres_add(dev, ptr);
0868 } else {
0869 devres_free(ptr);
0870 return phy;
0871 }
0872
0873 link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
0874 if (!link)
0875 dev_dbg(dev, "failed to create device link to %s\n",
0876 dev_name(phy->dev.parent));
0877
0878 return phy;
0879 }
0880 EXPORT_SYMBOL_GPL(devm_of_phy_get);
0881
0882
0883
0884
0885
0886
0887
0888
0889
0890
0891
0892
0893
/**
 * devm_of_phy_get_by_index() - resource-managed phy lookup by index
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @index: index of the phy in the "phys" phandle list
 *
 * Gets the phy using _of_phy_get(), takes the driver-module and device
 * references itself (unlike devm_of_phy_get(), which delegates to
 * of_phy_get()), registers a devres entry so the phy is released on
 * driver detach, and creates a stateless device link to the phy.
 *
 * Return: the phy, or an ERR_PTR() on failure.
 */
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
				     int index)
{
	struct phy **ptr, *phy;
	struct device_link *link;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy)) {
		devres_free(ptr);
		return phy;
	}

	/* keep the providing driver loaded while the phy is in use */
	if (!try_module_get(phy->ops->owner)) {
		devres_free(ptr);
		return ERR_PTR(-EPROBE_DEFER);
	}

	get_device(&phy->dev);

	*ptr = phy;
	devres_add(dev, ptr);

	/* link failure is non-fatal: the phy itself was acquired fine */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
0928
0929
0930
0931
0932
0933
0934
0935
0936
/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy; NULL falls back to @dev->of_node
 * @ops: function pointers for performing phy operations
 *
 * Called by a phy provider to register a phy with the phy framework.
 * Note the two distinct error-unwind paths below: before
 * device_initialize() the phy is freed directly (free_phy); afterwards
 * put_device() drives phy_release(), which frees the id, the regulator
 * and the phy itself (put_dev).
 *
 * Return: the new phy, or an ERR_PTR() on failure.
 */
struct phy *phy_create(struct device *dev, struct device_node *node,
		       const struct phy_ops *ops)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	device_initialize(&phy->dev);
	mutex_init(&phy->mutex);

	phy->dev.class = phy_class;
	phy->dev.parent = dev;
	phy->dev.of_node = node ?: dev->of_node;
	phy->id = id;
	phy->ops = ops;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	/* optional "phy" supply; absence is fine, only defer is fatal */
	phy->pwr = regulator_get_optional(&phy->dev, "phy");
	if (IS_ERR(phy->pwr)) {
		ret = PTR_ERR(phy->pwr);
		if (ret == -EPROBE_DEFER)
			goto put_dev;

		phy->pwr = NULL;
	}

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	return phy;

put_dev:
	/* phy_release() frees id, regulator and phy via the class release */
	put_device(&phy->dev);
	return ERR_PTR(ret);

free_phy:
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013 struct phy *devm_phy_create(struct device *dev, struct device_node *node,
1014 const struct phy_ops *ops)
1015 {
1016 struct phy **ptr, *phy;
1017
1018 ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
1019 if (!ptr)
1020 return ERR_PTR(-ENOMEM);
1021
1022 phy = phy_create(dev, node, ops);
1023 if (!IS_ERR(phy)) {
1024 *ptr = phy;
1025 devres_add(dev, ptr);
1026 } else {
1027 devres_free(ptr);
1028 }
1029
1030 return phy;
1031 }
1032 EXPORT_SYMBOL_GPL(devm_phy_create);
1033
1034
1035
1036
1037
1038
1039
/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Disables runtime PM and unregisters the phy device; the final
 * put_device inside device_unregister() triggers phy_release().
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055 void devm_phy_destroy(struct device *dev, struct phy *phy)
1056 {
1057 int r;
1058
1059 r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
1060 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
1061 }
1062 EXPORT_SYMBOL_GPL(devm_phy_destroy);
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
/**
 * __of_phy_provider_register() - register a phy provider with the framework
 * @dev: the provider device
 * @children: device node containing the phy children, or NULL to use
 *	@dev->of_node; when given, it must be @dev->of_node itself or a
 *	descendant of it (verified by walking the parent chain below)
 * @owner: the module that provides the ->of_xlate() callback
 * @of_xlate: translation function to map phandle args to a struct phy
 *
 * Return: the new phy_provider, ERR_PTR(-EINVAL) when @children is not
 * under @dev->of_node, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct phy_provider *__of_phy_provider_register(struct device *dev,
	struct device_node *children, struct module *owner,
	struct phy * (*of_xlate)(struct device *dev,
				 struct of_phandle_args *args))
{
	struct phy_provider *phy_provider;

	/*
	 * Validate that the explicit @children node lies within the
	 * provider's own subtree. Each of_get_parent() takes a reference
	 * on the next node, so the previous one is dropped each step;
	 * the loop terminates at @dev->of_node or at the tree root
	 * (parent == NULL).
	 */
	if (children) {
		struct device_node *parent = of_node_get(children), *next;

		while (parent) {
			if (parent == dev->of_node)
				break;

			next = of_get_parent(parent);
			of_node_put(parent);
			parent = next;
		}

		/* reached the root without meeting dev->of_node */
		if (!parent)
			return ERR_PTR(-EINVAL);

		of_node_put(parent);
	} else {
		children = dev->of_node;
	}

	phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
	if (!phy_provider)
		return ERR_PTR(-ENOMEM);

	phy_provider->dev = dev;
	/* reference held until of_phy_provider_unregister() */
	phy_provider->children = of_node_get(children);
	phy_provider->owner = owner;
	phy_provider->of_xlate = of_xlate;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&phy_provider->list, &phy_provider_list);
	mutex_unlock(&phy_provider_mutex);

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__of_phy_provider_register);
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145 struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
1146 struct device_node *children, struct module *owner,
1147 struct phy * (*of_xlate)(struct device *dev,
1148 struct of_phandle_args *args))
1149 {
1150 struct phy_provider **ptr, *phy_provider;
1151
1152 ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
1153 if (!ptr)
1154 return ERR_PTR(-ENOMEM);
1155
1156 phy_provider = __of_phy_provider_register(dev, children, owner,
1157 of_xlate);
1158 if (!IS_ERR(phy_provider)) {
1159 *ptr = phy_provider;
1160 devres_add(dev, ptr);
1161 } else {
1162 devres_free(ptr);
1163 }
1164
1165 return phy_provider;
1166 }
1167 EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
1168
1169
1170
1171
1172
1173
1174
/**
 * of_phy_provider_unregister() - unregister a phy provider
 * @phy_provider: the provider to unregister; ERR_PTR values are ignored
 *
 * Removes the provider from the global list, drops the children node
 * reference taken at registration, and frees it. The free happens under
 * the mutex so no concurrent of_phy_provider_lookup() can still observe
 * the entry.
 */
void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
	if (IS_ERR(phy_provider))
		return;

	mutex_lock(&phy_provider_mutex);
	list_del(&phy_provider->list);
	of_node_put(phy_provider->children);
	kfree(phy_provider);
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196 void devm_of_phy_provider_unregister(struct device *dev,
1197 struct phy_provider *phy_provider)
1198 {
1199 int r;
1200
1201 r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
1202 phy_provider);
1203 dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
1204 }
1205 EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
1206
1207
1208
1209
1210
1211
1212
1213
/*
 * Class release callback, invoked on the final put_device() of a phy.
 * Frees everything phy_create() allocated: the optional supply
 * regulator, the ida id and the phy structure itself.
 */
static void phy_release(struct device *dev)
{
	struct phy *phy;

	phy = to_phy(dev);
	dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
	regulator_put(phy->pwr);
	ida_simple_remove(&phy_ida, phy->id);
	kfree(phy);
}
1224
/*
 * Framework init: create the "phy" device class and hook up the release
 * callback that tears down individual phys.
 */
static int __init phy_core_init(void)
{
	phy_class = class_create(THIS_MODULE, "phy");
	if (IS_ERR(phy_class)) {
		pr_err("failed to create phy class --> %ld\n",
		       PTR_ERR(phy_class));
		return PTR_ERR(phy_class);
	}

	phy_class->dev_release = phy_release;

	return 0;
}
device_initcall(phy_core_init);