0001
0002
0003
0004
0005
0006
0007 #include <linux/atomic.h>
0008 #include <linux/device.h>
0009 #include <linux/err.h>
0010 #include <linux/export.h>
0011 #include <linux/kernel.h>
0012 #include <linux/kref.h>
0013 #include <linux/module.h>
0014 #include <linux/of.h>
0015 #include <linux/acpi.h>
0016 #include <linux/reset.h>
0017 #include <linux/reset-controller.h>
0018 #include <linux/slab.h>
0019
/*
 * Protects reset_controller_list and, per the lockdep asserts below, each
 * registered controller's reset_control_head list.
 */
static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);

/* Protects reset_lookup_list (board-file reset lookup entries). */
static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
/**
 * struct reset_control - a reset control
 * @rcdev: a pointer to the reset controller device
 *         this reset control belongs to
 * @list: list entry for the rcdev's reset controller list
 * @id: ID of the reset controller in the reset
 *      controller device
 * @refcnt: Number of gets of this reset_control
 * @acquired: Only one reset_control may be acquired for a given rcdev and id.
 * @shared: Is this a shared (1), or an exclusive (0) reset_control?
 * @array: Is this an array of reset controls (1)?
 * @deassert_count: Number of times this reset line has been deasserted
 * @triggered_count: Number of times this reset line has been reset. Currently
 *                   only used for shared resets, which are only reset once
 *                   per their lifetime.
 */
struct reset_control {
	struct reset_controller_dev *rcdev;
	struct list_head list;
	unsigned int id;
	struct kref refcnt;
	bool acquired;
	bool shared;
	bool array;
	atomic_t deassert_count;
	atomic_t triggered_count;
};
0053
0054
0055
0056
0057
0058
0059
/**
 * struct reset_control_array - an array of reset controls
 * @base: reset control for compatibility with reset control API functions
 * @num_rstcs: number of reset controls
 * @rstc: array of reset controls
 */
struct reset_control_array {
	struct reset_control base;
	unsigned int num_rstcs;
	struct reset_control *rstc[];
};
0065
0066 static const char *rcdev_name(struct reset_controller_dev *rcdev)
0067 {
0068 if (rcdev->dev)
0069 return dev_name(rcdev->dev);
0070
0071 if (rcdev->of_node)
0072 return rcdev->of_node->full_name;
0073
0074 return NULL;
0075 }
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
/**
 * of_reset_simple_xlate - translate reset_spec to the reset line number
 * @rcdev: a pointer to the reset controller device
 * @reset_spec: reset line specifier as found in the device tree
 *
 * This static translation function is used by default if of_xlate in
 * &struct reset_controller_dev is not set. It is useful for all reset
 * controllers with 1:1 mapping, where reset lines can be indexed by number
 * without gaps.
 */
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	/* Single-cell specifier: the cell is the line index itself. */
	if (reset_spec->args[0] >= rcdev->nr_resets)
		return -EINVAL;

	return reset_spec->args[0];
}
0095
0096
0097
0098
0099
0100 int reset_controller_register(struct reset_controller_dev *rcdev)
0101 {
0102 if (!rcdev->of_xlate) {
0103 rcdev->of_reset_n_cells = 1;
0104 rcdev->of_xlate = of_reset_simple_xlate;
0105 }
0106
0107 INIT_LIST_HEAD(&rcdev->reset_control_head);
0108
0109 mutex_lock(&reset_list_mutex);
0110 list_add(&rcdev->list, &reset_controller_list);
0111 mutex_unlock(&reset_list_mutex);
0112
0113 return 0;
0114 }
0115 EXPORT_SYMBOL_GPL(reset_controller_register);
0116
0117
0118
0119
0120
/**
 * reset_controller_unregister - unregister a reset controller device
 * @rcdev: a pointer to the reset controller device
 *
 * Removes the controller from the global list under reset_list_mutex.
 */
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
	mutex_lock(&reset_list_mutex);
	list_del(&rcdev->list);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);
0127 EXPORT_SYMBOL_GPL(reset_controller_unregister);
0128
/* devres release callback: unregisters the controller stored in *res. */
static void devm_reset_controller_release(struct device *dev, void *res)
{
	reset_controller_unregister(*(struct reset_controller_dev **)res);
}
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143 int devm_reset_controller_register(struct device *dev,
0144 struct reset_controller_dev *rcdev)
0145 {
0146 struct reset_controller_dev **rcdevp;
0147 int ret;
0148
0149 rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
0150 GFP_KERNEL);
0151 if (!rcdevp)
0152 return -ENOMEM;
0153
0154 ret = reset_controller_register(rcdev);
0155 if (ret) {
0156 devres_free(rcdevp);
0157 return ret;
0158 }
0159
0160 *rcdevp = rcdev;
0161 devres_add(dev, rcdevp);
0162
0163 return ret;
0164 }
0165 EXPORT_SYMBOL_GPL(devm_reset_controller_register);
0166
0167
0168
0169
0170
0171
0172 void reset_controller_add_lookup(struct reset_control_lookup *lookup,
0173 unsigned int num_entries)
0174 {
0175 struct reset_control_lookup *entry;
0176 unsigned int i;
0177
0178 mutex_lock(&reset_lookup_mutex);
0179 for (i = 0; i < num_entries; i++) {
0180 entry = &lookup[i];
0181
0182 if (!entry->dev_id || !entry->provider) {
0183 pr_warn("%s(): reset lookup entry badly specified, skipping\n",
0184 __func__);
0185 continue;
0186 }
0187
0188 list_add_tail(&entry->list, &reset_lookup_list);
0189 }
0190 mutex_unlock(&reset_lookup_mutex);
0191 }
0192 EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
0193
0194 static inline struct reset_control_array *
0195 rstc_to_array(struct reset_control *rstc) {
0196 return container_of(rstc, struct reset_control_array, base);
0197 }
0198
0199 static int reset_control_array_reset(struct reset_control_array *resets)
0200 {
0201 int ret, i;
0202
0203 for (i = 0; i < resets->num_rstcs; i++) {
0204 ret = reset_control_reset(resets->rstc[i]);
0205 if (ret)
0206 return ret;
0207 }
0208
0209 return 0;
0210 }
0211
/*
 * Rearm all members of an array so reset_control_array_reset() can trigger
 * again. Two passes make this all-or-nothing: the first validates every
 * member without modifying anything, the second decrements the shared
 * members' triggered counts only once validation has fully succeeded.
 */
static int reset_control_array_rearm(struct reset_control_array *resets)
{
	struct reset_control *rstc;
	int i;

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (!rstc)
			continue;

		if (WARN_ON(IS_ERR(rstc)))
			return -EINVAL;

		if (rstc->shared) {
			/* Mixing rearm with assert/deassert on a shared line is a bug. */
			if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
				return -EINVAL;
		} else {
			/* Exclusive resets may only be rearmed while acquired. */
			if (!rstc->acquired)
				return -EPERM;
		}
	}

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (rstc && rstc->shared)
			WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	}

	return 0;
}
0244
0245 static int reset_control_array_assert(struct reset_control_array *resets)
0246 {
0247 int ret, i;
0248
0249 for (i = 0; i < resets->num_rstcs; i++) {
0250 ret = reset_control_assert(resets->rstc[i]);
0251 if (ret)
0252 goto err;
0253 }
0254
0255 return 0;
0256
0257 err:
0258 while (i--)
0259 reset_control_deassert(resets->rstc[i]);
0260 return ret;
0261 }
0262
0263 static int reset_control_array_deassert(struct reset_control_array *resets)
0264 {
0265 int ret, i;
0266
0267 for (i = 0; i < resets->num_rstcs; i++) {
0268 ret = reset_control_deassert(resets->rstc[i]);
0269 if (ret)
0270 goto err;
0271 }
0272
0273 return 0;
0274
0275 err:
0276 while (i--)
0277 reset_control_assert(resets->rstc[i]);
0278 return ret;
0279 }
0280
0281 static int reset_control_array_acquire(struct reset_control_array *resets)
0282 {
0283 unsigned int i;
0284 int err;
0285
0286 for (i = 0; i < resets->num_rstcs; i++) {
0287 err = reset_control_acquire(resets->rstc[i]);
0288 if (err < 0)
0289 goto release;
0290 }
0291
0292 return 0;
0293
0294 release:
0295 while (i--)
0296 reset_control_release(resets->rstc[i]);
0297
0298 return err;
0299 }
0300
0301 static void reset_control_array_release(struct reset_control_array *resets)
0302 {
0303 unsigned int i;
0304
0305 for (i = 0; i < resets->num_rstcs; i++)
0306 reset_control_release(resets->rstc[i]);
0307 }
0308
/* True if @rstc is the base of a reset_control_array (see the array flag). */
static inline bool reset_control_is_array(struct reset_control *rstc)
{
	return rstc->array;
}
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
/**
 * reset_control_reset - reset the controlled device
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for
 * the lifetime of the reset_control instance: for all but the first caller
 * this is a no-op. Consumers must not use reset_control_(de)assert on shared
 * reset lines when reset_control_reset has been used.
 *
 * If @rstc is NULL it is an optional reset and the function will just
 * return 0.
 *
 * Returns 0 on success, -ENOTSUPP if the controller has no .reset op, -EPERM
 * for an unacquired exclusive reset, or a negative errno from the driver.
 */
int reset_control_reset(struct reset_control *rstc)
{
	int ret;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_reset(rstc_to_array(rstc));

	if (!rstc->rcdev->ops->reset)
		return -ENOTSUPP;

	if (rstc->shared) {
		/* Mixing reset() with assert/deassert on a shared line is a bug. */
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		/* Only the first user to trigger actually pulses the line. */
		if (atomic_inc_return(&rstc->triggered_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
	if (rstc->shared && ret)
		/* Undo the count so a later caller may retry the pulse. */
		atomic_dec(&rstc->triggered_count);

	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371 int reset_control_bulk_reset(int num_rstcs,
0372 struct reset_control_bulk_data *rstcs)
0373 {
0374 int ret, i;
0375
0376 for (i = 0; i < num_rstcs; i++) {
0377 ret = reset_control_reset(rstcs[i].rstc);
0378 if (ret)
0379 return ret;
0380 }
0381
0382 return 0;
0383 }
0384 EXPORT_SYMBOL_GPL(reset_control_bulk_reset);
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
/**
 * reset_control_rearm - allow shared reset line to be re-triggered
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for
 * the lifetime of the reset_control instance, except if this call is used.
 * Calling this undoes the effect of reset_control_reset() by decrementing
 * the triggered count, so that a subsequent reset_control_reset() will
 * trigger the physical line again.
 *
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset or reset_control_rearm have been used.
 *
 * If @rstc is NULL the function will just return 0.
 */
int reset_control_rearm(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_rearm(rstc_to_array(rstc));

	if (rstc->shared) {
		/* Mixing rearm with assert/deassert on a shared line is a bug. */
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		/* Going below zero means more rearms than resets — warn. */
		WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	} else {
		/* Exclusive resets may only be rearmed while acquired. */
		if (!rstc->acquired)
			return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_rearm);
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
/**
 * reset_control_assert - asserts the reset line
 * @rstc: reset controller
 *
 * Calling this on an exclusive reset controller guarantees that the reset
 * will be asserted. When called on a shared reset controller the line may
 * still be deasserted, as long as other users keep it so.
 *
 * For shared reset controls a driver cannot expect the hw's registers and
 * internal state to be reset, but must be prepared for this to happen.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If @rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_assert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_assert(rstc_to_array(rstc));

	if (rstc->shared) {
		/* Mixing assert with reset() on a shared line is a bug. */
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		/* More asserts than deasserts is a bug. */
		if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
			return -EINVAL;

		/* Only the last user to assert actually touches the line. */
		if (atomic_dec_return(&rstc->deassert_count) != 0)
			return 0;

		/*
		 * Shared reset controls allow the reset line to be in any
		 * state after this call, so doing nothing is a valid option.
		 */
		if (!rstc->rcdev->ops->assert)
			return 0;
	} else {
		/*
		 * If the reset controller does not implement .assert(), there
		 * is no way to guarantee that the reset line is asserted after
		 * this call.
		 */
		if (!rstc->rcdev->ops->assert)
			return -ENOTSUPP;

		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499 int reset_control_bulk_assert(int num_rstcs,
0500 struct reset_control_bulk_data *rstcs)
0501 {
0502 int ret, i;
0503
0504 for (i = 0; i < num_rstcs; i++) {
0505 ret = reset_control_assert(rstcs[i].rstc);
0506 if (ret)
0507 goto err;
0508 }
0509
0510 return 0;
0511
0512 err:
0513 while (i--)
0514 reset_control_deassert(rstcs[i].rstc);
0515 return ret;
0516 }
0517 EXPORT_SYMBOL_GPL(reset_control_bulk_assert);
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
/**
 * reset_control_deassert - deasserts the reset line
 * @rstc: reset controller
 *
 * After calling this function, the reset is guaranteed to be deasserted.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If @rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_deassert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_deassert(rstc_to_array(rstc));

	if (rstc->shared) {
		/* Mixing deassert with reset() on a shared line is a bug. */
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		/* Only the first user to deassert actually touches the line. */
		if (atomic_inc_return(&rstc->deassert_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	/*
	 * If the reset controller does not implement .deassert(), we assume
	 * the reset line is deasserted by default. If that is not the case,
	 * the reset controller driver should implement .deassert() and return
	 * -ENOTSUPP.
	 */
	if (!rstc->rcdev->ops->deassert)
		return 0;

	return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577
0578
/**
 * reset_control_bulk_deassert - deasserts the reset lines in reverse order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Deasserts the reset lines in reverse order (last entry first). On failure
 * at entry i, the rollback loop re-asserts entries i..num_rstcs-1, i.e. the
 * failed entry and everything that had already been deasserted.
 */
int reset_control_bulk_deassert(int num_rstcs,
				struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = num_rstcs - 1; i >= 0; i--) {
		ret = reset_control_deassert(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i < num_rstcs)
		reset_control_assert(rstcs[i++].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_deassert);
0598
0599
0600
0601
0602
0603
0604
0605 int reset_control_status(struct reset_control *rstc)
0606 {
0607 if (!rstc)
0608 return 0;
0609
0610 if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc))
0611 return -EINVAL;
0612
0613 if (rstc->rcdev->ops->status)
0614 return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);
0615
0616 return -ENOTSUPP;
0617 }
0618 EXPORT_SYMBOL_GPL(reset_control_status);
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
/**
 * reset_control_acquire() - acquires a reset control for exclusive use
 * @rstc: reset control
 *
 * This is used to explicitly acquire a reset control for exclusive use. Note
 * that exclusive resets are requested as acquired by default. In order for a
 * second consumer to be able to control the reset, the first consumer has to
 * release it first.
 *
 * Returns 0 on success, -EBUSY if another reset control for the same line on
 * the same controller is currently acquired, or a negative errno on other
 * failures. If @rstc is NULL it is an optional reset and the function will
 * just return 0.
 */
int reset_control_acquire(struct reset_control *rstc)
{
	struct reset_control *rc;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_acquire(rstc_to_array(rstc));

	/* reset_list_mutex also protects the rcdev's reset_control list. */
	mutex_lock(&reset_list_mutex);

	if (rstc->acquired) {
		/* Already ours — idempotent success. */
		mutex_unlock(&reset_list_mutex);
		return 0;
	}

	/* Refuse if any sibling control for the same line is acquired. */
	list_for_each_entry(rc, &rstc->rcdev->reset_control_head, list) {
		if (rstc != rc && rstc->id == rc->id) {
			if (rc->acquired) {
				mutex_unlock(&reset_list_mutex);
				return -EBUSY;
			}
		}
	}

	rstc->acquired = true;

	mutex_unlock(&reset_list_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_acquire);
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686 int reset_control_bulk_acquire(int num_rstcs,
0687 struct reset_control_bulk_data *rstcs)
0688 {
0689 int ret, i;
0690
0691 for (i = 0; i < num_rstcs; i++) {
0692 ret = reset_control_acquire(rstcs[i].rstc);
0693 if (ret)
0694 goto err;
0695 }
0696
0697 return 0;
0698
0699 err:
0700 while (i--)
0701 reset_control_release(rstcs[i].rstc);
0702 return ret;
0703 }
0704 EXPORT_SYMBOL_GPL(reset_control_bulk_acquire);
0705
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716 void reset_control_release(struct reset_control *rstc)
0717 {
0718 if (!rstc || WARN_ON(IS_ERR(rstc)))
0719 return;
0720
0721 if (reset_control_is_array(rstc))
0722 reset_control_array_release(rstc_to_array(rstc));
0723 else
0724 rstc->acquired = false;
0725 }
0726 EXPORT_SYMBOL_GPL(reset_control_release);
0727
0728
0729
0730
0731
0732
0733
0734
0735
0736
0737
0738 void reset_control_bulk_release(int num_rstcs,
0739 struct reset_control_bulk_data *rstcs)
0740 {
0741 int i;
0742
0743 for (i = 0; i < num_rstcs; i++)
0744 reset_control_release(rstcs[i].rstc);
0745 }
0746 EXPORT_SYMBOL_GPL(reset_control_bulk_release);
0747
/*
 * Look up or create the reset_control for (@rcdev, @index).
 *
 * Must be called with reset_list_mutex held (it protects the rcdev's
 * reset_control_head list). An existing control is reused with an extra
 * kref only if both the existing and the requested control are shared;
 * mixing shared and exclusive requests for the same line returns -EBUSY.
 */
static struct reset_control *
__reset_control_get_internal(struct reset_controller_dev *rcdev,
			     unsigned int index, bool shared, bool acquired)
{
	struct reset_control *rstc;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
		if (rstc->id == index) {
			/*
			 * Allow creating a secondary exclusive reset_control
			 * that is initially not acquired for an already
			 * requested unacquired reset_control.
			 */
			if (!rstc->shared && !shared && !acquired)
				break;

			if (WARN_ON(!rstc->shared || !shared))
				return ERR_PTR(-EBUSY);

			kref_get(&rstc->refcnt);
			return rstc;
		}
	}

	rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
	if (!rstc)
		return ERR_PTR(-ENOMEM);

	/* Pin the provider module for the lifetime of this control. */
	if (!try_module_get(rcdev->owner)) {
		kfree(rstc);
		return ERR_PTR(-ENODEV);
	}

	rstc->rcdev = rcdev;
	list_add(&rstc->list, &rcdev->reset_control_head);
	rstc->id = index;
	kref_init(&rstc->refcnt);
	rstc->acquired = acquired;
	rstc->shared = shared;

	return rstc;
}
0792
/*
 * kref release callback: drops the provider module reference, unlinks the
 * control from its rcdev list, and frees it. Runs with reset_list_mutex held
 * (taken by the callers of __reset_control_put_internal()).
 */
static void __reset_control_release(struct kref *kref)
{
	struct reset_control *rstc = container_of(kref, struct reset_control,
						  refcnt);

	lockdep_assert_held(&reset_list_mutex);

	module_put(rstc->rcdev->owner);

	list_del(&rstc->list);
	kfree(rstc);
}
0805
/* Drop one reference; caller must hold reset_list_mutex. */
static void __reset_control_put_internal(struct reset_control *rstc)
{
	lockdep_assert_held(&reset_list_mutex);

	kref_put(&rstc->refcnt, __reset_control_release);
}
0812
/*
 * Resolve a reset control from device tree: translate @id (via "reset-names")
 * or @index into a "resets" phandle, find the matching registered controller,
 * run its of_xlate, and get/create the reset_control.
 *
 * Returns NULL for a missing optional reset, -EPROBE_DEFER when the provider
 * has not registered yet, or another ERR_PTR on failure.
 */
struct reset_control *
__of_reset_control_get(struct device_node *node, const char *id, int index,
		       bool shared, bool optional, bool acquired)
{
	struct reset_control *rstc;
	struct reset_controller_dev *r, *rcdev;
	struct of_phandle_args args;
	int rstc_id;
	int ret;

	if (!node)
		return ERR_PTR(-EINVAL);

	if (id) {
		index = of_property_match_string(node,
						 "reset-names", id);
		/* -EILSEQ means the property exists but is malformed. */
		if (index == -EILSEQ)
			return ERR_PTR(index);
		if (index < 0)
			return optional ? NULL : ERR_PTR(-ENOENT);
	}

	ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
					 index, &args);
	/* -EINVAL (malformed specifier) is an error even for optional resets. */
	if (ret == -EINVAL)
		return ERR_PTR(ret);
	if (ret)
		return optional ? NULL : ERR_PTR(ret);

	mutex_lock(&reset_list_mutex);
	rcdev = NULL;
	list_for_each_entry(r, &reset_controller_list, list) {
		if (args.np == r->of_node) {
			rcdev = r;
			break;
		}
	}

	if (!rcdev) {
		/* Provider may simply not have probed yet. */
		rstc = ERR_PTR(-EPROBE_DEFER);
		goto out;
	}

	if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
		rstc = ERR_PTR(-EINVAL);
		goto out;
	}

	rstc_id = rcdev->of_xlate(rcdev, &args);
	if (rstc_id < 0) {
		rstc = ERR_PTR(rstc_id);
		goto out;
	}

	/* reset_list_mutex also protects the rcdev's reset_control list */
	rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);

out:
	mutex_unlock(&reset_list_mutex);
	of_node_put(args.np);

	return rstc;
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);
0877
0878 static struct reset_controller_dev *
0879 __reset_controller_by_name(const char *name)
0880 {
0881 struct reset_controller_dev *rcdev;
0882
0883 lockdep_assert_held(&reset_list_mutex);
0884
0885 list_for_each_entry(rcdev, &reset_controller_list, list) {
0886 if (!rcdev->dev)
0887 continue;
0888
0889 if (!strcmp(name, dev_name(rcdev->dev)))
0890 return rcdev;
0891 }
0892
0893 return NULL;
0894 }
0895
/*
 * Resolve a reset control from the board-file lookup table, matching on the
 * consumer device name and (optionally) @con_id. Takes reset_lookup_mutex,
 * and nests reset_list_mutex inside it for the provider lookup and control
 * creation — that lock order must be preserved.
 *
 * Returns NULL for a missing optional entry, -EPROBE_DEFER when the named
 * provider is not registered yet, or another ERR_PTR on failure.
 */
static struct reset_control *
__reset_control_get_from_lookup(struct device *dev, const char *con_id,
				bool shared, bool optional, bool acquired)
{
	const struct reset_control_lookup *lookup;
	struct reset_controller_dev *rcdev;
	const char *dev_id = dev_name(dev);
	struct reset_control *rstc = NULL;

	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (strcmp(lookup->dev_id, dev_id))
			continue;

		/* con_id matches when both are NULL or both compare equal. */
		if ((!con_id && !lookup->con_id) ||
		    ((con_id && lookup->con_id) &&
		     !strcmp(con_id, lookup->con_id))) {
			mutex_lock(&reset_list_mutex);
			rcdev = __reset_controller_by_name(lookup->provider);
			if (!rcdev) {
				mutex_unlock(&reset_list_mutex);
				mutex_unlock(&reset_lookup_mutex);
				/* Ignore this error. Who knows? */
				return ERR_PTR(-EPROBE_DEFER);
			}

			rstc = __reset_control_get_internal(rcdev,
							    lookup->index,
							    shared, acquired);
			mutex_unlock(&reset_list_mutex);
			break;
		}
	}

	mutex_unlock(&reset_lookup_mutex);

	if (!rstc)
		return optional ? NULL : ERR_PTR(-ENOENT);

	return rstc;
}
0938
0939 struct reset_control *__reset_control_get(struct device *dev, const char *id,
0940 int index, bool shared, bool optional,
0941 bool acquired)
0942 {
0943 if (WARN_ON(shared && acquired))
0944 return ERR_PTR(-EINVAL);
0945
0946 if (dev->of_node)
0947 return __of_reset_control_get(dev->of_node, id, index, shared,
0948 optional, acquired);
0949
0950 return __reset_control_get_from_lookup(dev, id, shared, optional,
0951 acquired);
0952 }
0953 EXPORT_SYMBOL_GPL(__reset_control_get);
0954
/*
 * Request one reset control per entry in @rstcs, looked up by each entry's
 * id string (index 0). On failure, all controls obtained so far are dropped
 * under reset_list_mutex before returning the error, so the array is never
 * left partially populated with live references.
 */
int __reset_control_bulk_get(struct device *dev, int num_rstcs,
			     struct reset_control_bulk_data *rstcs,
			     bool shared, bool optional, bool acquired)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		rstcs[i].rstc = __reset_control_get(dev, rstcs[i].id, 0,
						    shared, optional, acquired);
		if (IS_ERR(rstcs[i].rstc)) {
			ret = PTR_ERR(rstcs[i].rstc);
			goto err;
		}
	}

	return 0;

err:
	mutex_lock(&reset_list_mutex);
	while (i--)
		__reset_control_put_internal(rstcs[i].rstc);
	mutex_unlock(&reset_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(__reset_control_bulk_get);
0980
0981 static void reset_control_array_put(struct reset_control_array *resets)
0982 {
0983 int i;
0984
0985 mutex_lock(&reset_list_mutex);
0986 for (i = 0; i < resets->num_rstcs; i++)
0987 __reset_control_put_internal(resets->rstc[i]);
0988 mutex_unlock(&reset_list_mutex);
0989 kfree(resets);
0990 }
0991
0992
0993
0994
0995
0996 void reset_control_put(struct reset_control *rstc)
0997 {
0998 if (IS_ERR_OR_NULL(rstc))
0999 return;
1000
1001 if (reset_control_is_array(rstc)) {
1002 reset_control_array_put(rstc_to_array(rstc));
1003 return;
1004 }
1005
1006 mutex_lock(&reset_list_mutex);
1007 __reset_control_put_internal(rstc);
1008 mutex_unlock(&reset_list_mutex);
1009 }
1010 EXPORT_SYMBOL_GPL(reset_control_put);
1011
1012
1013
1014
1015
1016
1017 void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
1018 {
1019 mutex_lock(&reset_list_mutex);
1020 while (num_rstcs--) {
1021 if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
1022 continue;
1023 __reset_control_put_internal(rstcs[num_rstcs].rstc);
1024 }
1025 mutex_unlock(&reset_list_mutex);
1026 }
1027 EXPORT_SYMBOL_GPL(reset_control_bulk_put);
1028
/* devres release callback: puts the reset control stored in *res. */
static void devm_reset_control_release(struct device *dev, void *res)
{
	reset_control_put(*(struct reset_control **)res);
}
1033
1034 struct reset_control *
1035 __devm_reset_control_get(struct device *dev, const char *id, int index,
1036 bool shared, bool optional, bool acquired)
1037 {
1038 struct reset_control **ptr, *rstc;
1039
1040 ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
1041 GFP_KERNEL);
1042 if (!ptr)
1043 return ERR_PTR(-ENOMEM);
1044
1045 rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
1046 if (IS_ERR_OR_NULL(rstc)) {
1047 devres_free(ptr);
1048 return rstc;
1049 }
1050
1051 *ptr = rstc;
1052 devres_add(dev, ptr);
1053
1054 return rstc;
1055 }
1056 EXPORT_SYMBOL_GPL(__devm_reset_control_get);
1057
/* devres payload for __devm_reset_control_bulk_get(): remembers the caller's
 * array so it can be bulk-put on driver detach. */
struct reset_control_bulk_devres {
	int num_rstcs;
	struct reset_control_bulk_data *rstcs;
};
1062
/* devres release callback: bulk-puts the controls recorded in the payload. */
static void devm_reset_control_bulk_release(struct device *dev, void *res)
{
	struct reset_control_bulk_devres *devres = res;

	reset_control_bulk_put(devres->num_rstcs, devres->rstcs);
}
1069
1070 int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
1071 struct reset_control_bulk_data *rstcs,
1072 bool shared, bool optional, bool acquired)
1073 {
1074 struct reset_control_bulk_devres *ptr;
1075 int ret;
1076
1077 ptr = devres_alloc(devm_reset_control_bulk_release, sizeof(*ptr),
1078 GFP_KERNEL);
1079 if (!ptr)
1080 return -ENOMEM;
1081
1082 ret = __reset_control_bulk_get(dev, num_rstcs, rstcs, shared, optional, acquired);
1083 if (ret < 0) {
1084 devres_free(ptr);
1085 return ret;
1086 }
1087
1088 ptr->num_rstcs = num_rstcs;
1089 ptr->rstcs = rstcs;
1090 devres_add(dev, ptr);
1091
1092 return 0;
1093 }
1094 EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106 int __device_reset(struct device *dev, bool optional)
1107 {
1108 struct reset_control *rstc;
1109 int ret;
1110
1111 #ifdef CONFIG_ACPI
1112 acpi_handle handle = ACPI_HANDLE(dev);
1113
1114 if (handle) {
1115 if (!acpi_has_method(handle, "_RST"))
1116 return optional ? 0 : -ENOENT;
1117 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL,
1118 NULL)))
1119 return -EIO;
1120 }
1121 #endif
1122
1123 rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
1124 if (IS_ERR(rstc))
1125 return PTR_ERR(rstc);
1126
1127 ret = reset_control_reset(rstc);
1128
1129 reset_control_put(rstc);
1130
1131 return ret;
1132 }
1133 EXPORT_SYMBOL_GPL(__device_reset);
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147 static int of_reset_control_get_count(struct device_node *node)
1148 {
1149 int count;
1150
1151 if (!node)
1152 return -EINVAL;
1153
1154 count = of_count_phandle_with_args(node, "resets", "#reset-cells");
1155 if (count == 0)
1156 count = -ENOENT;
1157
1158 return count;
1159 }
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
/**
 * of_reset_control_array_get - Get a list of reset controls using
 *				device node.
 *
 * @np: device node for the device that requests the reset controls array
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 * @acquired: only one reset control may be acquired for a given controller
 *            and ID
 *
 * Returns pointer to allocated reset_control base on success or error on
 * failure.
 */
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
			   bool acquired)
{
	struct reset_control_array *resets;
	struct reset_control *rstc;
	int num, i;

	num = of_reset_control_get_count(np);
	if (num < 0)
		return optional ? NULL : ERR_PTR(num);

	resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
	if (!resets)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < num; i++) {
		rstc = __of_reset_control_get(np, NULL, i, shared, optional,
					      acquired);
		if (IS_ERR(rstc))
			goto err_rst;
		resets->rstc[i] = rstc;
	}
	resets->num_rstcs = num;
	/* Mark the base so the core API dispatches to the array helpers. */
	resets->base.array = true;

	return &resets->base;

err_rst:
	/* Drop the controls obtained before the failure, then free. */
	mutex_lock(&reset_list_mutex);
	while (--i >= 0)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);

	kfree(resets);

	return rstc;
}
EXPORT_SYMBOL_GPL(of_reset_control_array_get);
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226 struct reset_control *
1227 devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
1228 {
1229 struct reset_control **ptr, *rstc;
1230
1231 ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
1232 GFP_KERNEL);
1233 if (!ptr)
1234 return ERR_PTR(-ENOMEM);
1235
1236 rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
1237 if (IS_ERR_OR_NULL(rstc)) {
1238 devres_free(ptr);
1239 return rstc;
1240 }
1241
1242 *ptr = rstc;
1243 devres_add(dev, ptr);
1244
1245 return rstc;
1246 }
1247 EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
1248
1249 static int reset_control_get_count_from_lookup(struct device *dev)
1250 {
1251 const struct reset_control_lookup *lookup;
1252 const char *dev_id;
1253 int count = 0;
1254
1255 if (!dev)
1256 return -EINVAL;
1257
1258 dev_id = dev_name(dev);
1259 mutex_lock(&reset_lookup_mutex);
1260
1261 list_for_each_entry(lookup, &reset_lookup_list, list) {
1262 if (!strcmp(lookup->dev_id, dev_id))
1263 count++;
1264 }
1265
1266 mutex_unlock(&reset_lookup_mutex);
1267
1268 if (count == 0)
1269 count = -ENOENT;
1270
1271 return count;
1272 }
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282 int reset_control_get_count(struct device *dev)
1283 {
1284 if (dev->of_node)
1285 return of_reset_control_get_count(dev->of_node);
1286
1287 return reset_control_get_count_from_lookup(dev);
1288 }
1289 EXPORT_SYMBOL_GPL(reset_control_get_count);