// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"
#include "trace.h"

struct devres_node {
	struct list_head		entry;
	dr_release_t			release;
	const char			*name;
	size_t				size;
};

struct devres {
	struct devres_node		node;

	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
	struct devres_node		node[2];
	void				*id;
	int				color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void devres_dbg(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else
#define devres_dbg(dev, node, op) do {} while (0)
#endif

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	trace_devres_log(dev, op, node, node->name, node->size);
	devres_dbg(dev, node, op);
}

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	return true;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
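
/*
 * Usage sketch (illustrative only; the "myclk" names are hypothetical):
 * a devres-backed wrapper allocates a small payload with devres_alloc(),
 * acquires the real resource, then registers the node so the release
 * callback runs on driver detach:
 *
 *	static void devm_myclk_release(struct device *dev, void *res)
 *	{
 *		myclk_put(*(struct myclk **)res);
 *	}
 *
 *	struct myclk *devm_myclk_get(struct device *dev, const char *id)
 *	{
 *		struct myclk **ptr, *clk;
 *
 *		ptr = devres_alloc(devm_myclk_release, sizeof(*ptr),
 *				   GFP_KERNEL);
 *		if (!ptr)
 *			return ERR_PTR(-ENOMEM);
 *
 *		clk = myclk_get(dev, id);
 *		if (!IS_ERR(clk)) {
 *			*ptr = clk;
 *			devres_add(dev, ptr);
 *		} else {
 *			devres_free(ptr);
 *		}
 *		return clk;
 *	}
 */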

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
					 &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc.
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
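
/*
 * Usage sketch (hypothetical "foo" names): devres_get() suits lazily
 * created per-device singleton state.  A new instance is prepared up
 * front; if another caller already registered a matching one, the
 * pre-allocated copy is freed and the existing instance is returned:
 *
 *	static void foo_state_release(struct device *dev, void *res)
 *	{
 *	}
 *
 *	struct foo_state *foo_get_state(struct device *dev)
 *	{
 *		struct foo_state *new;
 *
 *		new = devres_alloc(foo_state_release, sizeof(*new),
 *				   GFP_KERNEL);
 *		if (!new)
 *			return NULL;
 *
 *		return devres_get(dev, new, NULL, NULL);
 *	}
 */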

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	struct devres_node *node, *n;
	int cnt = 0, nr_groups = 0;

	/* First pass - move normal devres entries to @todo and clear
	 * color markers.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [current node, end).  That is, for a closed group both its
	 * opening and closing markers lie in the range; for a group
	 * that was never closed, the opening marker alone suffices.
	 * Such groups are moved to @todo as a whole.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update current node or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres *dr, *tmp;

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 *
 * RETURNS:
 * The number of released resources, or -ENODEV if @dev looks
 * uninitialized.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;

	/* Nothing to release if the list is empty */
	if (list_empty(&dev->devres_head))
		return 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);
	return cnt;
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that the group doesn't get released;
 * the group markers are simply removed and freed.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested in it are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = remove_nodes(dev, first, end, &todo);
		spin_unlock_irqrestore(&dev->devres_lock, flags);

		release_nodes(dev, &todo);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
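
/*
 * Group usage sketch (hypothetical "foo" names): a group lets a driver
 * unwind one phase of managed allocations on a mid-probe failure
 * without disturbing resources acquired earlier.  Here
 * foo_setup_extra() is assumed to perform devm_*() allocations:
 *
 *	if (!devres_open_group(dev, NULL, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	err = foo_setup_extra(dev);
 *	if (err) {
 *		devres_release_group(dev, NULL);
 *		return err;
 *	}
 *
 *	devres_close_group(dev, NULL);
 */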

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
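
/*
 * Usage sketch (hypothetical "foo" names): devm_add_action() hooks an
 * arbitrary undo function into the unwind sequence.  Note that if
 * allocating the devres node itself fails, @action is not called, so
 * callers that need the undo to run even on that failure typically use
 * the devm_add_action_or_reset() wrapper instead:
 *
 *	static void foo_hw_disable(void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		writel(0, priv->base + FOO_CTRL);
 *	}
 *
 *	foo_hw_enable(priv);
 *	err = devm_add_action_or_reset(dev, foo_hw_disable, priv);
 *	if (err)
 *		return err;
 */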

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
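
/*
 * Usage sketch (hypothetical "foo" names): driver-private data
 * allocated this way needs no kfree() in error paths or in .remove();
 * the zeroing variant is the devm_kzalloc() wrapper:
 *
 *	struct foo *priv;
 *
 *	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *	if (!priv)
 *		return -ENOMEM;
 */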

/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc().  Resizes the memory chunk allocated with
 * devm_kmalloc().  Behaves similarly to regular krealloc(): if @ptr is
 * NULL or ZERO_SIZE_PTR, it's the equivalent of devm_kmalloc().  If
 * @new_size is zero, it frees the previously allocated memory and
 * returns ZERO_SIZE_PTR.  This function doesn't change the order in
 * which the release callback for the re-allocated object is called.
 *
 * RETURNS:
 * Pointer to the new memory chunk on success, NULL on failure.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
	size_t total_new_size, total_old_size;
	struct devres *old_dr, *new_dr;
	unsigned long flags;

	if (unlikely(!new_size)) {
		devm_kfree(dev, ptr);
		return ZERO_SIZE_PTR;
	}

	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
		return devm_kmalloc(dev, new_size, gfp);

	if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
		/*
		 * We cannot reliably realloc a const string returned by
		 * devm_kstrdup_const().
		 */
		return NULL;

	if (!check_dr_size(new_size, &total_new_size))
		return NULL;

	total_old_size = ksize(container_of(ptr, struct devres, data));
	if (total_old_size == 0) {
		WARN(1, "Pointer doesn't point to dynamically allocated memory.");
		return NULL;
	}

	/*
	 * If new size is smaller or equal to the actual number of bytes
	 * allocated previously - just return the same pointer.
	 */
	if (total_new_size <= total_old_size)
		return ptr;

	/*
	 * Otherwise: allocate new, larger chunk. We need to allocate before
	 * taking the lock as most probably the caller uses GFP_KERNEL.
	 */
	new_dr = alloc_dr(devm_kmalloc_release,
			  total_new_size, gfp, dev_to_node(dev));
	if (!new_dr)
		return NULL;

	/*
	 * The spinlock protects the linked list against concurrent
	 * modifications but not the resource itself.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);

	old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
	if (!old_dr) {
		spin_unlock_irqrestore(&dev->devres_lock, flags);
		kfree(new_dr);
		WARN(1, "Memory chunk not managed or managed by a different device.");
		return NULL;
	}

	replace_dr(dev, &old_dr->node, &new_dr->node);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/*
	 * We can copy the memory contents after releasing the lock as we're
	 * no longer modifying the linked list.
	 */
	memcpy(new_dr->data, old_dr->data,
	       total_old_size - offsetof(struct devres, data));

	/*
	 * Same for releasing the old devres - it's already been removed from
	 * the list, so, again, no locking required.
	 */
	kfree(old_dr);

	return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);
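
/*
 * Usage sketch (hypothetical names): as with krealloc(), the old
 * pointer remains valid and managed when devm_krealloc() fails, so
 * assign through a temporary:
 *
 *	new = devm_krealloc(dev, buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	buf = new;
 */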

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section, otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
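
/*
 * Usage sketch (hypothetical names): convenient for strings that must
 * live as long as the device, such as an interrupt name:
 *
 *	name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq%d",
 *			      dev_name(dev), i);
 *	if (!name)
 *		return -ENOMEM;
 */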

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special cases: pointer to a string in .rodata returned by
	 * devm_kstrdup_const() or NULL/ZERO ptr.
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate region of a memory using resource managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
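
/*
 * Usage sketch (hypothetical names): an order-2 (four page) zeroed
 * buffer that is handed back to the page allocator on detach:
 *
 *	buf = devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 2);
 *	if (!buf)
 *		return -ENOMEM;
 */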

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages().  Unlike
 * free_pages(), there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu.  Per-cpu memory allocated with this function
 * is automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
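
/*
 * Usage sketch (hypothetical "foo" names): per-CPU counters via the
 * devm_alloc_percpu() wrapper macro, freed automatically on detach:
 *
 *	priv->stats = devm_alloc_percpu(dev, struct foo_stats);
 *	if (!priv->stats)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(priv->stats->rx_packets);
 */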

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);