/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool a lock has to be
 * still taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code using the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map lock-lessly, using cmpxchg so
 * that concurrent users racing on the same words cannot corrupt the bitmap.
 *
 * Return: 0 on success, or the number of bits still to be set if a word
 * could not be updated because some of the requested bits were already set.
 */
static unsigned long
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_set) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly, using cmpxchg so
 * that concurrent users racing on the same words cannot corrupt the bitmap.
 *
 * Return: 0 on success, or the number of bits still to be cleared if a word
 * could not be updated because the expected bits were not all set.
 */
static unsigned long
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_clear) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long nbits = size >> pool->min_alloc_order;
	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);
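
/*
 * Example: a minimal usage sketch of the interfaces above; it is not taken
 * from an in-tree user, and the SRAM mapping, addresses and sizes are
 * hypothetical.  A driver typically creates a pool, publishes a chunk of its
 * special memory, then allocates and frees out of it.  With a minimum
 * allocation order of 5, each bitmap bit represents a 32-byte granule:
 *
 *	struct gen_pool *pool = gen_pool_create(5, -1);
 *	unsigned long buf;
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(pool, sram_virt, sram_phys, SZ_64K, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	buf = gen_pool_alloc(pool, 256);
 *	if (buf)
 *		gen_pool_free(pool, buf, 256);
 *	gen_pool_destroy(pool);
 *
 * gen_pool_add_virt(), gen_pool_alloc() and gen_pool_free() are the
 * convenience wrappers declared in <linux/genalloc.h>; gen_pool_destroy()
 * requires that every allocation has been freed first.
 */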

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_first_bit(chunk->bits, end_bit);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool.  Uses the
 * given pool allocation function.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc_algo(pool, size, algo, data);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);

/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_alloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the given pool allocation function.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool
 * for DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_zalloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool.  Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
		void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address of the region
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool.  Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment (struct genpool_data_align)
 * @pool: pool to get the order from
 * @start_addr: start address of the allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
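
/*
 * Example: a sketch of an alignment-constrained allocation using the helper
 * above; the pool and the 256-byte alignment requirement are hypothetical:
 *
 *	struct genpool_data_align align_data = { .align = 256 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, len, gen_pool_first_fit_align,
 *				   &align_data);
 *
 * The same data can be installed pool-wide with
 * gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data), in which
 * case plain gen_pool_alloc() honours the alignment; the data must then stay
 * valid for the lifetime of the pool.
 */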

/**
 * gen_pool_fixed_alloc - reserve a specific region within a chunk
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed offset (struct genpool_data_fixed)
 * @pool: pool to get the order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
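
/*
 * Example: a sketch of reserving a fixed region with gen_pool_fixed_alloc();
 * the 0x100 offset into the pool's chunk is hypothetical.  The offset must be
 * a multiple of the pool's minimum allocation granule, otherwise the
 * WARN_ON() above fires and the allocation fails:
 *
 *	struct genpool_data_fixed fixed_data = { .offset = 0x100 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, len, gen_pool_fixed_alloc,
 *				   &fixed_data);
 */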

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement.  The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * which we can allocate the memory from.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		unsigned long next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.  The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be managed is not already registered */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);

#ifdef CONFIG_OF

/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif