/*
 * IOVA allocation and management for the IOMMU layer.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the allocatable address space */
#define IOVA_ANCHOR	~0UL

/* Log of the largest cached IOVA range size (in pages) */
#define IOVA_RANGE_CACHE_MAX_SIZE 6
static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);

unsigned long iova_rcache_range(void)
{
	return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
}

static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct iova_domain *iovad;

	iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);

	free_cpu_cached_iovas(cpu, iovad);
	return 0;
}

static void free_global_cached_iovas(struct iova_domain *iovad);

static struct iova *to_iova(struct rb_node *node)
{
	return rb_entry(node, struct iova, node);
}

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * The IOVA granularity must be a power of two no larger than the
	 * CPU page size; every pfn handled below is expressed in units of
	 * that granule.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
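
/*
 * Example (sketch, not part of this file): bringing up an IOVA domain for a
 * hypothetical device whose IOMMU works on PAGE_SIZE granules. The granule
 * must be a power of two no larger than PAGE_SIZE, and start_pfn is expressed
 * in units of that granule. iova_cache_get() must have succeeded first so
 * that struct iova allocations can be served from the kmem cache.
 *
 *	struct iova_domain iovad;
 *	int ret;
 *
 *	ret = iova_cache_get();
 *	if (ret)
 *		return ret;
 *	init_iova_domain(&iovad, PAGE_SIZE, 1);	// start at pfn 1, skip IOVA 0
 */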

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = to_iova(iovad->cached32_node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo))
		iovad->cached32_node = rb_next(&free->node);

	if (free->pfn_lo < iovad->dma_32bit_pfn)
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;

	cached_iova = to_iova(iovad->cached_node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
{
	struct rb_node *node, *next;
	/*
	 * For limits above the 32-bit boundary the anchor node is a good
	 * enough starting point for a downward allocation walk. Otherwise,
	 * descend the tree to find the lowest range that ends at or above
	 * limit_pfn, so that everything reachable by rb_prev() from the
	 * returned node lies entirely below the limit.
	 */
	if (limit_pfn > iovad->dma_32bit_pfn)
		return &iovad->anchor.node;

	node = iovad->rbroot.rb_node;
	while (to_iova(node)->pfn_hi < limit_pfn)
		node = node->rb_right;

search_left:
	while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
		node = node->rb_left;

	if (!node->rb_left)
		return node;

	next = node->rb_left;
	while (next->rb_right) {
		next = next->rb_right;
		if (to_iova(next)->pfn_lo >= limit_pfn) {
			node = next;
			goto search_left;
		}
	}

	return node;
}

/* Insert the iova into the domain rbtree; the caller holds the rbtree lock. */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);

	/* Figure out where to put the new node */
	while (*new) {
		struct iova *this = to_iova(*new);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1);	/* duplicate pfn_lo should never happen */
			return;
		}
	}

	/* Add the new node and rebalance the tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn, retry_pfn;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
			size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = to_iova(curr);
	retry_pfn = curr_iova->pfn_hi + 1;

retry:
	do {
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = to_iova(curr);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = retry_pfn;
			curr = iova_find_limit(iovad, limit_pfn);
			curr_iova = to_iova(curr);
			goto retry;
		}
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to a size-aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}
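
/*
 * Worked example (illustrative numbers, not taken from the code above): a
 * size-aligned request for 8 pfns sets align_mask = ~0UL << fls_long(7),
 * so every candidate base is rounded down to a multiple of 8. The walk
 * starts at the cached node and moves right-to-left through the tree:
 * high_pfn is clamped to each neighbour's pfn_lo, and
 * new_pfn = (high_pfn - 8) & align_mask is accepted once it no longer
 * overlaps the range to its left and still lies at or above low_pfn.
 */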
0238
0239 static struct kmem_cache *iova_cache;
0240 static unsigned int iova_cache_users;
0241 static DEFINE_MUTEX(iova_cache_mutex);
0242
0243 static struct iova *alloc_iova_mem(void)
0244 {
0245 return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
0246 }
0247
0248 static void free_iova_mem(struct iova *iova)
0249 {
0250 if (iova->pfn_lo != IOVA_ANCHOR)
0251 kmem_cache_free(iova_cache, iova);
0252 }
0253
int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
					      iova_cpuhp_dead);
		if (ret) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't register cpuhp handler\n");
			return ret;
		}

		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users) {
		cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
		kmem_cache_destroy(iova_cache);
	}
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
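
/*
 * Example (sketch, not part of this file): the iova kmem cache is reference
 * counted, so each user pairs a get with a put around its use of the
 * allocator; the first get also registers the CPU hotplug callback and the
 * last put destroys the cache.
 *
 *	ret = iova_cache_get();
 *	if (ret)
 *		return ret;
 *	... allocate and free iovas ...
 *	iova_cache_put();
 */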

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size-aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_pow_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
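
/*
 * Example (sketch, not part of this file): allocating 16 granules with a
 * size-aligned base below a device's 32-bit DMA limit, then releasing the
 * range. The limit is passed as the highest usable pfn, inclusive.
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&iovad, 16,
 *			  DMA_BIT_MASK(32) >> iova_shift(&iovad), true);
 *	if (!iova)
 *		return -ENOMEM;
 *	... map [iova->pfn_lo, iova->pfn_hi] through the IOMMU ...
 *	__free_iova(&iovad, iova);
 */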

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = to_iova(node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void remove_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain.
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	remove_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that was allocated previously
 * This function finds the iova for a given pfn and then
 * frees that iova from the domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	if (!iova) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return;
	}
	remove_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(free_iova);
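
/*
 * Example (sketch, not part of this file): when only a DMA address from a
 * previous allocation is known, the owning range can be looked up or freed
 * by pfn via the iova_pfn() helper from <linux/iova.h>.
 *
 *	unsigned long pfn = iova_pfn(&iovad, dma_addr);
 *	struct iova *iova = find_iova(&iovad, pfn);
 *
 *	if (WARN_ON(!iova))
 *		return;
 *	free_iova(&iovad, pfn);
 */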

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure it won't get us into
	 * trouble later.
	 */
	if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		size = roundup_pow_of_two(size);

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing the rcaches. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);
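
/*
 * Example (sketch, not part of this file): the fast path deals in bare pfns
 * (0 means failure) rather than struct iova, and the matching free must pass
 * the same size so the range is returned to the right rcache bucket.
 *
 *	unsigned long pfn;
 *
 *	pfn = alloc_iova_fast(&iovad, nr_pages, limit_pfn, true);
 *	if (!pfn)
 *		return -ENOMEM;
 *	...
 *	free_iova_fast(&iovad, pfn, nr_pages);
 */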

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that was allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

static void iova_domain_free_rcaches(struct iova_domain *iovad)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					    &iovad->cpuhp_dead);
	free_iova_rcaches(iovad);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	if (iovad->rcaches)
		iova_domain_free_rcaches(iovad);

	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
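
/*
 * Example (sketch, not part of this file): teardown mirrors setup order; the
 * domain is destroyed before the final reference on the global iova cache is
 * dropped.
 *
 *	put_iova_domain(&iovad);	// flushes rcaches, frees every iova
 *	iova_cache_put();
 */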

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = to_iova(node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address range is never dynamically allocated to any device.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = to_iova(node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
			    (pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/*
	 * We get here either because this is the first reserved node, or
	 * because the remaining, non-overlapping part of the range still
	 * needs to be inserted.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
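
/*
 * Example (sketch, not part of this file): carving out a hypothetical MSI
 * doorbell window so the allocator never hands those addresses to a device.
 * The pfns are in units of the domain granule.
 *
 *	unsigned long lo = iova_pfn(&iovad, 0xfee00000);
 *	unsigned long hi = iova_pfn(&iovad, 0xfeefffff);
 *
 *	if (!reserve_iova(&iovad, lo, hi))
 *		return -ENOMEM;
 */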

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 127
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		remove_iova(iovad, iova);
		free_iova_mem(iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	BUG_ON(iova_magazine_empty(mag));

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

int iova_domain_init_rcaches(struct iova_domain *iovad)
{
	unsigned int cpu;
	int i, ret;

	iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
				 sizeof(struct iova_rcache),
				 GFP_KERNEL);
	if (!iovad->rcaches)
		return -ENOMEM;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		struct iova_cpu_rcache *cpu_rcache;
		struct iova_rcache *rcache;

		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
						     cache_line_size());
		if (!rcache->cpu_rcaches) {
			ret = -ENOMEM;
			goto out_err;
		}
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);

			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
			if (!cpu_rcache->loaded || !cpu_rcache->prev) {
				ret = -ENOMEM;
				goto out_err;
			}
		}
	}

	ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					       &iovad->cpuhp_dead);
	if (ret)
		goto out_err;
	return 0;

out_err:
	free_iova_rcaches(iovad);
	return ret;
}
EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);
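
/*
 * Example (sketch, not part of this file): a typical bring-up sequence for a
 * domain that wants the per-CPU caching fast path. Rcache setup is separate
 * from init_iova_domain() so callers that never use alloc_iova_fast() can
 * skip the per-CPU allocations entirely.
 *
 *	init_iova_domain(&iovad, PAGE_SIZE, start_pfn);
 *	ret = iova_domain_init_rcaches(&iovad);
 *	if (ret)
 *		return ret;
 *	...
 *	put_iova_domain(&iovad);	// also tears the rcaches down
 */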

/*
 * Try inserting an IOVA range starting at 'iova_pfn' into 'rcache', and
 * return true on success.  This can fail if the rcache is full and no space
 * can be made, in which case free_iova_fast() returns the range to the
 * rbtree via free_iova() instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * The caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching pfn and remove it from the rcache,
 * otherwise return 0.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy an IOVA allocation from the rcache.  Fail if the requested
 * size is too big to be cached, or if the DMA limit we were given isn't
 * satisfied by the top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}

/*
 * Free the rcache data structures of an iova domain.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		if (!rcache->cpu_rcaches)
			break;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}

	kfree(iovad->rcaches);
	iovad->rcaches = NULL;
}

/*
 * Free all the IOVA ranges cached by a cpu (used when a cpu is unplugged).
 */
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

/*
 * Free all the IOVA ranges held in the global depot of an iova domain.
 */
static void free_global_cached_iovas(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_irqsave(&rcache->lock, flags);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		rcache->depot_size = 0;
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");