// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>
#include <asm/iommu-common.h>

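/* Allocations of more than this many pages are served from the large pool,
 * when one was set up at init time.
 */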
static unsigned long iommu_large_alloc = 15;

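/* Per-cpu hash value, set up once in setup_iommu_pool_hash() and used to
 * spread CPUs across the allocation pools in iommu_tbl_range_alloc().
 */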
static DEFINE_PER_CPU(unsigned int, iommu_hash_common);

static inline bool need_flush(struct iommu_map_table *iommu)
{
    return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
    iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
    iommu->flags &= ~IOMMU_NEED_FLUSH;
}

static void setup_iommu_pool_hash(void)
{
    unsigned int i;
    static bool do_once;

    if (do_once)
        return;
    do_once = true;
    for_each_possible_cpu(i)
        per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}

/*
 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than iommu_large_alloc pages.
 */
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
             unsigned long num_entries,
             u32 table_shift,
             void (*lazy_flush)(struct iommu_map_table *),
             bool large_pool, u32 npools,
             bool skip_span_boundary_check)
{
    unsigned int start, i;
    struct iommu_pool *p = &(iommu->large_pool);

    setup_iommu_pool_hash();
    if (npools == 0)
        iommu->nr_pools = IOMMU_NR_POOLS;
    else
        iommu->nr_pools = npools;
    BUG_ON(npools > IOMMU_NR_POOLS);

    iommu->table_shift = table_shift;
    iommu->lazy_flush = lazy_flush;
    start = 0;
    if (skip_span_boundary_check)
        iommu->flags |= IOMMU_NO_SPAN_BOUND;
    if (large_pool)
        iommu->flags |= IOMMU_HAS_LARGE_POOL;

    if (!large_pool)
        iommu->poolsize = num_entries/iommu->nr_pools;
    else
        iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
    for (i = 0; i < iommu->nr_pools; i++) {
        spin_lock_init(&(iommu->pools[i].lock));
        iommu->pools[i].start = start;
        iommu->pools[i].hint = start;
        start += iommu->poolsize; /* start for next pool */
        iommu->pools[i].end = start - 1;
    }
    if (!large_pool)
        return;
    /* initialize large_pool */
    spin_lock_init(&(p->lock));
    p->start = start;
    p->hint = p->start;
    p->end = num_entries;
}
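
/*
 * Hedged usage sketch (illustration only, not part of the original file):
 * one way a caller might size the allocation bitmap and initialize the
 * table. The demo_tbl_setup() name, the 8K IO-page assumption
 * (table_shift = 13) and the use of bitmap_zalloc()/GFP_KERNEL are
 * assumptions for illustration; only the iommu_tbl_pool_init() call itself
 * is defined by this file.
 */
static int demo_tbl_setup(struct iommu_map_table *tbl,
              unsigned long num_entries, u64 dma_base)
{
    /* One bit per table entry. */
    tbl->map = bitmap_zalloc(num_entries, GFP_KERNEL);
    if (!tbl->map)
        return -ENOMEM;
    tbl->table_map_base = dma_base;

    /* Default pool count (npools == 0), reserve the top 1/4 of the table
     * as the large pool, no lazy-flush callback, keep the segment
     * boundary check enabled.
     */
    iommu_tbl_pool_init(tbl, num_entries, 13, NULL, true, 0, false);
    return 0;
}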

/*
 * Allocate a contiguous range of `npages' entries from the map table.
 * Returns the index of the first allocated entry, or IOMMU_ERROR_CODE on
 * failure. If `handle' is non-NULL, it carries the end of the previous
 * allocation and is used as a starting hint so that scatterlist segments
 * tend to be placed contiguously; it is updated on success. `mask' limits
 * the highest DMA page number (DMA address >> table_shift) the allocation
 * may reach, for address-limited devices, and `align_order' requests
 * 2^align_order entry alignment.
 */
unsigned long iommu_tbl_range_alloc(struct device *dev,
                struct iommu_map_table *iommu,
                unsigned long npages,
                unsigned long *handle,
                unsigned long mask,
                unsigned int align_order)
{
    unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
    unsigned long n, end, start, limit, boundary_size;
    struct iommu_pool *pool;
    int pass = 0;
    unsigned int pool_nr;
    unsigned int npools = iommu->nr_pools;
    unsigned long flags;
    bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
    bool largealloc = (large_pool && npages > iommu_large_alloc);
    unsigned long shift;
    unsigned long align_mask = 0;

    if (align_order > 0)
        align_mask = ~0ul >> (BITS_PER_LONG - align_order);

    /* Sanity check */
    if (unlikely(npages == 0)) {
        WARN_ON_ONCE(1);
        return IOMMU_ERROR_CODE;
    }

    if (largealloc) {
        pool = &(iommu->large_pool);
        pool_nr = 0; /* to keep compiler happy */
    } else {
        /* pick out pool_nr */
        pool_nr = pool_hash & (npools - 1);
        pool = &(iommu->pools[pool_nr]);
    }
    spin_lock_irqsave(&pool->lock, flags);

 again:
    if (pass == 0 && handle && *handle &&
        (*handle >= pool->start) && (*handle < pool->end))
        start = *handle;
    else
        start = pool->hint;

    limit = pool->end;

    /* The case below can happen if we have a small segment appended
     * to a large one, or when the previous alloc was at the very end
     * of the available space. If so, go back to the beginning. If a
     * flush is needed, it will get done based on the return value
     * from iommu_area_alloc() below.
     */
    if (start >= limit)
        start = pool->start;
    shift = iommu->table_map_base >> iommu->table_shift;
    if (limit + shift > mask) {
        limit = mask - shift + 1;
        /* If we're constrained on address range, first try
         * at the masked hint to avoid O(n) search complexity,
         * but on second pass, start at 0 in pool 0.
         */
        if ((start & mask) >= limit || pass > 0) {
            spin_unlock(&(pool->lock));
            pool = &(iommu->pools[0]);
            spin_lock(&(pool->lock));
            start = pool->start;
        } else {
            start &= mask;
        }
    }

    /*
     * If skip_span_boundary_check was set during init, we set things up
     * so that iommu_is_span_boundary() merely checks whether
     * (index + npages) < num_tsb_entries.
     */
    if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
        shift = 0;
        boundary_size = iommu->poolsize * iommu->nr_pools;
    } else {
        boundary_size = dma_get_seg_boundary_nr_pages(dev,
                    iommu->table_shift);
    }
    n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
                 boundary_size, align_mask);
    if (n == -1) {
        if (likely(pass == 0)) {
            /* First failure, rescan from the beginning.  */
            pool->hint = pool->start;
            set_flush(iommu);
            pass++;
            goto again;
        } else if (!largealloc && pass <= iommu->nr_pools) {
            spin_unlock(&(pool->lock));
            pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
            pool = &(iommu->pools[pool_nr]);
            spin_lock(&(pool->lock));
            pool->hint = pool->start;
            set_flush(iommu);
            pass++;
            goto again;
        } else {
            /* give up */
            n = IOMMU_ERROR_CODE;
            goto bail;
        }
    }
    if (iommu->lazy_flush &&
        (n < pool->hint || need_flush(iommu))) {
        clear_flush(iommu);
        iommu->lazy_flush(iommu);
    }

    end = n + npages;
    pool->hint = end;

    /* Update handle for SG allocations */
    if (handle)
        *handle = end;
bail:
    spin_unlock_irqrestore(&(pool->lock), flags);

    return n;
}
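
/*
 * Hedged usage sketch (illustration only): allocating `npages' entries with
 * no address-range restriction (mask = ~0UL, no extra alignment) and turning
 * the returned table index into a DMA address, mirroring the default
 * addr->entry mapping used by iommu_tbl_range_free() below. demo_map() and
 * its error convention are hypothetical.
 */
static u64 demo_map(struct device *dev, struct iommu_map_table *tbl,
            unsigned long npages)
{
    unsigned long entry;

    entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, ~0UL, 0);
    if (entry == IOMMU_ERROR_CODE)
        return 0;   /* failure value chosen for this sketch only */

    return tbl->table_map_base + ((u64)entry << tbl->table_shift);
}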

static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
                   unsigned long entry)
{
    struct iommu_pool *p;
    unsigned long largepool_start = tbl->large_pool.start;
    bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

    /* The large pool is the last pool at the top of the table */
    if (large_pool && entry >= largepool_start) {
        p = &tbl->large_pool;
    } else {
        unsigned int pool_nr = entry / tbl->poolsize;

        BUG_ON(pool_nr >= tbl->nr_pools);
        p = &tbl->pools[pool_nr];
    }
    return p;
}

/* Caller supplies the index of the entry into the iommu map table
 * itself when the mapping from dma_addr to the entry is not the
 * default addr->entry mapping below.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
              unsigned long npages, unsigned long entry)
{
    struct iommu_pool *pool;
    unsigned long flags;
    unsigned long shift = iommu->table_shift;

    if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
        entry = (dma_addr - iommu->table_map_base) >> shift;
    pool = get_pool(iommu, entry);

    spin_lock_irqsave(&(pool->lock), flags);
    bitmap_clear(iommu->map, entry, npages);
    spin_unlock_irqrestore(&(pool->lock), flags);
}
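
/*
 * Hedged usage sketch (illustration only): releasing a mapping made with
 * iommu_tbl_range_alloc(). Passing IOMMU_ERROR_CODE as `entry' asks
 * iommu_tbl_range_free() to derive the table index from dma_addr via the
 * default addr->entry mapping above. demo_unmap() is hypothetical.
 */
static void demo_unmap(struct iommu_map_table *tbl, u64 dma_addr,
               unsigned long npages)
{
    iommu_tbl_range_free(tbl, dma_addr, npages, IOMMU_ERROR_CODE);
}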