/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>

static unsigned long iommu_large_alloc = 15;

static DEFINE_PER_CPU(unsigned int, iommu_hash_common);

static inline bool need_flush(struct iommu_map_table *iommu)
{
    return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
    iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
    iommu->flags &= ~IOMMU_NEED_FLUSH;
}

static void setup_iommu_pool_hash(void)
{
    unsigned int i;
    static bool do_once;

    if (do_once)
        return;
    do_once = true;
    for_each_possible_cpu(i)
        per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}

/*
 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than iommu_large_alloc pages.
 */
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
             unsigned long num_entries,
             u32 table_shift,
             void (*lazy_flush)(struct iommu_map_table *),
             bool large_pool, u32 npools,
             bool skip_span_boundary_check)
{
    unsigned int start, i;
    struct iommu_pool *p = &(iommu->large_pool);

    setup_iommu_pool_hash();
    if (npools == 0)
        iommu->nr_pools = IOMMU_NR_POOLS;
    else
        iommu->nr_pools = npools;
    BUG_ON(npools > IOMMU_NR_POOLS);

    iommu->table_shift = table_shift;
    iommu->lazy_flush = lazy_flush;
    start = 0;
    if (skip_span_boundary_check)
        iommu->flags |= IOMMU_NO_SPAN_BOUND;
    if (large_pool)
        iommu->flags |= IOMMU_HAS_LARGE_POOL;

    if (!large_pool)
        iommu->poolsize = num_entries/iommu->nr_pools;
    else
        iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
    for (i = 0; i < iommu->nr_pools; i++) {
        spin_lock_init(&(iommu->pools[i].lock));
        iommu->pools[i].start = start;
        iommu->pools[i].hint = start;
        start += iommu->poolsize; /* start for next pool */
        iommu->pools[i].end = start - 1;
    }
    if (!large_pool)
        return;
    /* initialize large_pool */
    spin_lock_init(&(p->lock));
    p->start = start;
    p->hint = p->start;
    p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);
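
/*
 * Hedged usage sketch (not part of the original file): how a caller might
 * set up an iommu_map_table before handing it to iommu_tbl_pool_init().
 * The helper name example_map_table_init, the GFP flags and the choice to
 * enable the large pool are illustrative assumptions only; the caller, not
 * this library, owns the bitmap and table_map_base. Assumes <linux/slab.h>
 * for kzalloc().
 */
static int __maybe_unused example_map_table_init(struct iommu_map_table *tbl,
                                                 unsigned long num_entries,
                                                 unsigned long dma_base,
                                                 u32 page_shift)
{
    /* one bit per table entry; this library only reads/writes tbl->map */
    tbl->map = kzalloc(BITS_TO_LONGS(num_entries) * sizeof(long), GFP_KERNEL);
    if (!tbl->map)
        return -ENOMEM;

    /* bus address of entry 0; used by the default mapping in
     * iommu_tbl_range_free()
     */
    tbl->table_map_base = dma_base;

    /* no lazy flush hook, default pool count, large pool enabled,
     * span-boundary checking left on
     */
    iommu_tbl_pool_init(tbl, num_entries, page_shift, NULL, true, 0, false);
    return 0;
}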

unsigned long iommu_tbl_range_alloc(struct device *dev,
                struct iommu_map_table *iommu,
                unsigned long npages,
                unsigned long *handle,
                unsigned long mask,
                unsigned int align_order)
{
    unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
    unsigned long n, end, start, limit, boundary_size;
    struct iommu_pool *pool;
    int pass = 0;
    unsigned int pool_nr;
    unsigned int npools = iommu->nr_pools;
    unsigned long flags;
    bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
    bool largealloc = (large_pool && npages > iommu_large_alloc);
    unsigned long shift;
    unsigned long align_mask = 0;

    if (align_order > 0)
        align_mask = ~0ul >> (BITS_PER_LONG - align_order);

    /* Sanity check */
    if (unlikely(npages == 0)) {
        WARN_ON_ONCE(1);
        return IOMMU_ERROR_CODE;
    }

    if (largealloc) {
        pool = &(iommu->large_pool);
        pool_nr = 0; /* to keep compiler happy */
    } else {
        /* pick out pool_nr */
        pool_nr = pool_hash & (npools - 1);
        pool = &(iommu->pools[pool_nr]);
    }
    spin_lock_irqsave(&pool->lock, flags);

 again:
    if (pass == 0 && handle && *handle &&
        (*handle >= pool->start) && (*handle < pool->end))
        start = *handle;
    else
        start = pool->hint;

    limit = pool->end;

    /* The case below can happen if we have a small segment appended
     * to a large, or when the previous alloc was at the very end of
     * the available space. If so, go back to the beginning. If a
     * flush is needed, it will get done based on the return value
     * from iommu_area_alloc() below.
     */
    if (start >= limit)
        start = pool->start;
    shift = iommu->table_map_base >> iommu->table_shift;
    if (limit + shift > mask) {
        limit = mask - shift + 1;
        /* If we're constrained on address range, first try
         * at the masked hint to avoid O(n) search complexity,
         * but on second pass, start at 0 in pool 0.
         */
        if ((start & mask) >= limit || pass > 0) {
            spin_unlock(&(pool->lock));
            pool = &(iommu->pools[0]);
            spin_lock(&(pool->lock));
            start = pool->start;
        } else {
            start &= mask;
        }
    }

    if (dev)
        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                      1 << iommu->table_shift);
    else
        boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);

    boundary_size = boundary_size >> iommu->table_shift;
    /*
     * if the skip_span_boundary_check had been set during init, we set
     * things up so that iommu_is_span_boundary() merely checks if the
     * (index + npages) < num_tsb_entries
     */
    if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
        shift = 0;
        boundary_size = iommu->poolsize * iommu->nr_pools;
    }
    n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
                 boundary_size, align_mask);
    if (n == -1) {
        if (likely(pass == 0)) {
            /* First failure, rescan from the beginning.  */
            pool->hint = pool->start;
            set_flush(iommu);
            pass++;
            goto again;
        } else if (!largealloc && pass <= iommu->nr_pools) {
            spin_unlock(&(pool->lock));
            pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
            pool = &(iommu->pools[pool_nr]);
            spin_lock(&(pool->lock));
            pool->hint = pool->start;
            set_flush(iommu);
            pass++;
            goto again;
        } else {
            /* give up */
            n = IOMMU_ERROR_CODE;
            goto bail;
        }
    }
    if (iommu->lazy_flush &&
        (n < pool->hint || need_flush(iommu))) {
        clear_flush(iommu);
        iommu->lazy_flush(iommu);
    }

    end = n + npages;
    pool->hint = end;

    /* Update handle for SG allocations */
    if (handle)
        *handle = end;
bail:
    spin_unlock_irqrestore(&(pool->lock), flags);

    return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
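
/*
 * Hedged usage sketch (not part of the original file): a hypothetical
 * helper showing the typical allocation flow. The name example_map_pages
 * and the error sentinel are assumptions for illustration; real callers
 * (e.g. the sparc IOMMU code) also program the hardware TSB entries for
 * the allocated range before handing the bus address to the device.
 */
static dma_addr_t __maybe_unused example_map_pages(struct device *dev,
                                                   struct iommu_map_table *tbl,
                                                   unsigned long npages)
{
    unsigned long entry;

    /* handle == NULL: not a scatterlist allocation;
     * mask == ~0UL: no addressing restriction;
     * align_order == 0: no alignment beyond a single entry
     */
    entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, ~0UL, 0);
    if (entry == IOMMU_ERROR_CODE)
        return (dma_addr_t)-1; /* illustrative failure sentinel */

    /* default entry -> bus address mapping, the inverse of what
     * iommu_tbl_range_free() computes when entry == IOMMU_ERROR_CODE
     */
    return tbl->table_map_base + (entry << tbl->table_shift);
}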

static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
                   unsigned long entry)
{
    struct iommu_pool *p;
    unsigned long largepool_start = tbl->large_pool.start;
    bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

    /* The large pool is the last pool at the top of the table */
    if (large_pool && entry >= largepool_start) {
        p = &tbl->large_pool;
    } else {
        unsigned int pool_nr = entry / tbl->poolsize;

        BUG_ON(pool_nr >= tbl->nr_pools);
        p = &tbl->pools[pool_nr];
    }
    return p;
}

/* Caller supplies the index of the entry into the iommu map table
 * itself when the mapping from dma_addr to the entry is not the
 * default addr->entry mapping below.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
              unsigned long npages, unsigned long entry)
{
    struct iommu_pool *pool;
    unsigned long flags;
    unsigned long shift = iommu->table_shift;

    if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
        entry = (dma_addr - iommu->table_map_base) >> shift;
    pool = get_pool(iommu, entry);

    spin_lock_irqsave(&(pool->lock), flags);
    bitmap_clear(iommu->map, entry, npages);
    spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
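
/*
 * Hedged companion sketch to example_map_pages() above (not part of the
 * original file): freeing by bus address alone, letting the library derive
 * the entry index via the default addr->entry mapping by passing
 * IOMMU_ERROR_CODE as `entry'.
 */
static void __maybe_unused example_unmap_pages(struct iommu_map_table *tbl,
                                               dma_addr_t dma_addr,
                                               unsigned long npages)
{
    iommu_tbl_range_free(tbl, dma_addr, npages, IOMMU_ERROR_CODE);
}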