0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 #include <linux/errno.h>
0034 #include <linux/slab.h>
0035 #include <linux/bitmap.h>
0036
0037 #include "mthca_dev.h"
0038
0039
0040 u32 mthca_alloc(struct mthca_alloc *alloc)
0041 {
0042 unsigned long flags;
0043 u32 obj;
0044
0045 spin_lock_irqsave(&alloc->lock, flags);
0046
0047 obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
0048 if (obj >= alloc->max) {
0049 alloc->top = (alloc->top + alloc->max) & alloc->mask;
0050 obj = find_first_zero_bit(alloc->table, alloc->max);
0051 }
0052
0053 if (obj < alloc->max) {
0054 __set_bit(obj, alloc->table);
0055 obj |= alloc->top;
0056 } else
0057 obj = -1;
0058
0059 spin_unlock_irqrestore(&alloc->lock, flags);
0060
0061 return obj;
0062 }
0063
0064 void mthca_free(struct mthca_alloc *alloc, u32 obj)
0065 {
0066 unsigned long flags;
0067
0068 obj &= alloc->max - 1;
0069
0070 spin_lock_irqsave(&alloc->lock, flags);
0071
0072 __clear_bit(obj, alloc->table);
0073 alloc->last = min(alloc->last, obj);
0074 alloc->top = (alloc->top + alloc->max) & alloc->mask;
0075
0076 spin_unlock_irqrestore(&alloc->lock, flags);
0077 }
0078
0079 int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
0080 u32 reserved)
0081 {
0082
0083 if (num != 1 << (ffs(num) - 1))
0084 return -EINVAL;
0085
0086 alloc->last = 0;
0087 alloc->top = 0;
0088 alloc->max = num;
0089 alloc->mask = mask;
0090 spin_lock_init(&alloc->lock);
0091 alloc->table = bitmap_zalloc(num, GFP_KERNEL);
0092 if (!alloc->table)
0093 return -ENOMEM;
0094
0095 bitmap_set(alloc->table, 0, reserved);
0096
0097 return 0;
0098 }
0099
/*
 * Free the bitmap backing an allocator initialized by
 * mthca_alloc_init().  The caller must guarantee no concurrent
 * mthca_alloc()/mthca_free() calls are in flight.
 */
void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
	bitmap_free(alloc->table);
}
0104
0105
0106
0107
0108
0109
0110
0111 #define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)
0112
0113 void *mthca_array_get(struct mthca_array *array, int index)
0114 {
0115 int p = (index * sizeof (void *)) >> PAGE_SHIFT;
0116
0117 if (array->page_list[p].page)
0118 return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
0119 else
0120 return NULL;
0121 }
0122
0123 int mthca_array_set(struct mthca_array *array, int index, void *value)
0124 {
0125 int p = (index * sizeof (void *)) >> PAGE_SHIFT;
0126
0127
0128 if (!array->page_list[p].page)
0129 array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
0130
0131 if (!array->page_list[p].page)
0132 return -ENOMEM;
0133
0134 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
0135 ++array->page_list[p].used;
0136
0137 return 0;
0138 }
0139
0140 void mthca_array_clear(struct mthca_array *array, int index)
0141 {
0142 int p = (index * sizeof (void *)) >> PAGE_SHIFT;
0143
0144 if (--array->page_list[p].used == 0) {
0145 free_page((unsigned long) array->page_list[p].page);
0146 array->page_list[p].page = NULL;
0147 } else
0148 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;
0149
0150 if (array->page_list[p].used < 0)
0151 pr_debug("Array %p index %d page %d with ref count %d < 0\n",
0152 array, index, p, array->page_list[p].used);
0153 }
0154
0155 int mthca_array_init(struct mthca_array *array, int nent)
0156 {
0157 int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
0158 int i;
0159
0160 array->page_list = kmalloc_array(npage, sizeof(*array->page_list),
0161 GFP_KERNEL);
0162 if (!array->page_list)
0163 return -ENOMEM;
0164
0165 for (i = 0; i < npage; ++i) {
0166 array->page_list[i].page = NULL;
0167 array->page_list[i].used = 0;
0168 }
0169
0170 return 0;
0171 }
0172
0173 void mthca_array_cleanup(struct mthca_array *array, int nent)
0174 {
0175 int i;
0176
0177 for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
0178 free_page((unsigned long) array->page_list[i].page);
0179
0180 kfree(array->page_list);
0181 }
0182
0183
0184
0185
0186
0187
0188
0189
/*
 * Allocate a buffer of @size bytes for HCA use and register it as a
 * memory region.
 *
 * If @size fits within @max_direct, a single physically contiguous
 * ("direct") DMA allocation is used; otherwise the buffer is built
 * from individual PAGE_SIZE coherent allocations.  Either way a
 * temporary dma_list of chunk bus addresses is built and handed to
 * mthca_mr_alloc_phys() to create the MR (@hca_write adds local-write
 * access).  *@is_direct records which layout was chosen so that
 * mthca_buf_free() can undo it.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here has been freed again.
 */
int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
		    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
		    int hca_write, struct mthca_mr *mr)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= max_direct) {
		/* Small enough: one physically contiguous allocation. */
		*is_direct = 1;
		npages = 1;
		shift = get_order(size) + PAGE_SHIFT;

		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		dma_unmap_addr_set(&buf->direct, mapping, t);

		/*
		 * The bus address may be less aligned than 1 << shift.
		 * Halve the chunk size (doubling the chunk count) until
		 * t is aligned to it, so the region can be described to
		 * the HCA as npages equal chunks of 1 << shift bytes.
		 */
		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc_array(npages, sizeof(*dma_list),
					 GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		/* Indirect layout: one coherent allocation per page. */
		*is_direct = 0;
		npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift = PAGE_SHIFT;

		dma_list = kmalloc_array(npages, sizeof(*dma_list),
					 GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		buf->page_list = kmalloc_array(npages,
					       sizeof(*buf->page_list),
					       GFP_KERNEL);
		if (!buf->page_list)
			goto err_out;

		/*
		 * Pre-NULL every entry so mthca_buf_free() can safely
		 * walk the full list if we fail partway through the
		 * per-page allocations below.
		 */
		for (i = 0; i < npages; ++i)
			buf->page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			dma_list[i] = t;
			dma_unmap_addr_set(&buf->page_list[i], mapping, t);

			clear_page(buf->page_list[i].buf);
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_READ |
				  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
				  mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	/* No MR exists on this path, so pass NULL for mr. */
	mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
	kfree(dma_list);	/* kfree(NULL) is a no-op */

	return err;
}
0278
0279 void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
0280 int is_direct, struct mthca_mr *mr)
0281 {
0282 int i;
0283
0284 if (mr)
0285 mthca_free_mr(dev, mr);
0286
0287 if (is_direct)
0288 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
0289 dma_unmap_addr(&buf->direct, mapping));
0290 else {
0291 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
0292 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
0293 buf->page_list[i].buf,
0294 dma_unmap_addr(&buf->page_list[i],
0295 mapping));
0296 kfree(buf->page_list);
0297 }
0298 }