/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "mthca_dev.h"

/* Trivial bitmap-based allocator */
u32 mthca_alloc(struct mthca_alloc *alloc)
{
    unsigned long flags;
    u32 obj;

    spin_lock_irqsave(&alloc->lock, flags);

    obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
    if (obj >= alloc->max) {
        /* Wrapped around: rotate the high bits and rescan from 0. */
        alloc->top = (alloc->top + alloc->max) & alloc->mask;
        obj = find_first_zero_bit(alloc->table, alloc->max);
    }

    if (obj < alloc->max) {
        __set_bit(obj, alloc->table);
        obj |= alloc->top;	/* tag the slot with the current high bits */
    } else
        obj = -1;

    spin_unlock_irqrestore(&alloc->lock, flags);

    return obj;
}
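
/*
 * Worked example of the 'top' rotation above (values chosen for
 * illustration, not taken from any particular caller): with max = 256
 * and mask = 0xfff, each bitmap wraparound (and each free) adds 0x100
 * to 'top' modulo 0x1000, so 'top' cycles through 0x000, 0x100, ...,
 * 0xf00 and back to 0.  The same bitmap slot is therefore handed out
 * with different high bits on successive reuse cycles, which makes
 * stale handles easier to catch.
 */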

void mthca_free(struct mthca_alloc *alloc, u32 obj)
{
    unsigned long flags;

    /* Strip the rotating high bits added in mthca_alloc(). */
    obj &= alloc->max - 1;

    spin_lock_irqsave(&alloc->lock, flags);

    __clear_bit(obj, alloc->table);
    alloc->last = min(alloc->last, obj);
    alloc->top = (alloc->top + alloc->max) & alloc->mask;

    spin_unlock_irqrestore(&alloc->lock, flags);
}

int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
             u32 reserved)
{
    /*
     * num must be a power of 2: ffs() returns one plus the index of
     * the lowest set bit, so this checks that num has exactly one
     * bit set (e.g. ffs(1024) = 11 and 1 << 10 == 1024).
     */
    if (num != 1 << (ffs(num) - 1))
        return -EINVAL;

    alloc->last = 0;
    alloc->top  = 0;
    alloc->max  = num;
    alloc->mask = mask;
    spin_lock_init(&alloc->lock);
    alloc->table = bitmap_zalloc(num, GFP_KERNEL);
    if (!alloc->table)
        return -ENOMEM;

    /* Mark the first 'reserved' objects as permanently in use. */
    bitmap_set(alloc->table, 0, reserved);

    return 0;
}

void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
    bitmap_free(alloc->table);
}
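
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller pairs the four functions above like this.  The sizes, mask
 * and error handling below are made up for the example; real callers
 * take their table limits from dev->limits.
 *
 *	struct mthca_alloc alloc;
 *	u32 obj;
 *
 *	if (mthca_alloc_init(&alloc, 1024, (1 << 24) - 1, 16))
 *		return -ENOMEM;		// 1024 slots, first 16 reserved
 *
 *	obj = mthca_alloc(&alloc);	// -1 (0xffffffff) when exhausted
 *	if (obj == -1)
 *		goto out;
 *	...
 *	mthca_free(&alloc, obj);
 * out:
 *	mthca_alloc_cleanup(&alloc);
 */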

/*
 * Array of pointers with lazy allocation of leaf pages.  Callers of
 * _get, _set and _clear methods must use a lock or otherwise
 * serialize access to the array.
 */

#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)

void *mthca_array_get(struct mthca_array *array, int index)
{
    int p = (index * sizeof (void *)) >> PAGE_SHIFT;

    if (array->page_list[p].page)
        return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
    else
        return NULL;
}

int mthca_array_set(struct mthca_array *array, int index, void *value)
{
    int p = (index * sizeof (void *)) >> PAGE_SHIFT;

    /* Allocate with GFP_ATOMIC because we'll be called with locks held. */
    if (!array->page_list[p].page)
        array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);

    if (!array->page_list[p].page)
        return -ENOMEM;

    array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
    ++array->page_list[p].used;

    return 0;
}

void mthca_array_clear(struct mthca_array *array, int index)
{
    int p = (index * sizeof (void *)) >> PAGE_SHIFT;

    /* Free the whole leaf page once its last entry is cleared. */
    if (--array->page_list[p].used == 0) {
        free_page((unsigned long) array->page_list[p].page);
        array->page_list[p].page = NULL;
    } else
        array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;

    if (array->page_list[p].used < 0)
        pr_debug("Array %p index %d page %d with ref count %d < 0\n",
             array, index, p, array->page_list[p].used);
}

int mthca_array_init(struct mthca_array *array, int nent)
{
    int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
    int i;

    array->page_list = kmalloc_array(npage, sizeof(*array->page_list),
                     GFP_KERNEL);
    if (!array->page_list)
        return -ENOMEM;

    for (i = 0; i < npage; ++i) {
        array->page_list[i].page = NULL;
        array->page_list[i].used = 0;
    }

    return 0;
}

void mthca_array_cleanup(struct mthca_array *array, int nent)
{
    int i;

    for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
        free_page((unsigned long) array->page_list[i].page);

    kfree(array->page_list);
}
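
/*
 * Usage sketch (illustrative only): the driver uses this structure to
 * map object numbers (e.g. QPNs) to driver structures.  The names and
 * the lock below are hypothetical; the array itself does no locking,
 * as the comment above says.
 *
 *	struct mthca_array qps;
 *
 *	if (mthca_array_init(&qps, num_qps))
 *		return -ENOMEM;
 *
 *	spin_lock_irq(&table_lock);
 *	err = mthca_array_set(&qps, qpn, qp);	// may fail: GFP_ATOMIC
 *	spin_unlock_irq(&table_lock);
 *	...
 *	qp = mthca_array_get(&qps, qpn);	// NULL if never set
 *	...
 *	spin_lock_irq(&table_lock);
 *	mthca_array_clear(&qps, qpn);
 *	spin_unlock_irq(&table_lock);
 *
 *	mthca_array_cleanup(&qps, num_qps);
 */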

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
            union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
            int hca_write, struct mthca_mr *mr)
{
    int err = -ENOMEM;
    int npages, shift;
    u64 *dma_list = NULL;
    dma_addr_t t;
    int i;

    if (size <= max_direct) {
        *is_direct = 1;
        npages     = 1;
        shift      = get_order(size) + PAGE_SHIFT;

        buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
                             size, &t, GFP_KERNEL);
        if (!buf->direct.buf)
            return -ENOMEM;

        dma_unmap_addr_set(&buf->direct, mapping, t);

        /*
         * If the DMA address isn't aligned to the block size, halve
         * the block size (and double the block count) until it is.
         */
        while (t & ((1 << shift) - 1)) {
            --shift;
            npages *= 2;
        }

        dma_list = kmalloc_array(npages, sizeof(*dma_list),
                     GFP_KERNEL);
        if (!dma_list)
            goto err_free;

        for (i = 0; i < npages; ++i)
            dma_list[i] = t + i * (1 << shift);
    } else {
        *is_direct = 0;
        npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        shift      = PAGE_SHIFT;

        dma_list = kmalloc_array(npages, sizeof(*dma_list),
                     GFP_KERNEL);
        if (!dma_list)
            return -ENOMEM;

        buf->page_list = kmalloc_array(npages,
                           sizeof(*buf->page_list),
                           GFP_KERNEL);
        if (!buf->page_list)
            goto err_out;

        for (i = 0; i < npages; ++i)
            buf->page_list[i].buf = NULL;

        for (i = 0; i < npages; ++i) {
            buf->page_list[i].buf =
                dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                           &t, GFP_KERNEL);
            if (!buf->page_list[i].buf)
                goto err_free;

            dma_list[i] = t;
            dma_unmap_addr_set(&buf->page_list[i], mapping, t);

            clear_page(buf->page_list[i].buf);
        }
    }

    err = mthca_mr_alloc_phys(dev, pd->pd_num,
                  dma_list, shift, npages,
                  0, size,
                  MTHCA_MPT_FLAG_LOCAL_READ |
                  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
                  mr);
    if (err)
        goto err_free;

    kfree(dma_list);

    return 0;

err_free:
    mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
    kfree(dma_list);

    return err;
}

void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
            int is_direct, struct mthca_mr *mr)
{
    int i;

    if (mr)
        mthca_free_mr(dev, mr);

    if (is_direct)
        dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
                  dma_unmap_addr(&buf->direct, mapping));
    else {
        for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
            dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                      buf->page_list[i].buf,
                      dma_unmap_addr(&buf->page_list[i],
                             mapping));
        kfree(buf->page_list);
    }
}
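
/*
 * Usage sketch (illustrative only): queue setup code calls this pair
 * roughly as below.  max_direct bounds how much physically contiguous
 * memory we are willing to demand before falling back to a page list.
 * The constants shown live in the CQ code; treat the exact names and
 * values here as assumptions for the example.
 *
 *	union mthca_buf queue;
 *	struct mthca_mr mr;
 *	int is_direct;
 *	int err;
 *
 *	err = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
 *			      MTHCA_MAX_DIRECT_CQ_SIZE,
 *			      &queue, &is_direct, &dev->driver_pd,
 *			      1, &mr);	// 1 => HCA may write the buffer
 *	if (err)
 *		return err;
 *	...
 *	mthca_buf_free(dev, nent * MTHCA_CQ_ENTRY_SIZE, &queue,
 *		       is_direct, &mr);
 */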