/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in chunks as big as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
    MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
    MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

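/*
 * Per-context table of userspace doorbell pages.  Each entry records
 * the userspace virtual address of a pinned page, the scatterlist
 * entry used to DMA-map it, and the number of doorbell records on
 * the page currently in use.
 */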
struct mthca_user_db_table {
    struct mutex mutex;
    struct {
        u64                uvirt;
        struct scatterlist mem;
        int                refcount;
    } page[];
};

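/*
 * Undo mthca_alloc_icm_pages(): unmap the chunk's scatterlist from
 * the device (if it was ever mapped) and free the underlying pages.
 */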
static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
    int i;

    if (chunk->nsg > 0)
        dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
                 DMA_BIDIRECTIONAL);

    for (i = 0; i < chunk->npages; ++i)
        __free_pages(sg_page(&chunk->mem[i]),
                 get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
    int i;

    for (i = 0; i < chunk->npages; ++i) {
        dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                  lowmem_page_address(sg_page(&chunk->mem[i])),
                  sg_dma_address(&chunk->mem[i]));
    }
}

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
    struct mthca_icm_chunk *chunk, *tmp;

    if (!icm)
        return;

    list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
        if (coherent)
            mthca_free_icm_coherent(dev, chunk);
        else
            mthca_free_icm_pages(dev, chunk);

        kfree(chunk);
    }

    kfree(icm);
}

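/* Allocate one physically contiguous, zeroed block of 2^order pages. */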
static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
    struct page *page;

    /*
     * Use __GFP_ZERO because buggy firmware assumes ICM pages are
     * cleared, and subtle failures are seen if they aren't.
     */
    page = alloc_pages(gfp_mask | __GFP_ZERO, order);
    if (!page)
        return -ENOMEM;

    sg_set_page(mem, page, PAGE_SIZE << order, 0);
    return 0;
}

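/*
 * Coherent variant: dma_alloc_coherent() gives us the DMA address
 * directly, so we fill in both the buffer and the DMA length here
 * and the chunk never needs a separate dma_map_sg() call.
 */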
static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                    int order, gfp_t gfp_mask)
{
    void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
                       gfp_mask);
    if (!buf)
        return -ENOMEM;

    sg_set_buf(mem, buf, PAGE_SIZE << order);
    BUG_ON(mem->offset);
    sg_dma_len(mem) = PAGE_SIZE << order;
    return 0;
}

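/*
 * Allocate npages of ICM as a list of chunks, each holding up to
 * MTHCA_ICM_CHUNK_LEN scatterlist entries.  We start at the order of
 * MTHCA_ICM_ALLOC_SIZE, never ask for more pages than remain, and
 * back off to smaller orders when an allocation fails.
 */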
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                  gfp_t gfp_mask, int coherent)
{
    struct mthca_icm *icm;
    struct mthca_icm_chunk *chunk = NULL;
    int cur_order;
    int ret;

    /* We use sg_set_buf for coherent allocs, which assumes low memory */
    BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

    icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
    if (!icm)
        return icm;

    icm->refcount = 0;
    INIT_LIST_HEAD(&icm->chunk_list);

    cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

    while (npages > 0) {
        if (!chunk) {
            chunk = kmalloc(sizeof *chunk,
                    gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
            if (!chunk)
                goto fail;

            sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
            chunk->npages = 0;
            chunk->nsg    = 0;
            list_add_tail(&chunk->list, &icm->chunk_list);
        }

        while (1 << cur_order > npages)
            --cur_order;

        if (coherent)
            ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
                               &chunk->mem[chunk->npages],
                               cur_order, gfp_mask);
        else
            ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
                            cur_order, gfp_mask);

        if (!ret) {
            ++chunk->npages;

            if (coherent)
                ++chunk->nsg;
            else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
                chunk->nsg =
                    dma_map_sg(&dev->pdev->dev, chunk->mem,
                           chunk->npages,
                           DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                    goto fail;
            }

            if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
                chunk = NULL;

            npages -= 1 << cur_order;
        } else {
            --cur_order;
            if (cur_order < 0)
                goto fail;
        }
    }

    if (!coherent && chunk) {
        chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem,
                    chunk->npages, DMA_BIDIRECTIONAL);

        if (chunk->nsg <= 0)
            goto fail;
    }

    return icm;

fail:
    mthca_free_icm(dev, icm, coherent);
    return NULL;
}

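/*
 * Make sure the ICM chunk backing table object obj is present,
 * allocating it and mapping it into the device's ICM space on first
 * use.  Chunks are reference counted; callers must balance this with
 * mthca_table_put().
 */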
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
    int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
    int ret = 0;

    mutex_lock(&table->mutex);

    if (table->icm[i]) {
        ++table->icm[i]->refcount;
        goto out;
    }

    table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                    (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                    __GFP_NOWARN, table->coherent);
    if (!table->icm[i]) {
        ret = -ENOMEM;
        goto out;
    }

    if (mthca_MAP_ICM(dev, table->icm[i],
              table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
        mthca_free_icm(dev, table->icm[i], table->coherent);
        table->icm[i] = NULL;
        ret = -ENOMEM;
        goto out;
    }

    ++table->icm[i]->refcount;

out:
    mutex_unlock(&table->mutex);
    return ret;
}

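/*
 * Drop a reference on the chunk backing table object obj, unmapping
 * and freeing the chunk when the last reference goes away.
 */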
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
    int i;

    if (!mthca_is_memfree(dev))
        return;

    i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

    mutex_lock(&table->mutex);

    if (--table->icm[i]->refcount == 0) {
        mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
        mthca_free_icm(dev, table->icm[i], table->coherent);
        table->icm[i] = NULL;
    }

    mutex_unlock(&table->mutex);
}

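/*
 * Return the kernel virtual address of table object obj, and
 * optionally its DMA address through dma_handle.  This only works
 * for lowmem tables, since highmem pages have no permanent kernel
 * mapping.
 */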
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
    int idx, offset, dma_offset, i;
    struct mthca_icm_chunk *chunk;
    struct mthca_icm *icm;
    struct page *page = NULL;

    if (!table->lowmem)
        return NULL;

    mutex_lock(&table->mutex);

    idx = (obj & (table->num_obj - 1)) * table->obj_size;
    icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
    dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

    if (!icm)
        goto out;

    list_for_each_entry(chunk, &icm->chunk_list, list) {
        for (i = 0; i < chunk->npages; ++i) {
            if (dma_handle && dma_offset >= 0) {
                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                    *dma_handle = sg_dma_address(&chunk->mem[i]) +
                        dma_offset;
                dma_offset -= sg_dma_len(&chunk->mem[i]);
            }
            /*
             * DMA mapping can merge pages but not split them, so
             * if we have found our page, dma_handle has already
             * been assigned above.
             */
            if (chunk->mem[i].length > offset) {
                page = sg_page(&chunk->mem[i]);
                goto out;
            }
            offset -= chunk->mem[i].length;
        }
    }

out:
    mutex_unlock(&table->mutex);
    return page ? lowmem_page_address(page) + offset : NULL;
}

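/*
 * Take a reference on every chunk covering objects start..end,
 * unwinding any references already taken if one of the chunks can't
 * be allocated.
 */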
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
              int start, int end)
{
    int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
    int i, err;

    for (i = start; i <= end; i += inc) {
        err = mthca_table_get(dev, table, i);
        if (err)
            goto fail;
    }

    return 0;

fail:
    while (i > start) {
        i -= inc;
        mthca_table_put(dev, table, i);
    }

    return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
               int start, int end)
{
    int i;

    if (!mthca_is_memfree(dev))
        return;

    for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
        mthca_table_put(dev, table, i);
}

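/*
 * Create a table of nobj objects of obj_size bytes, mapped at ICM
 * virtual address virt.  The chunks covering the first `reserved'
 * objects are allocated and mapped up front and pinned with an extra
 * reference, since they hold objects reserved for use by the
 * firmware.
 */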
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
                          u64 virt, int obj_size,
                          int nobj, int reserved,
                          int use_lowmem, int use_coherent)
{
    struct mthca_icm_table *table;
    int obj_per_chunk;
    int num_icm;
    unsigned chunk_size;
    int i;

    obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
    num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

    table = kmalloc(struct_size(table, icm, num_icm), GFP_KERNEL);
    if (!table)
        return NULL;

    table->virt     = virt;
    table->num_icm  = num_icm;
    table->num_obj  = nobj;
    table->obj_size = obj_size;
    table->lowmem   = use_lowmem;
    table->coherent = use_coherent;
    mutex_init(&table->mutex);

    for (i = 0; i < num_icm; ++i)
        table->icm[i] = NULL;

    for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
        chunk_size = MTHCA_TABLE_CHUNK_SIZE;
        if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
            chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

        table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                        (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                        __GFP_NOWARN, use_coherent);
        if (!table->icm[i])
            goto err;
        if (mthca_MAP_ICM(dev, table->icm[i],
                  virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
            mthca_free_icm(dev, table->icm[i], table->coherent);
            table->icm[i] = NULL;
            goto err;
        }

        /*
         * Add a reference to this ICM chunk so that it never
         * gets freed (since it contains reserved firmware objects).
         */
        ++table->icm[i]->refcount;
    }

    return table;

err:
    for (i = 0; i < num_icm; ++i)
        if (table->icm[i]) {
            mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
                    MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
            mthca_free_icm(dev, table->icm[i], table->coherent);
        }

    kfree(table);

    return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
    int i;

    for (i = 0; i < table->num_icm; ++i)
        if (table->icm[i]) {
            mthca_UNMAP_ICM(dev,
                    table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                    MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
            mthca_free_icm(dev, table->icm[i], table->coherent);
        }

    kfree(table);
}

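/* ICM virtual address of the given page within a UAR's context (UARC). */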
static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
    return dev->uar_table.uarc_base +
        uar->index * dev->uar_table.uarc_size +
        page * MTHCA_ICM_PAGE_SIZE;
}

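/*
 * Map a userspace doorbell page into the device's UAR context: pin
 * the page, DMA-map it, and point the corresponding UARC page at it.
 * Further calls for the same page just take another reference.
 */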
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
              struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
    struct page *pages[1];
    int ret = 0;
    int i;

    if (!mthca_is_memfree(dev))
        return 0;

    if (index < 0 || index > dev->uar_table.uarc_size / 8)
        return -EINVAL;

    mutex_lock(&db_tab->mutex);

    i = index / MTHCA_DB_REC_PER_PAGE;

    if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE)       ||
        (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
        (uaddr & 4095)) {
        ret = -EINVAL;
        goto out;
    }

    if (db_tab->page[i].refcount) {
        ++db_tab->page[i].refcount;
        goto out;
    }

    ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
                  FOLL_WRITE | FOLL_LONGTERM, pages);
    if (ret < 0)
        goto out;

    sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
            uaddr & ~PAGE_MASK);

    ret = dma_map_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
             DMA_TO_DEVICE);
    if (ret < 0) {
        unpin_user_page(pages[0]);
        goto out;
    }

    ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
                 mthca_uarc_virt(dev, uar, i));
    if (ret) {
        dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
                 DMA_TO_DEVICE);
        unpin_user_page(sg_page(&db_tab->page[i].mem));
        goto out;
    }

    db_tab->page[i].uvirt    = uaddr;
    db_tab->page[i].refcount = 1;

out:
    mutex_unlock(&db_tab->mutex);
    return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
             struct mthca_user_db_table *db_tab, int index)
{
    if (!mthca_is_memfree(dev))
        return;

    /*
     * To make our bookkeeping simpler, we don't unmap DB
     * pages until we clean up the whole db table.
     */

    mutex_lock(&db_tab->mutex);

    --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

    mutex_unlock(&db_tab->mutex);
}

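/* Allocate and initialize the per-context userspace doorbell table. */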
struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
    struct mthca_user_db_table *db_tab;
    int npages;
    int i;

    if (!mthca_is_memfree(dev))
        return NULL;

    npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
    db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL);
    if (!db_tab)
        return ERR_PTR(-ENOMEM);

    mutex_init(&db_tab->mutex);
    for (i = 0; i < npages; ++i) {
        db_tab->page[i].refcount = 0;
        db_tab->page[i].uvirt    = 0;
        sg_init_table(&db_tab->page[i].mem, 1);
    }

    return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
                   struct mthca_user_db_table *db_tab)
{
    int i;

    if (!mthca_is_memfree(dev))
        return;

    for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
        if (db_tab->page[i].uvirt) {
            mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
            dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
                     DMA_TO_DEVICE);
            unpin_user_page(sg_page(&db_tab->page[i].mem));
        }
    }

    kfree(db_tab);
}

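/*
 * Allocate a doorbell record.  CQ arm and SQ doorbells (group 0) are
 * handed out from the bottom of the doorbell table growing upward,
 * while CQ set_ci, RQ, and SRQ doorbells (group 1) grow downward
 * from the top; a new UARC page is allocated and mapped on demand
 * when no existing page has a free slot.  Returns the doorbell index
 * or a negative errno.
 */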
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
           u32 qn, __be32 **db)
{
    int group;
    int start, end, dir;
    int i, j;
    struct mthca_db_page *page;
    int ret = 0;

    mutex_lock(&dev->db_tab->mutex);

    switch (type) {
    case MTHCA_DB_TYPE_CQ_ARM:
    case MTHCA_DB_TYPE_SQ:
        group = 0;
        start = 0;
        end   = dev->db_tab->max_group1;
        dir   = 1;
        break;

    case MTHCA_DB_TYPE_CQ_SET_CI:
    case MTHCA_DB_TYPE_RQ:
    case MTHCA_DB_TYPE_SRQ:
        group = 1;
        start = dev->db_tab->npages - 1;
        end   = dev->db_tab->min_group2;
        dir   = -1;
        break;

    default:
        ret = -EINVAL;
        goto out;
    }

    for (i = start; i != end; i += dir)
        if (dev->db_tab->page[i].db_rec &&
            !bitmap_full(dev->db_tab->page[i].used,
                 MTHCA_DB_REC_PER_PAGE)) {
            page = dev->db_tab->page + i;
            goto found;
        }

    for (i = start; i != end; i += dir)
        if (!dev->db_tab->page[i].db_rec) {
            page = dev->db_tab->page + i;
            goto alloc;
        }

    if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
        ret = -ENOMEM;
        goto out;
    }

    if (group == 0)
        ++dev->db_tab->max_group1;
    else
        --dev->db_tab->min_group2;

    page = dev->db_tab->page + end;

alloc:
    page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
                      MTHCA_ICM_PAGE_SIZE, &page->mapping,
                      GFP_KERNEL);
    if (!page->db_rec) {
        ret = -ENOMEM;
        goto out;
    }

    ret = mthca_MAP_ICM_page(dev, page->mapping,
                 mthca_uarc_virt(dev, &dev->driver_uar, i));
    if (ret) {
        dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                  page->db_rec, page->mapping);
        goto out;
    }

    bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
    j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
    set_bit(j, page->used);

    if (group == 1)
        j = MTHCA_DB_REC_PER_PAGE - 1 - j;

    ret = i * MTHCA_DB_REC_PER_PAGE + j;

    page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

    *db = (__be32 *) &page->db_rec[j];

out:
    mutex_unlock(&dev->db_tab->mutex);

    return ret;
}

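/*
 * Release a doorbell record, unmapping and freeing its UARC page
 * when the page becomes empty (except for pages in the middle of
 * group 0; see the XXX note below).
 */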
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
    int i, j;
    struct mthca_db_page *page;

    i = db_index / MTHCA_DB_REC_PER_PAGE;
    j = db_index % MTHCA_DB_REC_PER_PAGE;

    page = dev->db_tab->page + i;

    mutex_lock(&dev->db_tab->mutex);

    page->db_rec[j] = 0;
    if (i >= dev->db_tab->min_group2)
        j = MTHCA_DB_REC_PER_PAGE - 1 - j;
    clear_bit(j, page->used);

    if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
        i >= dev->db_tab->max_group1 - 1) {
        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

        dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                  page->db_rec, page->mapping);
        page->db_rec = NULL;

        if (i == dev->db_tab->max_group1) {
            --dev->db_tab->max_group1;
            /* XXX may be able to unmap more pages now */
        }
        if (i == dev->db_tab->min_group2)
            ++dev->db_tab->min_group2;
    }

    mutex_unlock(&dev->db_tab->mutex);
}

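/* Set up the kernel doorbell table covering the driver UAR's context memory. */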
int mthca_init_db_tab(struct mthca_dev *dev)
{
    int i;

    if (!mthca_is_memfree(dev))
        return 0;

    dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
    if (!dev->db_tab)
        return -ENOMEM;

    mutex_init(&dev->db_tab->mutex);

    dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
    dev->db_tab->max_group1 = 0;
    dev->db_tab->min_group2 = dev->db_tab->npages - 1;

    dev->db_tab->page = kmalloc_array(dev->db_tab->npages,
                      sizeof(*dev->db_tab->page),
                      GFP_KERNEL);
    if (!dev->db_tab->page) {
        kfree(dev->db_tab);
        return -ENOMEM;
    }

    for (i = 0; i < dev->db_tab->npages; ++i)
        dev->db_tab->page[i].db_rec = NULL;

    return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
    int i;

    if (!mthca_is_memfree(dev))
        return;

    /*
     * To keep mthca_free_db() simple, we don't always free UARC
     * pages when they become empty, so we need to sweep through
     * the doorbell pages and free any leftover pages now.
     */
    for (i = 0; i < dev->db_tab->npages; ++i) {
        if (!dev->db_tab->page[i].db_rec)
            continue;

        if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
            mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

        dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                  dev->db_tab->page[i].db_rec,
                  dev->db_tab->page[i].mapping);
    }

    kfree(dev->db_tab->page);
    kfree(dev->db_tab);
}