/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
 * Copyright (c) 2017 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 */

#include <rdma/ib_umem.h>
#include "hns_roce_device.h"

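/*
 * Map the user page that holds the doorbell record at @virt. Pages are
 * shared between all doorbells of a ucontext: a page already on
 * context->page_list is reused and only its refcount is bumped; otherwise
 * the page is pinned with ib_umem_get() and added to the list.
 */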
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
                         struct hns_roce_db *db)
{
    unsigned long page_addr = virt & PAGE_MASK;
    struct hns_roce_user_db_page *page;
    unsigned int offset;
    int ret = 0;

    mutex_lock(&context->page_mutex);

    list_for_each_entry(page, &context->page_list, list)
        if (page->user_virt == page_addr)
            goto found;

    page = kmalloc(sizeof(*page), GFP_KERNEL);
    if (!page) {
        ret = -ENOMEM;
        goto out;
    }

    refcount_set(&page->refcount, 1);
    page->user_virt = page_addr;
    page->umem = ib_umem_get(context->ibucontext.device, page_addr,
                             PAGE_SIZE, 0);
    if (IS_ERR(page->umem)) {
        ret = PTR_ERR(page->umem);
        kfree(page);
        goto out;
    }

    list_add(&page->list, &context->page_list);

found:
    offset = virt - page_addr;
    db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
    db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
    db->u.user_page = page;
    refcount_inc(&page->refcount);

out:
    mutex_unlock(&context->page_mutex);

    return ret;
}

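/*
 * Release one user of a doorbell page. The extra reference taken when the
 * page was first created keeps the count at users + 1, so the page is only
 * unpinned and freed once refcount_dec_if_one() sees the last reference.
 */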
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
                            struct hns_roce_db *db)
{
    mutex_lock(&context->page_mutex);

    refcount_dec(&db->u.user_page->refcount);
    if (refcount_dec_if_one(&db->u.user_page->refcount)) {
        list_del(&db->u.user_page->list);
        ib_umem_release(db->u.user_page->umem);
        kfree(db->u.user_page);
    }

    mutex_unlock(&context->page_mutex);
}

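/*
 * Allocate a DMA-coherent page for kernel doorbell records. Free space is
 * tracked by a small two-order buddy allocator: every order-1 slot starts
 * out free, and the order-0 bitmap starts empty (zeroed by kzalloc).
 */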
static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
                    struct device *dma_device)
{
    struct hns_roce_db_pgdir *pgdir;

    pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
    if (!pgdir)
        return NULL;

    bitmap_fill(pgdir->order1,
                HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
    pgdir->bits[0] = pgdir->order0;
    pgdir->bits[1] = pgdir->order1;
    pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
                                     &pgdir->db_dma, GFP_KERNEL);
    if (!pgdir->page) {
        kfree(pgdir);
        return NULL;
    }

    return pgdir;
}

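/*
 * Take a doorbell record of the requested order from a pgdir: use the first
 * free slot at this order or above; if a larger slot is split, mark its
 * unused buddy free at the requested order.
 */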
static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
                                        struct hns_roce_db *db, int order)
{
    unsigned long o;
    unsigned long i;

    for (o = order; o <= 1; ++o) {
        i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
        if (i < HNS_ROCE_DB_PER_PAGE >> o)
            goto found;
    }

    return -ENOMEM;

found:
    clear_bit(i, pgdir->bits[o]);

    i <<= o;

    if (o > order)
        set_bit(i ^ 1, pgdir->bits[order]);

    db->u.pgdir = pgdir;
    db->index   = i;
    db->db_record   = pgdir->page + db->index;
    db->dma     = pgdir->db_dma  + db->index * HNS_ROCE_DB_UNIT_SIZE;
    db->order   = order;

    return 0;
}

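/*
 * Allocate a kernel doorbell record: try every pgdir already on
 * hr_dev->pgdir_list, then fall back to allocating a fresh page.
 */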
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
                      int order)
{
    struct hns_roce_db_pgdir *pgdir;
    int ret = 0;

    mutex_lock(&hr_dev->pgdir_mutex);

    list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)
        if (!hns_roce_alloc_db_from_pgdir(pgdir, db, order))
            goto out;

    pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);
    if (!pgdir) {
        ret = -ENOMEM;
        goto out;
    }

    list_add(&pgdir->list, &hr_dev->pgdir_list);

    /* This should never fail -- we just allocated an empty page: */
    WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir, db, order));

out:
    mutex_unlock(&hr_dev->pgdir_mutex);

    return ret;
}

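/*
 * Return a doorbell record to its pgdir, merging it with a free buddy where
 * possible; once the whole page is free again, the DMA page and the pgdir
 * are released.
 */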
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
{
    unsigned long o;
    unsigned long i;

    mutex_lock(&hr_dev->pgdir_mutex);

    o = db->order;
    i = db->index;

    if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
        clear_bit(i ^ 1, db->u.pgdir->order0);
        ++o;
    }

    i >>= o;
    set_bit(i, db->u.pgdir->bits[o]);

    if (bitmap_full(db->u.pgdir->order1,
                    HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
        dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
                          db->u.pgdir->db_dma);
        list_del(&db->u.pgdir->list);
        kfree(db->u.pgdir);
    }

    mutex_unlock(&hr_dev->pgdir_mutex);
}