#include <linux/slab.h>
#include <rdma/uverbs_ioctl.h>

#include "mlx4_ib.h"

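/*
 * One pinned page of userspace memory that backs one or more doorbell
 * records.  Pages are kept on the per-ucontext db_page_list and shared:
 * refcnt counts how many doorbells currently live in the page, and the
 * page is unpinned and freed only when the last user goes away.
 */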
struct mlx4_ib_user_db_page {
	struct list_head	list;
	struct ib_umem	       *umem;
	unsigned long		user_virt;
	int			refcnt;
};

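/*
 * Map the userspace doorbell record at @virt into @db.  If another doorbell
 * in the caller's ucontext has already pinned the page containing @virt,
 * that page is reused and only its reference count is bumped; otherwise the
 * page is pinned with ib_umem_get() and added to the per-context list.
 * db->dma is set to the DMA address of the doorbell record itself (page
 * address plus offset within the page).
 */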
int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
			struct mlx4_db *db)
{
	struct mlx4_ib_user_db_page *page;
	int err = 0;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);

	mutex_lock(&context->db_page_mutex);

	/* Reuse an already-pinned page if this doorbell falls inside it. */
	list_for_each_entry(page, &context->db_page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof *page, GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	/* Pin the user page that contains the doorbell record. */
	page->user_virt = (virt & PAGE_MASK);
	page->refcnt    = 0;
	page->umem      = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
				      PAGE_SIZE, 0);
	if (IS_ERR(page->umem)) {
		err = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->db_page_list);

found:
	/* DMA address of the doorbell record = page address + page offset. */
	db->dma		= sg_dma_address(page->umem->sgt_append.sgt.sgl) +
			  (virt & ~PAGE_MASK);
	db->u.user_page = page;
	++page->refcnt;

out:
	mutex_unlock(&context->db_page_mutex);

	return err;
}

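/*
 * Release one reference on the doorbell page behind @db.  When the last
 * doorbell using the page is unmapped, the page is removed from the
 * context's list, unpinned via ib_umem_release() and freed.
 */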
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
{
	mutex_lock(&context->db_page_mutex);

	if (!--db->u.user_page->refcnt) {
		list_del(&db->u.user_page->list);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}

	mutex_unlock(&context->db_page_mutex);
}
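
/*
 * Illustrative sketch of a typical caller, loosely modeled on a CQ
 * creation/teardown path.  This is an assumption for illustration only:
 * the exact command struct, field names and error labels may differ
 * between kernel versions.
 *
 *	struct mlx4_ib_create_cq ucmd;
 *
 *	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
 *		return -EFAULT;
 *
 *	err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
 *	if (err)
 *		goto err_free;
 *
 *	...
 *
 *	mlx4_ib_db_unmap_user(rdma_udata_to_drv_context(udata,
 *				struct mlx4_ib_ucontext, ibucontext),
 *			      &cq->db);
 */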