0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 #include <linux/kref.h>
0034 #include <linux/slab.h>
0035 #include <linux/sched/mm.h>
0036 #include <rdma/ib_umem.h>
0037
0038 #include "mlx5_ib.h"
0039
/*
 * One entry per distinct (mm, page-aligned user VA) doorbell page.
 * Entries live on the ucontext's db_page_list and are shared by every
 * doorbell record that falls within the same user page.  All fields are
 * protected by the ucontext's db_page_mutex (see map/unmap below).
 */
struct mlx5_ib_user_db_page {
	struct list_head list;		/* node in context->db_page_list */
	struct ib_umem *umem;		/* pinned user page, source of the DMA address */
	unsigned long user_virt;	/* page-aligned user VA this entry covers */
	int refcnt;			/* number of mapped doorbells using this page */
	struct mm_struct *mm;		/* owning address space, held via mmgrab() */
};
0047
0048 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
0049 struct mlx5_db *db)
0050 {
0051 struct mlx5_ib_user_db_page *page;
0052 int err = 0;
0053
0054 mutex_lock(&context->db_page_mutex);
0055
0056 list_for_each_entry(page, &context->db_page_list, list)
0057 if ((current->mm == page->mm) &&
0058 (page->user_virt == (virt & PAGE_MASK)))
0059 goto found;
0060
0061 page = kmalloc(sizeof(*page), GFP_KERNEL);
0062 if (!page) {
0063 err = -ENOMEM;
0064 goto out;
0065 }
0066
0067 page->user_virt = (virt & PAGE_MASK);
0068 page->refcnt = 0;
0069 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
0070 PAGE_SIZE, 0);
0071 if (IS_ERR(page->umem)) {
0072 err = PTR_ERR(page->umem);
0073 kfree(page);
0074 goto out;
0075 }
0076 mmgrab(current->mm);
0077 page->mm = current->mm;
0078
0079 list_add(&page->list, &context->db_page_list);
0080
0081 found:
0082 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
0083 (virt & ~PAGE_MASK);
0084 db->u.user_page = page;
0085 ++page->refcnt;
0086
0087 out:
0088 mutex_unlock(&context->db_page_mutex);
0089
0090 return err;
0091 }
0092
0093 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
0094 {
0095 mutex_lock(&context->db_page_mutex);
0096
0097 if (!--db->u.user_page->refcnt) {
0098 list_del(&db->u.user_page->list);
0099 mmdrop(db->u.user_page->mm);
0100 ib_umem_release(db->u.user_page->umem);
0101 kfree(db->u.user_page);
0102 }
0103
0104 mutex_unlock(&context->db_page_mutex);
0105 }