// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

void rxe_mmap_release(struct kref *ref)
{
	struct rxe_mmap_info *ip = container_of(ref,
					struct rxe_mmap_info, ref);
	struct rxe_dev *rxe = to_rdev(ip->context->device);

	spin_lock_bh(&rxe->pending_lock);

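	/* drop the object from the pending list if no mmap() ever claimed it */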
	if (!list_empty(&ip->pending_mmaps))
		list_del(&ip->pending_mmaps);

	spin_unlock_bh(&rxe->pending_lock);

	vfree(ip->obj);
	kfree(ip);
}
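
/*
 * open and close keep track of how many times the memory region is
 * mapped, to avoid releasing it while it is still in use.
 */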
static void rxe_vma_open(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};
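
/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Return: zero if the mmap is OK, otherwise a negative errno
 */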
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;
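
	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */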
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;
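
		/* Don't allow a mmap larger than the object. */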
		if (size > ip->info.size) {
			pr_err("mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	pr_warn("unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;

found_it:
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

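	/* map the object's vmalloc buffer into the user's VMA */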
	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		pr_err("err %d from remap_vmalloc_range\n", ret);
		goto done;
	}

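	/* take a reference for this mapping; rxe_vma_close() drops it */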
	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;
	rxe_vma_open(vma);
done:
	return ret;
}
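
/*
 * Allocate information for rxe_mmap
 */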
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rxe_mmap_info *ip;

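	/* a mmap object only makes sense for a user consumer */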
	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

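	/* reserve a device-unique, SHMLBA-aligned offset as the mmap cookie */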
	spin_lock_bh(&rxe->mmap_offset_lock);

	if (rxe->mmap_offset == 0)
		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	ip->info.offset = rxe->mmap_offset;
	rxe->mmap_offset += ALIGN(size, SHMLBA);

	spin_unlock_bh(&rxe->mmap_offset_lock);

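	/* init the list head so rxe_mmap_release() can test list_empty() */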
	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->info.size = size;
	ip->context =
		container_of(udata, struct uverbs_attr_bundle, driver_udata)
			->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}