#include <linux/xarray.h>
#include "uverbs.h"
#include "core_priv.h"
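
/**
 * rdma_umap_priv_init() - Initialize the private data of a vma
 * @priv: The already allocated private data
 * @vma: The vm area struct that needs private data
 * @entry: entry into the mmap_xa that needs to be linked with this vma
 *
 * Takes a reference on @entry (if one is given), stores @priv as the vma's
 * private data and links the mapping into the owning ufile's umaps list so
 * the core can find and revoke the mapping later (e.g. on device
 * disassociation).
 */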
void rdma_umap_priv_init(struct rdma_umap_priv *priv,
			 struct vm_area_struct *vma,
			 struct rdma_user_mmap_entry *entry)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;

	priv->vma = vma;
	if (entry) {
		kref_get(&entry->ref);
		priv->entry = entry;
	}
	vma->vm_private_data = priv;

	mutex_lock(&ufile->umap_lock);
	list_add(&priv->list, &ufile->umaps);
	mutex_unlock(&ufile->umap_lock);
}
EXPORT_SYMBOL(rdma_umap_priv_init);
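
/**
 * rdma_user_mmap_io() - Map IO memory into a process
 * @ucontext: associated user context
 * @vma: the vma related to the current mmap call
 * @pfn: pfn to map
 * @size: size to map
 * @prot: pgprot to use in remap call
 * @entry: mmap_xa entry assigned to this vma, may be NULL
 *
 * Intended to be called by drivers from their mmap handlers when they want
 * to map IO memory (such as PCIe BAR space) into userspace. The vma must be
 * MAP_SHARED and exactly @size bytes long.
 *
 * Return: 0 on success, -EINVAL on wrong flags or size, -EAGAIN if the
 * remap fails.
 */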
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot,
		      struct rdma_user_mmap_entry *entry)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	struct rdma_umap_priv *priv;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_end - vma->vm_start != size)
		return -EINVAL;

	/* Must be called on a vma backed by this ufile's file */
	if (WARN_ON(!vma->vm_file ||
		    vma->vm_file->private_data != ufile))
		return -EINVAL;
	lockdep_assert_held(&ufile->device->disassociate_srcu);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	vma->vm_page_prot = prot;
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma, entry);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);
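
/**
 * rdma_user_mmap_entry_get_pgoff() - Get an entry from the mmap_xa
 * @ucontext: associated user context
 * @pgoff: the mmap offset >> PAGE_SHIFT
 *
 * Looks up the entry whose start_pgoff equals @pgoff and takes a reference
 * on it. Entries that are being torn down (zero refcount or marked
 * driver_removed) are not returned.
 *
 * Return: the entry starting at @pgoff, or NULL. The caller must drop the
 * reference with rdma_user_mmap_entry_put().
 */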
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
			       unsigned long pgoff)
{
	struct rdma_user_mmap_entry *entry;

	if (pgoff > U32_MAX)
		return NULL;

	xa_lock(&ucontext->mmap_xa);

	entry = xa_load(&ucontext->mmap_xa, pgoff);

	/*
	 * Only return an entry that starts at this pgoff and is still live:
	 * a zero refcount means the entry is already being freed, and
	 * driver_removed means no further mmaps are allowed on it.
	 */
	if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
	    !kref_get_unless_zero(&entry->ref))
		goto err;

	xa_unlock(&ucontext->mmap_xa);

	ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n",
		  pgoff, entry->npages);

	return entry;

err:
	xa_unlock(&ucontext->mmap_xa);
	return NULL;
}
EXPORT_SYMBOL(rdma_user_mmap_entry_get_pgoff);
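
/**
 * rdma_user_mmap_entry_get() - Get an entry for a given vma
 * @ucontext: associated user context
 * @vma: the vma being mmap'd into
 *
 * Like rdma_user_mmap_entry_get_pgoff(), but additionally requires the vma
 * to be MAP_SHARED and its size to match the entry exactly. On success a
 * reference is held on the returned entry.
 */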
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
			 struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *entry;

	if (!(vma->vm_flags & VM_SHARED))
		return NULL;
	entry = rdma_user_mmap_entry_get_pgoff(ucontext, vma->vm_pgoff);
	if (!entry)
		return NULL;
	/* The vma must cover the entry's pages exactly */
	if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
		rdma_user_mmap_entry_put(entry);
		return NULL;
	}
	return entry;
}
EXPORT_SYMBOL(rdma_user_mmap_entry_get);
/* kref release callback for struct rdma_user_mmap_entry */
static void rdma_user_mmap_entry_free(struct kref *kref)
{
	struct rdma_user_mmap_entry *entry =
		container_of(kref, struct rdma_user_mmap_entry, ref);
	struct ib_ucontext *ucontext = entry->ucontext;
	unsigned long i;

	/*
	 * The entry may occupy several consecutive xarray indices; erase
	 * them all now that the last reference has been dropped.
	 */
	xa_lock(&ucontext->mmap_xa);
	for (i = 0; i < entry->npages; i++)
		__xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i);
	xa_unlock(&ucontext->mmap_xa);

	ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n",
		  entry->start_pgoff, entry->npages);

	if (ucontext->device->ops.mmap_free)
		ucontext->device->ops.mmap_free(entry);
}
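
/**
 * rdma_user_mmap_entry_put() - Drop a reference to an entry
 * @entry: an entry in the mmap_xa
 *
 * When the last reference is dropped, the entry's pgoff range is erased
 * from the xarray and the driver's mmap_free() op (if any) is called.
 */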
void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry)
{
	kref_put(&entry->ref, rdma_user_mmap_entry_free);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_put);
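
/**
 * rdma_user_mmap_entry_remove() - Remove an entry once the driver is done
 * with it
 * @entry: an entry in the mmap_xa
 *
 * Marks the entry as driver_removed so no new mmaps can look it up, then
 * drops the reference taken at insertion. The entry itself stays alive
 * until every userspace mapping still referencing it is gone.
 */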
void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
{
	if (!entry)
		return;

	/*
	 * Mark the entry under xa_lock so that concurrent lookups in
	 * rdma_user_mmap_entry_get_pgoff() stop handing out references.
	 */
	xa_lock(&entry->ucontext->mmap_xa);
	entry->driver_removed = true;
	xa_unlock(&entry->ucontext->mmap_xa);
	kref_put(&entry->ref, rdma_user_mmap_entry_free);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
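
/**
 * rdma_user_mmap_entry_insert_range() - Insert an entry into the mmap_xa
 * within a given range
 * @ucontext: associated user context
 * @entry: the entry to insert into the mmap_xa
 * @length: length of the address that will be mmapped
 * @min_pgoff: lower bound of the range searched for a free pgoff
 * @max_pgoff: upper bound of the range searched for a free pgoff
 *
 * Reserves DIV_ROUND_UP(@length, PAGE_SIZE) consecutive page offsets in the
 * ucontext's mmap_xa within [@min_pgoff, @max_pgoff] and stores the chosen
 * offset in entry->start_pgoff. The driver hands this offset back to
 * userspace so a later mmap() can be matched to the entry.
 *
 * Return: 0 on success, -EINVAL if @entry is NULL, -ENOMEM if no free range
 * could be found or the insertion failed.
 */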
int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
				      struct rdma_user_mmap_entry *entry,
				      size_t length, u32 min_pgoff,
				      u32 max_pgoff)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
	u32 xa_first, xa_last, npages;
	int err;
	u32 i;

	if (!entry)
		return -EINVAL;

	kref_init(&entry->ref);
	entry->ucontext = ucontext;

	/*
	 * Do the whole search-and-insert without interruption from another
	 * thread: finding a free range and storing into it are separate
	 * steps, and a concurrent inserter could otherwise claim part of
	 * the range we are planning to use.
	 */
	mutex_lock(&ufile->umap_lock);

	xa_lock(&ucontext->mmap_xa);

	/* Find a run of npages consecutive free indices */
	npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
	entry->npages = npages;
	while (true) {
		/* First find a free index */
		xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			goto err_unlock;

		xa_first = xas.xa_index;

		/* Is there enough room to hold the whole range? */
		if (check_add_overflow(xa_first, npages, &xa_last))
			goto err_unlock;

		/*
		 * Now look for the first present entry within the range; if
		 * the range is empty we can store into it.
		 */
		xas_next_entry(&xas, xa_last - 1);
		if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)
			break;
	}

	for (i = xa_first; i < xa_last; i++) {
		err = __xa_insert(&ucontext->mmap_xa, i, entry, GFP_KERNEL);
		if (err)
			goto err_undo;
	}

	entry->start_pgoff = xa_first;
	xa_unlock(&ucontext->mmap_xa);
	mutex_unlock(&ufile->umap_lock);

	ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#x] inserted\n",
		  entry->start_pgoff, npages);

	return 0;

err_undo:
	for (; i > xa_first; i--)
		__xa_erase(&ucontext->mmap_xa, i - 1);

err_unlock:
	xa_unlock(&ucontext->mmap_xa);
	mutex_unlock(&ufile->umap_lock);
	return -ENOMEM;
}
EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range);
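
/**
 * rdma_user_mmap_entry_insert() - Insert an entry into the mmap_xa
 * @ucontext: associated user context
 * @entry: the entry to insert into the mmap_xa
 * @length: length of the address that will be mmapped
 *
 * Same as rdma_user_mmap_entry_insert_range(), searching the full
 * 0..U32_MAX pgoff range.
 */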
int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
				struct rdma_user_mmap_entry *entry,
				size_t length)
{
	return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0,
						 U32_MAX);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_insert);