#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_userptr.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER
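/*
 * i915_gem_userptr_invalidate - interval notifier callback for a userptr
 * object's address range.
 *
 * Bump the notifier sequence under the notifier lock so that concurrent
 * lookups re-validate their pages, then wait for all outstanding GPU work
 * on the object to finish before the kernel may unmap the backing pages.
 */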
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

	write_lock(&i915->mm.notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	write_unlock(&i915->mm.notifier_lock);

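	/*
	 * We don't wait when the process is exiting: the object will be
	 * cleaned up, and its pages released, as part of that teardown
	 * anyway.
	 */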
	if (current->flags & PF_EXITING)
		return true;

	r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
				  MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	struct page **pvec;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	sg_page_sizes = i915_sg_dma_sizes(st->sgl);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

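	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we
	 * know that the object will never have been written to.
	 */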
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
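			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock the
			 * page in order to dirty it -- holding the page
			 * reference is not sufficient to prevent the inode
			 * from being truncated.
			 *
			 * Only a trylock is used: the notifier can be run
			 * while the page is already locked elsewhere (for
			 * example during migration), and locking it
			 * unconditionally here could deadlock. If the
			 * trylock fails, the page is owned by that other
			 * caller and its dirty state is left to it.
			 */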
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = ret = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}
	ret = 0;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    !obj->userptr.page_ref ? notifier_seq :
							     obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
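		/*
		 * We collided with the mmu notifier: the pages were
		 * invalidated after submit_init(), so the caller has to
		 * release its resources and retry the submission.
		 */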
		return -EAGAIN;
	}

	return 0;
}

int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
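		/*
		 * The pin/unpin pair forces the pages gathered by
		 * submit_init() to be installed on the object; validation
		 * only cares that this succeeds, so the pin is dropped
		 * again straight away.
		 */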
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	const unsigned long end = addr + len;
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	mmap_read_lock(mm);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
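		/* Check for holes; note that addr is advanced below. */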
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		if (vma->vm_end >= end) {
			ret = 0;
			break;
		}

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	return ret;
}

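/*
 * Creates a new GEM object that wraps ordinary process memory ("userptr").
 *
 * Restrictions imposed on the user memory, all enforced below:
 *
 * 1. The address range must be page aligned, with a non-zero size that does
 *    not exceed the maximum object size we can map.
 * 2. The range must consist of normal system memory; with the PROBE flag the
 *    kernel additionally rejects PFN/IO mappings up front.
 * 3. The hardware must be able to keep the pages coherent with the CPU (LLC
 *    or snooping), since userspace continues to access the memory directly.
 * 4. Read-only objects are only allowed when the GPU can actually enforce
 *    read-only mappings.
 *
 * The object created here is not a first-class citizen: mmap of the GEM
 * handle, pread/pwrite and dma-buf export are all rejected, and the
 * unsynchronized (no-notifier) mode is no longer supported.
 */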
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
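		/*
		 * We cannot support coherent userptr objects on hardware
		 * without LLC and broken snooping.
		 */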
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
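		/*
		 * On almost all of the older hardware, we cannot tell the
		 * GPU that a page is read-only.
		 */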
		if (!to_gt(dev_priv)->vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
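		/*
		 * Check that the range points at real struct pages and not
		 * at PFN/IO mappings (at this moment in time!).
		 */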
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

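	/*
	 * And keep a pointer to the current->mm for resolving the user pages
	 * at binding: registering the interval notifier ties the object to
	 * this mm and lets us detect when the backing pages are invalidated.
	 */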
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

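	/* drop reference from allocate - handle holds it now */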
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
	rwlock_init(&dev_priv->mm.notifier_lock);
#endif

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
}