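/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */
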
#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"

static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
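
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 *
 * An illustrative userspace call sequence (uapi names; legacy non-discrete
 * path only):
 *
 *	struct drm_i915_gem_mmap arg = { .handle = handle, .size = size };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	ptr = (void *)(uintptr_t)arg.addr_ptr;
 */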
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;
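
	/*
	 * mmap ioctl is disallowed for all discrete platforms,
	 * and for all platforms with GRAPHICS_VER > 12.
	 */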
	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
		return -EOPNOTSUPP;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !pat_enabled())
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;
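
	/* prime objects have no backing filp to GEM mmap over */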
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
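
/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both the object and any fence
 *     needed for tiled access had to fit into the available aperture.
 *
 * 1 - Improved fault handler: fault in only the page containing the
 *     faulting address rather than the entire object.
 *
 * 2 - Partial views: objects larger than the mappable aperture can be
 *     mapped a chunk at a time.
 *
 * 3 - No implicit set-domain(GTT) or synchronisation on the initial
 *     pagefault.
 *
 * 4 - Multiple fault handlers per object, selected by the object's
 *     backing store (MMAP_OFFSET).
 */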
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}

static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
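
	/* If the partial covers the entire object, just create a normal VMA. */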
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}

static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM:
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
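		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */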
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;
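
	/* Sanity check that we allow writing into this object */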
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return VM_FAULT_NOPAGE;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_has_struct_page(obj)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}
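
	/* PTEs are revoked in obj->ops->put_pages() */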
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	return i915_error_to_vmf_fault(err);
}

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;
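
	/* We don't use vmf->pgoff since that has the fake offset */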
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;
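
	/* Sanity check that we allow writing into this object */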
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;
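
	/* Now pin it into the GTT as needed */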
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
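		/* Use a partial view if it is bigger than available space */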
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK;
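
		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */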
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
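
		/*
		 * The entire mappable GGTT is pinned? Unexpected!
		 * Try to evict the object we locked too, as normally we skip it
		 * due to lack of short term pinning inside execbuf.
		 */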
		if (vma == ERR_PTR(-ENOSPC)) {
			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
			if (!ret) {
				ret = i915_gem_evict_vm(&ggtt->vm, &ww);
				mutex_unlock(&ggtt->vm.mutex);
			}
			if (ret)
				goto err_reset;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}
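
	/* Access to snoopable pages through the GTT is incoherent. */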
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;
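
	/* Finally, remap it using the new GTT offset */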
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);
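
	/* Mark as being mmapped into userspace for later revocation */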
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
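
	/* Track the mmo associated with the fenced vma */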
	vma->mmo = mmo;

	/* CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND is an int Kconfig (ms), so always defined */
	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (range_overflows_t(u64, addr, len, obj->base.size))
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;
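
	/* As this is primarily for debugging, let's focus on simplicity */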
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}
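
/**
 * i915_gem_object_release_mmap_gtt - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 */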
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;
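
	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */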
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);
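
	/*
	 * Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE *should* be sufficient,
	 * an extra barrier here just provides us with a bit of paranoid
	 * documentation about our requirement to serialise memory writes
	 * before touching registers / GSM.
	 */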
	wmb();

out:
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	if (obj->ops->unmap_virtual)
		obj->ops->unmap_virtual(obj);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
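		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */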
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			/* Lost the race; discard ours and reuse the existing node */
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;
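
	/* Attempt to reap some mmap space from dead objects */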
	err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
					       NULL);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_i915_gem_object *obj,
		     enum i915_mmap_type mmap_type,
		     u64 *offset, struct drm_file *file)
{
	struct i915_mmap_offset *mmo;

	if (i915_gem_object_never_mmap(obj))
		return -ENODEV;

	if (obj->ops->mmap_offset) {
		if (mmap_type != I915_MMAP_TYPE_FIXED)
			return -ENODEV;

		*offset = obj->ops->mmap_offset(obj);
		return 0;
	}

	if (mmap_type == I915_MMAP_TYPE_FIXED)
		return -ENODEV;

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return -ENODEV;

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	return 0;
}

static int
__assign_mmap_offset_handle(struct drm_file *file,
			    u32 handle,
			    enum i915_mmap_type mmap_type,
			    u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;
	err = __assign_mmap_offset(obj, mmap_type, offset, file);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}
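
/**
 * i915_gem_dumb_mmap_offset - return the fake mmap offset for a gem object
 * @file: drm file
 * @dev: drm device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback.
 */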
int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	struct drm_i915_private *i915 = to_i915(dev);
	enum i915_mmap_type mmap_type;

	if (HAS_LMEM(i915))
		mmap_type = I915_MMAP_TYPE_FIXED;
	else if (pat_enabled())
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}
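
/**
 * i915_gem_mmap_offset_ioctl - prepare an object for mmap()ing via a fake offset
 * @dev: DRM device
 * @data: ioctl data blob (struct drm_i915_gem_mmap_offset)
 * @file: DRM file
 *
 * Looks up the object for @args->handle, validates the requested mapping
 * type against the platform (GTT needs a mappable aperture, WC/UC need PAT)
 * and returns a fake offset in @args->offset. Userspace then hands that
 * offset to mmap(2) on the DRM fd, and the fault handlers above do the real
 * work. An illustrative userspace sketch (uapi names):
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WB,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */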
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;
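
	/*
	 * No extensions are defined for this ioctl yet; walking the user
	 * extension chain with no handlers rejects anything userspace may
	 * have supplied.
	 */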
	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	case I915_MMAP_OFFSET_FIXED:
		type = I915_MMAP_TYPE_FIXED;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}

static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;
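
	/* Everyone shares a single global address space */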
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}
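
/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data. Since we need to be able
 * to resolve multiple mmap offsets which could be tied to a single gem
 * object.
 */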
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
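		/*
		 * Skip 0-refcnted objects as it is in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */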
		if (!node->driver_private) {
			mmo = container_of(node, struct i915_mmap_offset, vma_node);
			obj = i915_gem_object_get_rcu(mmo->obj);

			GEM_BUG_ON(obj && obj->ops->mmap_ops);
		} else {
			obj = i915_gem_object_get_rcu
				(container_of(node, struct drm_i915_gem_object,
					      base.vma_node));

			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
		}
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
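
	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */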
	vma_set_file(vma, anon);
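	/* Drop the initial creation reference, the vma is now holding one. */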
	fput(anon);

	if (obj->ops->mmap_ops) {
		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = obj->ops->mmap_ops;
		vma->vm_private_data = node->driver_private;
		return 0;
	}

	vma->vm_private_data = mmo;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_FIXED:
		GEM_WARN_ON(1);
		fallthrough;
	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif