#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

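/*
 * Reserve a drm_mm node in the mappable range of the GGTT, used by the CPU
 * pread/pwrite fallback paths below to map object pages through the aperture.
 */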
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

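/*
 * Unbind all VMAs attached to an object, e.g. so that its backing store can
 * be changed or released. Behaviour is tuned by the UNBIND flags (ACTIVE,
 * ASYNC, TEST, BARRIER, VM_TRYLOCK); the object lock must be held on entry.
 */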
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
	bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
	LIST_HEAD(still_in_list);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	int ret;

	assert_object_held(obj);

	if (list_empty(&obj->vma.list))
		return 0;

	/*
	 * Wake the device up front: runtime resume may allocate memory (for
	 * example via ACPI), which must not happen underneath vm->mutex as
	 * the shrinker needs that lock to make forward progress.
	 */
	wakeref = intel_runtime_pm_get(rpm);

try_again:
	ret = 0;
	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
			continue;

		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
			ret = -EBUSY;
			break;
		}

		/*
		 * Take a reference on the vm so the vma cannot be freed
		 * beneath us while we drop obj->vma.lock. If the vm is
		 * already being destroyed, skip the vma and retry after
		 * the RCU barrier below.
		 */
		ret = -EAGAIN;
		if (!i915_vm_tryget(vma->vm))
			break;

		spin_unlock(&obj->vma.lock);

		/*
		 * As i915_vma_parked() takes the object lock before vma
		 * destruction, and we already hold the object lock, the
		 * vma cannot be destroyed from under us here.
		 */
		ret = -EBUSY;
		if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
			assert_object_held(vma->obj);
			ret = i915_vma_unbind_async(vma, vm_trylock);
		}

		if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
				      !i915_vma_is_active(vma))) {
			if (vm_trylock) {
				if (mutex_trylock(&vma->vm->mutex)) {
					ret = __i915_vma_unbind(vma);
					mutex_unlock(&vma->vm->mutex);
				}
			} else {
				ret = i915_vma_unbind(vma);
			}
		}

		i915_vm_put(vma->vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice_init(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
		rcu_barrier(); /* wait for pending vm releases before retrying */
		goto try_again;
	}

	intel_runtime_pm_put(rpm, wakeref);

	return ret;
}

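/*
 * Per-page copy helper for the shmem pread path: optionally clflushes the
 * source range before copying it out to userspace.
 */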
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

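/*
 * Copy data out of the GGTT aperture to userspace. Try the atomic WC
 * io-mapping first and fall back to a sleeping mapping if the user page
 * needs to be faulted in. Returns the number of bytes left uncopied.
 */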
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

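/*
 * Set up GGTT access for CPU pread/pwrite: pin the whole object into the
 * mappable aperture if possible, otherwise reserve a single-page slot that
 * the copy loops can rebind page by page.
 */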
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
					     struct drm_mm_node *node,
					     bool write)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	vma = ERR_PTR(-ENODEV);
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_ww;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_ww;

	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
						  PIN_MAPPABLE |
						  PIN_NONBLOCK |
						  PIN_NOEVICT);
	if (vma == ERR_PTR(-EDEADLK)) {
		ret = -EDEADLK;
		goto err_ww;
	} else if (!IS_ERR(vma)) {
		node->start = i915_ggtt_offset(vma);
		node->flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
		if (ret)
			goto err_ww;
		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = NULL;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret) {
		if (drm_mm_node_allocated(node)) {
			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
			remove_mappable_node(ggtt, node);
		} else {
			i915_vma_unpin(vma);
		}
	}

err_ww:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return ret ? ERR_PTR(ret) : vma;
}

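/* Undo i915_gem_gtt_prepare(): unpin the pages and release the GGTT space. */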
static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
				 struct drm_mm_node *node,
				 struct i915_vma *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	i915_gem_object_unpin_pages(obj);
	if (drm_mm_node_allocated(node)) {
		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
		remove_mappable_node(ggtt, node);
	} else {
		i915_vma_unpin(vma);
	}
}

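/*
 * Fallback pread path that reads the object through the GGTT aperture, used
 * when the direct shmem path fails (e.g. the object has no struct pages or
 * copying to the user buffer faulted).
 */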
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	vma = i915_gem_gtt_prepare(obj, &node, false);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/*
		 * Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}

/**
 * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/*
	 * PREAD is disallowed for all platforms after TGL-LP.  This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);
	ret = -ENODEV;
	if (obj->ops->pread)
		ret = obj->ops->pread(obj, args);
	if (ret != -ENODEV)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

out:
	i915_gem_object_put(obj);
	return ret;
}

/*
 * This is the fast write path: copy directly into the WC-mapped aperture,
 * falling back to a faultable mapping if the atomic copy cannot complete.
 */
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * i915_gem_gtt_pwrite_fast - the fast pwrite path, copying the data directly
 * from userspace into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret = 0;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fall back, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_gtt_prepare(obj, &node, true);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/*
		 * Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}

		/*
		 * If we get a fault while copying data, then (presumably) our
		 * source page isn't available. ggtt_write() already retries
		 * with a non-atomic copy, so a failure here means the user
		 * address is genuinely unavailable.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/*
 * Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	/*
	 * If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

/**
 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/*
	 * PWRITE is disallowed for all platforms after TGL-LP.  This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes are not permitted into read-only objects. */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = -EFAULT;
	/*
	 * We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    i915_gem_cpu_write_needs_clflush(obj))
		/*
		 * Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(obj, args);
	}

err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_sw_finish_ioctl - Called when user space has finished writing
 * to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Called only during rpm suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves.
	 */
	list_for_each_entry_safe(obj, on,
				 &to_gt(i915)->ggtt->userfault_list, userfault_link)
		__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * The fences will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
		struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

		/*
		 * Ideally we would assert that the fence register is not
		 * live at this point (i.e. that nothing is using it as a pin
		 * for the memory behind it). Instead we just assume nobody
		 * is using it (barring the display code, which should be
		 * idle across suspend) and mark each allocated fence as
		 * dirty so that the correct value is restored, or the fence
		 * discarded, on resume.
		 */
		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

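/*
 * Drop a misplaced GGTT vma from the object's vma tree so that a fresh
 * instance can be created with the desired placement.
 */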
static void discard_ggtt_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	spin_lock(&obj->vma.lock);
	if (!RB_EMPTY_NODE(&vma->obj_node)) {
		rb_erase(&vma->obj_node, &obj->vma.tree);
		RB_CLEAR_NODE(&vma->obj_node);
	}
	spin_unlock(&obj->vma.lock);
}

struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_ggtt_view *view,
			    u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	int ret;

	GEM_WARN_ON(!ww);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/*
		 * If the required space is larger than the available
		 * aperture, we will not able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > ggtt->mappable_end)
			return ERR_PTR(-E2BIG);

		/*
		 * If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > ggtt->mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

new_vma:
	vma = i915_vma_instance(obj, &ggtt->vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			/*
			 * If this misplaced vma is too big (i.e. at least
			 * half the size of the aperture) or is not mappable
			 * and fenceable, the caller's fallback is cheaper
			 * than evicting enough of the aperture to make it
			 * fit, so report -ENOSPC rather than unbind.
			 */
			if (flags & PIN_MAPPABLE &&
			    (vma->fence_size > ggtt->mappable_end / 2 ||
			     !i915_vma_is_map_and_fenceable(vma)))
				return ERR_PTR(-ENOSPC);
		}

		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
			discard_ggtt_vma(vma);
			goto new_vma;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&ggtt->vm.mutex);
		i915_vma_revoke_fence(vma);
		mutex_unlock(&ggtt->vm.mutex);
	}

	ret = i915_vma_wait_for_bind(vma);
	if (ret) {
		i915_vma_unpin(vma);
		return ERR_PTR(ret);
	}

	return vma;
}

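/*
 * Convenience wrapper around i915_gem_object_ggtt_pin_ww() that supplies its
 * own ww acquire context, retrying on -EDEADLK as needed.
 */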
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *ret;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
						  alignment, flags);
		if (IS_ERR(ret))
			err = PTR_ERR(ret);
	}

	return err ? ERR_PTR(err) : ret;
}

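/*
 * I915_GEM_MADVISE: let userspace hint whether an object's backing storage
 * may be discarded under memory pressure (DONTNEED) or must be kept
 * (WILLNEED). Reports back whether the pages are still resident.
 */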
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED) {
		obj->mm.madv = args->madv;
		if (obj->ops->adjust_lru)
			obj->ops->adjust_lru(obj);
	}

	if (i915_gem_object_has_pages(obj) ||
	    i915_gem_object_has_self_managed_shrink_list(obj)) {
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		if (!list_empty(&obj->mm.link)) {
			struct list_head *list;

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);
		}
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;

	i915_gem_object_unlock(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
	intel_wopcm_init(&dev_priv->wopcm);

	ret = i915_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	/*
	 * Despite its name, intel_init_clock_gating applies both display
	 * clock gating workarounds, GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_gt_init(to_gt(dev_priv));
	if (ret)
		goto err_unlock;

	return 0;

	/*
	 * Unwinding is complicated by that we want to handle -EIO to mean
	 * disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irrevocably wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_unlock:
	i915_gem_drain_workqueue(dev_priv);

	if (ret != -EIO)
		intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);

	if (ret == -EIO) {
		/*
		 * Allow engines or uC initialisation to fail by marking the GPU
		 * as wedged. But we only want to do this when the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		if (!intel_gt_is_wedged(to_gt(dev_priv))) {
			i915_probe_error(dev_priv,
					 "Failed to initialize GPU, declaring it wedged!\n");
			intel_gt_set_wedged(to_gt(dev_priv));
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_ggtt_resume(to_gt(dev_priv)->ggtt);
		intel_init_clock_gating(dev_priv);
	}

	i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

void i915_gem_driver_register(struct drm_i915_private *i915)
{
	i915_gem_driver_register__shrinker(i915);

	intel_engines_driver_register(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
	intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_gt_driver_remove(to_gt(dev_priv));
	dev_priv->uabi_engines = RB_ROOT;

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
	intel_gt_driver_release(to_gt(dev_priv));

	intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);

	/* Flush any outstanding work, including the context release work. */
	i915_gem_drain_workqueue(dev_priv);

	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	i915_gem_init__mm(dev_priv);
	i915_gem_init__contexts(dev_priv);

	spin_lock_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	struct i915_drm_client *client;
	int ret = -ENOMEM;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		goto err_alloc;

	client = i915_drm_client_add(&i915->clients);
	if (IS_ERR(client)) {
		ret = PTR_ERR(client);
		goto err_client;
	}

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;
	file_priv->client = client;

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		goto err_context;

	return 0;

err_context:
	i915_drm_client_put(client);
err_client:
	kfree(file_priv);
err_alloc:
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif