// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"

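/*
 * GGTT updates on Broxton with VT-d enabled, and all VM updates on
 * Cherryview, must not be performed concurrently; callers use these
 * helpers to detect when that workaround applies.
 */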
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
	return IS_BROXTON(i915) && i915_vtd_active(i915);
}

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
	return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}

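/* Allocate a GTT paging-structure object backed by device local memory. */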
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	/*
	 * To avoid severe over-allocation when dealing with min_page_size
	 * restrictions, we override that behaviour here by allowing an object
	 * size and page layout which can be smaller. In practice this should
	 * be totally fine, since GTT paging structures are not typically
	 * inserted into the GTT.
	 *
	 * Note that we also hit this path for the scratch page, and for this
	 * case it might need to be 64K, but that should work fine here since
	 * we use the passed in size for the page size, which should ensure it
	 * also has the same alignment.
	 */
	obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
						    vm->lmem_pt_obj_flags);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath the vm mutex.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

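/*
 * Allocate a GTT paging-structure object backed by system memory. For
 * selftests, a fault may be injected here to force a shrink of all
 * objects before the allocation.
 */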
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	obj = i915_gem_object_create_internal(vm->i915, sz);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath the vm mutex.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

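/*
 * Pin and CPU-map a paging-structure object, taking the object lock
 * internally, and mark it unshrinkable while it is in use by the GTT.
 */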
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}

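/* As map_pt_dma(), but with the object lock already held by the caller. */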
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}

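/*
 * Unbind and remove every vma on @list. If the backing object is already
 * dying, keep the vm (and its mutex) alive via an extra dma-resv reference
 * and let the object destructor complete the vma teardown.
 */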
static void clear_vm_list(struct list_head *list)
{
	struct i915_vma *vma, *vn;

	list_for_each_entry_safe(vma, vn, list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (!i915_gem_object_get_rcu(obj)) {
			/*
			 * The object is dying, but has not yet cleared its
			 * vma list. Unbind the dying vma here to ensure our
			 * list is completely drained, and leave its
			 * destruction to the object destructor so the vma
			 * does not disappear under it.
			 */
			atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
			WARN_ON(__i915_vma_unbind(vma));

			/* Remove from the unbound list */
			list_del_init(&vma->vm_link);

			/*
			 * Delay the vm and vm mutex freeing until the
			 * object is done with destruction.
			 */
			i915_vm_resv_get(vma->vm);
			vma->vm_ddestroy = true;
		} else {
			i915_vma_destroy_locked(vma);
			i915_gem_object_put(obj);
		}
	}
}

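/* Drain both vma lists under the vm mutex before the vm is torn down. */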
static void __i915_vm_close(struct i915_address_space *vm)
{
	mutex_lock(&vm->mutex);

	clear_vm_list(&vm->bound_list);
	clear_vm_list(&vm->unbound_list);

	/* Both lists must now be empty */
	GEM_BUG_ON(!list_empty(&vm->bound_list));
	GEM_BUG_ON(!list_empty(&vm->unbound_list));

	mutex_unlock(&vm->mutex);
}

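/*
 * Lock the vm for the given ww context. All paging structures in the vm
 * share one dma-resv, so locking any one of them locks the whole vm.
 */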
int i915_vm_lock_objects(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww)
{
	if (vm->scratch[0]->base.resv == &vm->_resv) {
		return i915_gem_object_lock(vm->scratch[0], ww);
	} else {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		/*
		 * The scratch page may be shared from another vm, so take
		 * the lock via the page directory object instead.
		 */
		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
	}
}

void i915_address_space_fini(struct i915_address_space *vm)
{
	drm_mm_takedown(&vm->mm);
}

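/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * Called when the last reference to the vm's shared dma-resv is dropped;
 * only then is it safe to destroy the mutex and free the vm itself.
 */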
void i915_vm_resv_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, typeof(*vm), resv_ref);

	dma_resv_fini(&vm->_resv);
	mutex_destroy(&vm->mutex);

	kfree(vm);
}

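/*
 * Deferred teardown of an address space: close the vm, synchronise any
 * outstanding async unbinds, run the backend cleanup and finally drop the
 * dma-resv reference that keeps the vm memory alive.
 */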
static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, release_work);

	__i915_vm_close(vm);

	/* Synchronize async unbinds. */
	i915_vma_resource_bind_dep_sync_all(vm);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	i915_vm_resv_put(vm);
}

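/* Last reference dropped: defer the teardown to the driver workqueue. */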
void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_work(vm->i915->wq, &vm->release_work);
}

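/*
 * Common initialisation shared by the GGTT and ppGTT: reference counts,
 * the vm mutex with its lockdep annotations, the drm_mm range manager and
 * the per-region minimum alignments.
 */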
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);

	/*
	 * Special case for GGTT that has already done an early
	 * kref_init here.
	 */
	if (!kref_read(&vm->resv_ref))
		kref_init(&vm->resv_ref);

	vm->pending_unbind = RB_ROOT_CACHED;
	INIT_WORK(&vm->release_work, __i915_vm_release);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);

	if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
		i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
	} else {
		/*
		 * The CHV / BXT-with-VT-d workaround performs VM updates
		 * under stop_machine(), which may allocate memory, so prime
		 * lockdep with an allocation under vm->mutex instead of
		 * tainting the mutex against the shrinker.
		 */
		mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
		might_alloc(GFP_KERNEL);
		mutex_release(&vm->mutex.dep_map, _THIS_IP_);
	}
	dma_resv_init(&vm->_resv);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);

	memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
		 ARRAY_SIZE(vm->min_alignment));

	if (HAS_64K_PAGES(vm->i915) && NEEDS_COMPACT_PT(vm->i915) &&
	    subclass == VM_CLASS_PPGTT) {
		vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_2M;
		vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_2M;
	} else if (HAS_64K_PAGES(vm->i915)) {
		vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
		vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
	}

	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->bound_list);
	INIT_LIST_HEAD(&vm->unbound_list);
}

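/*
 * The __px_*() helpers return the CPU virtual address, DMA address and
 * struct page of a pinned paging-structure object, respectively.
 */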
void *__px_vaddr(struct drm_i915_gem_object *p)
{
	enum i915_map_type type;

	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return page_unpack_bits(p->mm.mapping, &type);
}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_dma_address(p->mm.pages->sgl);
}

struct page *__px_page(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_page(p->mm.pages->sgl);
}

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
	void *vaddr = __px_vaddr(p);

	memset64(vaddr, val, count);
	drm_clflush_virt_range(vaddr, PAGE_SIZE);
}

static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
	void *vaddr = __px_vaddr(scratch);
	u8 val;

	val = 0;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		val = POISON_FREE;

	memset(vaddr, val, scratch->base.size);
	drm_clflush_virt_range(vaddr, scratch->base.size);
}

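/*
 * Allocate and map the scratch page that unused PTEs point at. The scratch
 * contents are zeroed, or poisoned when CONFIG_DRM_I915_DEBUG_GEM is set.
 */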
int setup_scratch_page(struct i915_address_space *vm)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we
	 * will need to support a 64K scratch page, given that every 16th
	 * entry for a page-table operating in 64K mode must point to a
	 * properly aligned 64K region, including any PTEs which happen to
	 * point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share
	 * the scratch (read-only) between all vm, we create one 64K scratch
	 * page for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
		size = I915_GTT_PAGE_SIZE_64K;

	do {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_scratch_dma(vm, size);
		if (IS_ERR(obj))
			goto skip;

		if (map_pt_dma(vm, obj))
			goto skip_obj;

		/* We need a single contiguous page for our scratch */
		if (obj->mm.page_sizes.sg < size)
			goto skip_obj;

		/* And it needs to be correspondingly aligned */
		if (__px_dma(obj) & (size - 1))
			goto skip_obj;

		/*
		 * Use a non-zero scratch page for debugging: a memorable
		 * poison value makes stray reads from scratch much easier
		 * to spot than zeroes.
		 */
		poison_scratch_page(obj);

		vm->scratch[0] = obj;
		vm->scratch_order = get_order(size);
		return 0;

skip_obj:
		i915_gem_object_put(obj);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		/*
		 * If the device requires 64K GTT pages for device local
		 * memory, we cannot fall back to a 4K scratch page: the
		 * 64K PTE layout cannot safely mix with 4K scratch entries.
		 */
		if (HAS_64K_PAGES(vm->i915))
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
	} while (1);
}

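/* Release the scratch page(s), one for each level of the page-table tree. */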
void free_scratch(struct i915_address_space *vm)
{
	int i;

	for (i = 0; i <= vm->top; i++)
		i915_gem_object_put(vm->scratch[i]);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * MMIO, otherwise the page-walker will simply ignore the IPS bit.
	 * This should not be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically
	 * they only *work* from gen9+. For pre-BDW we instead have the
	 * option for 32K pages, but we don't currently have any support for
	 * it in our driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    GRAPHICS_VER(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GRAPHICS_VER(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec, if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages and not disabling
		 * the GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
				 intel_uncore_read(uncore,
						   HSW_GTT_CACHE_EN) == 0);
	}
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle
 * cacheability bits. When using advanced contexts each context stores its
 * own PAT, but writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |
	      GEN8_PPAT(3, GEN8_PPAT_UC) |
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* for scanout with eLLC */
	if (GRAPHICS_VER(i915) >= 9)
		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	else
		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, for GGTT accesses the hardware ignores pat_sel in the
	 * entry and always behaves as if pat_sel = 000, which means the
	 * snoop bit must be set in PAT entry 0 to keep the global status
	 * page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 8);

	if (GRAPHICS_VER(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (GRAPHICS_VER(i915) >= 11)
		icl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}

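/*
 * Create an internal object of at least @size bytes, set up for coherent
 * CPU reads, and return an (unpinned) vma for it in the given vm.
 */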
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	return vma;
}

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
	struct i915_vma *vma;
	int err;

	vma = __vm_create_scratch_for_read(vm, size);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err) {
		i915_vma_put(vma);
		return ERR_PTR(err);
	}

	return vma;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif