// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <asm/set_memory.h>
#include <asm/smp.h>
#include <linux/types.h>
#include <linux/stop_machine.h>

#include <drm/i915_drm.h>
#include <drm/intel-gtt.h>

#include "gem/i915_gem_lmem.h"

#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"
#include "gen8_ppgtt.h"

/*
 * On integrated gen8+ parts (no local memory) the GGTT page table lives in
 * stolen memory and its contents survive suspend, so the PTEs do not have
 * to be cleared and rewritten across a suspend/resume cycle.
 */
static inline bool suspend_retains_ptes(struct i915_address_space *vm)
{
	return GRAPHICS_VER(vm->i915) >= 8 &&
		!HAS_LMEM(vm->i915) &&
		vm->is_ggtt;
}

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(to_gt(i915)->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Return the value of the last GGTT pte cast to an u64, if
 * the system is supposed to retain ptes across resume. 0 otherwise.
 */
static u64 read_last_pte(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *ptep;

	if (!suspend_retains_ptes(vm))
		return 0;

	GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8);
	ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1);
	return readq(ptep);
}
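
/*
 * Illustrative note: the value sampled by read_last_pte() is stashed in
 * ggtt->probed_pte at suspend time and compared against a fresh read in
 * i915_ggtt_resume_vm(). If the two disagree, the PTEs evidently did not
 * survive (e.g. across hibernation) and the whole GGTT is rewritten.
 */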

/**
 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
 * @vm: The VM to suspend the mappings for
 *
 * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
 * DPT page table.
 */
void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;
	int save_skip_rewrite;

	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

retry:
	i915_gem_drain_freed_objects(vm->i915);

	mutex_lock(&vm->mutex);

	/*
	 * Skip rewriting PTE on VMA unbind.
	 * FIXME: Use an argument to i915_vma_unbind() instead?
	 */
	save_skip_rewrite = vm->skip_pte_rewrite;
	vm->skip_pte_rewrite = true;

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

		if (i915_vma_is_pinned(vma))
			continue;

		/* unlikely to race when GPU is idle, so no worry about slowpath.. */
		if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
			/*
			 * No dead objects should appear here, GPU should be
			 * completely idle, and userspace suspended
			 */
			i915_gem_object_get(obj);

			mutex_unlock(&vm->mutex);

			i915_gem_object_lock(obj, NULL);
			GEM_WARN_ON(i915_vma_unbind(vma));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			vm->skip_pte_rewrite = save_skip_rewrite;
			goto retry;
		}

		/*
		 * Keep the global binding intact so it can be rewritten on
		 * resume; only evict nodes that no longer carry a global
		 * binding.
		 */
		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			i915_vma_wait_for_bind(vma);

			__i915_vma_evict(vma, false);
			drm_mm_remove_node(&vma->node);
		}

		i915_gem_object_unlock(obj);
	}

	if (!suspend_retains_ptes(vm))
		vm->clear_range(vm, 0, vm->total);
	else
		i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);

	vm->skip_pte_rewrite = save_skip_rewrite;

	mutex_unlock(&vm->mutex);
}

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	i915_ggtt_suspend_vm(&ggtt->vm);
	ggtt->invalidate(ggtt);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	if (GRAPHICS_VER(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

u64 gen8_ggtt_pte_encode(dma_addr_t addr,
			 enum i915_cache_level level,
			 u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	return pte;
}
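
/*
 * Worked example (illustrative only): a 4KiB-aligned system page at dma
 * address 0x12345000, inserted without PTE_LM, encodes to
 *
 *	0x12345000 | GEN8_PAGE_PRESENT == 0x12345001
 *
 * i.e. the page-aligned address with the valid bit set in the low bits.
 * PTE_LM additionally tags the entry as pointing at device-local memory
 * on platforms that have it.
 */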

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

	ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     enum i915_cache_level level,
				     u32 flags)
{
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */
	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma_res->start / I915_GTT_PAGE_SIZE;
	end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma_res->start / I915_GTT_PAGE_SIZE;
	end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}
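
/*
 * Illustrative note: nop_clear_range is installed where eagerly scrubbing
 * PTEs on unbind buys nothing - a stale entry is simply overwritten by the
 * next bind of that range. The real clear below is kept for configurations
 * where stray accesses through stale PTEs are harmful, e.g. scanout with
 * VT-d enabled (see the clear_range selection in the gmch probe functions).
 */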

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}

struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma_resource *vma_res;
	enum i915_cache_level level;
	u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma_resource *vma_res,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma_res, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
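
/*
 * Illustrative note: stop_machine() runs the callback on one CPU while every
 * other CPU spins with interrupts disabled, so a __BKL variant such as
 *
 *	bxt_vtd_ggtt_insert_page__BKL(vm, addr, offset, level, 0);
 *
 * updates the GTT with a system-wide guarantee that no concurrent aperture
 * access can overlap the PTE write, working around the VT-d hang described
 * in bxt_vtd_ggtt_wa() above. This is heavyweight, hence only wired up when
 * intel_vm_no_concurrent_access_wa() applies.
 */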

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0]->encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

void intel_ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	u32 pte_flags;

	if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
		return;

	vma_res->bound_flags |= flags;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, cache_level, pte_flags);
	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}

void intel_ggtt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
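
/*
 * Illustrative note: unbinding does not erase anything by itself - it simply
 * points the vma's GGTT range back at the scratch page via vm->clear_range()
 * (which may be nop_clear_range() when scrubbing is unnecessary). The drm_mm
 * node bookkeeping is handled by the unbind callers in i915_vma.c.
 */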

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}

static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}

static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
	ggtt_release_guc_top(ggtt);
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);
}

static int init_ggtt(struct i915_ggtt *ggtt)
{
	/*
	 * Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	ggtt->pte_lost = true;

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	mutex_init(&ggtt->error_mutex);
	if (ggtt->mappable_end) {
		/*
		 * Reserve a mappable slot for our lockless error capture.
		 *
		 * We strongly prefer taking address 0x0 in order to protect
		 * other critical buffers against accidental overwrites,
		 * as writing to address 0 is a very common mistake.
		 *
		 * Since 0 may already be in use by the system (e.g. the BIOS
		 * framebuffer), we let the reservation fail quietly and hope
		 * that the next user of 0x0 will be more careful.
		 */
		ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
		ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
		if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
			drm_mm_insert_node_in_range(&ggtt->vm.mm,
						    &ggtt->error_capture,
						    ggtt->error_capture.size, 0,
						    ggtt->error_capture.color,
						    0, ggtt->mappable_end,
						    DRM_MM_INSERT_LOW);
	}
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_dbg(&ggtt->vm.i915->drm,
			"Reserved GGTT:[%llx, %llx] for use by error capture\n",
			ggtt->error_capture.start,
			ggtt->error_capture.start + ggtt->error_capture.size);

	/*
	 * The upper portion of the GuC address space has a sizeable hole
	 * (several MB) that is inaccessible by GuC. Reserve this range within
	 * GGTT as it can comfortably hold GuC/HuC firmware images.
	 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg(&ggtt->vm.i915->drm,
			"clearing unused GTT space: [%lx, %lx]\n",
			hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}
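
/*
 * Roughly, the resulting GGTT layout on a GuC-enabled part looks like
 * (illustrative, not to scale):
 *
 *	0x0			error capture page (if the reservation took)
 *	...			GEM-managed range, holes scrubbed to scratch
 *	GUC_GGTT_TOP..vm.total	reserved (GuC cannot access addresses above
 *				GUC_GGTT_TOP, so firmware images live here)
 *	vm.total - PAGE_SIZE	guard page left pointing at scratch
 *
 * plus any ranges ballooned out by intel_vgt_balloon() when running
 * virtualized.
 */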

static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma_resource *vma_res,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma_res, cache_level, flags);

	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma_res, cache_level, pte_flags);

	vma_res->bound_flags |= flags;
}

static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma_resource *vma_res)
{
	if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
		vm->clear_range(vm, vma_res->start, vma_res->vma_size);

	if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
}

static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
	err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
	if (err)
		goto err_stash;

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	return 0;

err_stash:
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}
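
/*
 * Illustrative note: an "aliasing" ppgtt shadows the GGTT one to one - page
 * tables spanning [0, ggtt->vm.total) are preallocated up front, and the
 * same GGTT offset is then used for both binding types. A vma bound with
 * I915_VMA_LOCAL_BIND has its PTEs written into the alias ppgtt, while
 * I915_VMA_GLOBAL_BIND writes the real GGTT entries, as dispatched by
 * aliasing_gtt_bind_vma() above.
 */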

static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(to_gt(i915)->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
		if (ret)
			cleanup_init_ggtt(to_gt(i915)->ggtt);
	}

	return 0;
}

static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	flush_workqueue(ggtt->vm.i915->wq);
	i915_gem_drain_freed_objects(ggtt->vm.i915);

	mutex_lock(&ggtt->vm.mutex);

	ggtt->vm.skip_pte_rewrite = true;

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		bool trylock;

		trylock = i915_gem_object_trylock(obj, NULL);
		WARN_ON(!trylock);

		WARN_ON(__i915_vma_unbind(vma));
		if (trylock)
			i915_gem_object_unlock(obj);
	}

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);

	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}

/**
 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
 * all free'd objects have been fully released.
 * @i915: i915 device
 */
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
	dma_resv_fini(&ggtt->vm._resv);
}

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}
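
/*
 * Worked example (illustrative): on gen6 the GGMS field gives the GTT size
 * directly in MiB, so a field value of 2 means 2 MiB of PTEs. At 4 bytes per
 * PTE and 4 KiB per page that maps
 *
 *	2 MiB / 4 B * 4 KiB = 2 GiB
 *
 * of GGTT address space (see the vm.total computation in gen6_gmch_probe()).
 */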

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}
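
/*
 * Worked example (illustrative): on gen8 the GGMS field is a power-of-two
 * exponent, so a field value of 3 decodes to 1 << 3 = 8 MiB of PTEs. At
 * 8 bytes per gen8 PTE that is 1M entries, i.e. a 4 GiB GGTT. CHV encodes
 * the same sizes but computes 1 << (20 + 3) = 8 MiB in a single shift.
 */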

static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
{
	/*
	 * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
	 * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
	 */
	GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
	return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
}

static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
{
	return gen6_gttmmadr_size(i915) / 2;
}

static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
	phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);

	/*
	 * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	kref_init(&ggtt->vm.resv_ref);
	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;

	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, pte_flags);

	return 0;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	if (!HAS_LMEM(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
	ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/*
	 * Serialize GTT updates with aperture access on BXT if VT-d is on,
	 * and always on CHV.
	 */
	if (intel_vm_no_concurrent_access_wa(i915)) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;

		/*
		 * Calling stop_machine() version of GGTT update function
		 * at error capture/reset path will raise lockdep warning.
		 * Allow calling gen8_ggtt_insert_* directly at such time.
		 */
		ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
		ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;

		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}

static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}
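
/*
 * Illustrative comparison of the gen6/7 encodings above: given the same
 * system page, snb/ivb select cacheability purely from the cache level
 * (LLC vs uncached, with ivb adding an L3 variant), while byt marks pages
 * as snooped by the CPU caches and tracks writability, e.g.
 *
 *	byt_pte_encode(addr, I915_CACHE_LLC, 0)
 *
 * yields GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID | BYT_PTE_WRITEABLE |
 * BYT_PTE_SNOOPED_BY_CPU_CACHES. hsw/iris use the HSW address encoding and
 * age-based LLC/eLLC hints instead.
 */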

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64 << 20) ||
	    ggtt->mappable_end > (512 << 20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (GRAPHICS_VER(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;

	return ggtt_probe_common(ggtt, size);
}

static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = i915->drm.dev;
	dma_resv_init(&ggtt->vm._resv);

	if (GRAPHICS_VER(i915) >= 8)
		ret = gen8_gmch_probe(ggtt);
	else if (GRAPHICS_VER(i915) >= 6)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = intel_ggtt_gmch_probe(ggtt);

	if (ret) {
		dma_resv_fini(&ggtt->vm._resv);
		return ret;
	}

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
		(u64)ggtt->mappable_end >> 20);
	drm_dbg(&i915->drm, "DSM size = %lluM\n",
		(u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
	if (ret)
		return ret;

	if (i915_vtd_active(i915))
		drm_info(&i915->drm, "VT-d active for gfx access\n");

	return 0;
}

int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) < 6)
		return intel_ggtt_gmch_enable_hw(i915);

	return 0;
}

void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

/**
 * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
 * @vm: The VM to restore the mappings for
 *
 * Restore the memory mappings for all objects mapped to HW via the GGTT or a
 * DPT page table.
 *
 * Returns %true if restoring the mapping for any object that was in a write
 * domain before suspend.
 */
bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
	struct i915_vma *vma;
	bool write_domain_objs = false;
	bool retained_ptes;

	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

	/*
	 * First fill our portion of the GTT with scratch pages, unless the
	 * PTEs are demonstrably unchanged since suspend: we trust the
	 * retained contents only if nothing marked them lost and the probed
	 * last PTE still matches what was saved at suspend time.
	 */
	retained_ptes = suspend_retains_ptes(vm) &&
		!i915_vm_to_ggtt(vm)->pte_lost &&
		!GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm));

	if (!retained_ptes)
		vm->clear_range(vm, 0, vm->total);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		if (!retained_ptes)
			vma->ops->bind_vma(vm, NULL, vma->resource,
					   obj ? obj->cache_level : 0,
					   was_bound);
		if (obj) { /* only used during resume => exclusive access */
			write_domain_objs |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	return write_domain_objs;
}

void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	bool flush;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	flush = i915_ggtt_resume_vm(&ggtt->vm);

	ggtt->invalidate(ggtt);

	if (flush)
		wbinvd_on_all_cpus();

	if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);

	intel_ggtt_restore_fences(ggtt);
}

/**
 * i915_ggtt_mark_pte_lost - Mark ggtt ptes as lost or clear such a marking
 * @i915: The device private.
 * @val: whether the ptes should be marked as lost.
 *
 * In some cases pte content is retained across suspend, but typically lost
 * across hibernate. Typically they should be marked as lost on
 * hibernation restore and such marking cleared on suspend.
 */
void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val)
{
	to_gt(i915)->ggtt->pte_lost = val;
}