// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"

static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
    return IS_BROXTON(i915) && i915_vtd_active(i915);
}

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
    return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}

struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
    struct drm_i915_gem_object *obj;

    /*
     * To avoid severe over-allocation when dealing with min_page_size
     * restrictions, we override that behaviour here by allowing an object
     * size and page layout which can be smaller. In practice this should be
     * totally fine, since GTT paging structures are not typically inserted
     * into the GTT.
     *
     * Note that we also hit this path for the scratch page, which may need
     * to be 64K. That still works fine here, since we use the passed-in
     * size as the page size, which ensures it also has the matching
     * alignment.
     */
    obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
                            vm->lmem_pt_obj_flags);
    /*
     * Ensure all paging structures for this vm share the same dma-resv
     * object underneath, with the idea that one object_lock() will lock
     * them all at once.
     */
    if (!IS_ERR(obj)) {
        obj->base.resv = i915_vm_resv_get(vm);
        obj->shares_resv_from = vm;
    }

    return obj;
}

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
    struct drm_i915_gem_object *obj;

    if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
        i915_gem_shrink_all(vm->i915);

    obj = i915_gem_object_create_internal(vm->i915, sz);
    /*
     * Ensure all paging structures for this vm share the same dma-resv
     * object underneath, with the idea that one object_lock() will lock
     * them all at once.
     */
    if (!IS_ERR(obj)) {
        obj->base.resv = i915_vm_resv_get(vm);
        obj->shares_resv_from = vm;
    }

    return obj;
}

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
    enum i915_map_type type;
    void *vaddr;

    type = i915_coherent_map_type(vm->i915, obj, true);
    vaddr = i915_gem_object_pin_map_unlocked(obj, type);
    if (IS_ERR(vaddr))
        return PTR_ERR(vaddr);

    i915_gem_object_make_unshrinkable(obj);
    return 0;
}

int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
    enum i915_map_type type;
    void *vaddr;

    type = i915_coherent_map_type(vm->i915, obj, true);
    vaddr = i915_gem_object_pin_map(obj, type);
    if (IS_ERR(vaddr))
        return PTR_ERR(vaddr);

    i915_gem_object_make_unshrinkable(obj);
    return 0;
}
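
/*
 * Note on the two mapping helpers above: map_pt_dma() uses the _unlocked
 * pin helper and therefore takes the object lock itself, while
 * map_pt_dma_locked() expects the caller to already hold the (shared)
 * dma-resv lock, e.g. via i915_vm_lock_objects() below.
 */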

static void clear_vm_list(struct list_head *list)
{
    struct i915_vma *vma, *vn;

    list_for_each_entry_safe(vma, vn, list, vm_link) {
        struct drm_i915_gem_object *obj = vma->obj;

        if (!i915_gem_object_get_rcu(obj)) {
            /*
             * Object is dying, but has not yet cleared its
             * vma list.
             * Unbind the dying vma to ensure our list
             * is completely drained. We leave the destruction to
             * the object destructor to avoid the vma
             * disappearing under it.
             */
            atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
            WARN_ON(__i915_vma_unbind(vma));

            /* Remove from the unbound list */
            list_del_init(&vma->vm_link);

            /*
             * Delay the vm and vm mutex freeing until the
             * object is done with destruction.
             */
            i915_vm_resv_get(vma->vm);
            vma->vm_ddestroy = true;
        } else {
            i915_vma_destroy_locked(vma);
            i915_gem_object_put(obj);
        }
    }
}

static void __i915_vm_close(struct i915_address_space *vm)
{
    mutex_lock(&vm->mutex);

    clear_vm_list(&vm->bound_list);
    clear_vm_list(&vm->unbound_list);

    /* Check for must-fix unanticipated side-effects */
    GEM_BUG_ON(!list_empty(&vm->bound_list));
    GEM_BUG_ON(!list_empty(&vm->unbound_list));

    mutex_unlock(&vm->mutex);
}

/* Lock the vm into the current ww transaction; if we lock one, we lock them all */
int i915_vm_lock_objects(struct i915_address_space *vm,
             struct i915_gem_ww_ctx *ww)
{
    if (vm->scratch[0]->base.resv == &vm->_resv) {
        return i915_gem_object_lock(vm->scratch[0], ww);
    } else {
        struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

        /* We borrowed the scratch page from the ggtt, take the top-level object */
        return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
    }
}
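
/*
 * Illustrative caller pattern (a sketch, not part of this file): the lock
 * above is normally taken inside a ww transaction and retried on deadlock,
 * e.g. something along the lines of
 *
 *     for_i915_gem_ww(&ww, err, false) {
 *         err = i915_vm_lock_objects(vm, &ww);
 *         if (err)
 *             continue;
 *         ... touch the vm's paging structures ...
 *     }
 *
 * where for_i915_gem_ww() is assumed to be the driver's usual ww retry
 * helper; real callers should follow the existing i915 usage.
 */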

void i915_address_space_fini(struct i915_address_space *vm)
{
    drm_mm_takedown(&vm->mm);
}

/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * This function is called when the last lock sharer no longer shares the
 * &i915_address_space._resv lock, and also if we raced with the vma
 * destruction code while destroying a vma.
 */
void i915_vm_resv_release(struct kref *kref)
{
    struct i915_address_space *vm =
        container_of(kref, typeof(*vm), resv_ref);

    dma_resv_fini(&vm->_resv);
    mutex_destroy(&vm->mutex);

    kfree(vm);
}

static void __i915_vm_release(struct work_struct *work)
{
    struct i915_address_space *vm =
        container_of(work, struct i915_address_space, release_work);

    __i915_vm_close(vm);

    /* Synchronize async unbinds. */
    i915_vma_resource_bind_dep_sync_all(vm);

    vm->cleanup(vm);
    i915_address_space_fini(vm);

    i915_vm_resv_put(vm);
}

void i915_vm_release(struct kref *kref)
{
    struct i915_address_space *vm =
        container_of(kref, struct i915_address_space, ref);

    GEM_BUG_ON(i915_is_ggtt(vm));
    trace_i915_ppgtt_release(vm);

    queue_work(vm->i915->wq, &vm->release_work);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
    kref_init(&vm->ref);

    /*
     * Special case for GGTT that has already done an early
     * kref_init here.
     */
    if (!kref_read(&vm->resv_ref))
        kref_init(&vm->resv_ref);

    vm->pending_unbind = RB_ROOT_CACHED;
    INIT_WORK(&vm->release_work, __i915_vm_release);

    /*
     * The vm->mutex must be reclaim safe (for use in the shrinker).
     * Do a dummy acquire now under fs_reclaim so that any allocation
     * attempt holding the lock is immediately reported by lockdep.
     */
    mutex_init(&vm->mutex);
    lockdep_set_subclass(&vm->mutex, subclass);

    if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
        i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
    } else {
        /*
         * The CHV + BXT VTD workaround uses stop_machine(),
         * which is allowed to allocate memory. This means &vm->mutex
         * is the outer lock, and in theory we can allocate memory inside
         * it through stop_machine().
         *
         * Add the lockdep annotation for this; the shrinker uses trylock.
         */
        mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
        might_alloc(GFP_KERNEL);
        mutex_release(&vm->mutex.dep_map, _THIS_IP_);
    }
    dma_resv_init(&vm->_resv);

    GEM_BUG_ON(!vm->total);
    drm_mm_init(&vm->mm, 0, vm->total);

    memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
         ARRAY_SIZE(vm->min_alignment));

    if (HAS_64K_PAGES(vm->i915) && NEEDS_COMPACT_PT(vm->i915) &&
        subclass == VM_CLASS_PPGTT) {
        vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_2M;
        vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_2M;
    } else if (HAS_64K_PAGES(vm->i915)) {
        vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
        vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
    }

    vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

    INIT_LIST_HEAD(&vm->bound_list);
    INIT_LIST_HEAD(&vm->unbound_list);
}

void *__px_vaddr(struct drm_i915_gem_object *p)
{
    enum i915_map_type type;

    GEM_BUG_ON(!i915_gem_object_has_pages(p));
    return page_unpack_bits(p->mm.mapping, &type);
}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
    GEM_BUG_ON(!i915_gem_object_has_pages(p));
    return sg_dma_address(p->mm.pages->sgl);
}

struct page *__px_page(struct drm_i915_gem_object *p)
{
    GEM_BUG_ON(!i915_gem_object_has_pages(p));
    return sg_page(p->mm.pages->sgl);
}

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
    void *vaddr = __px_vaddr(p);

    memset64(vaddr, val, count);
    drm_clflush_virt_range(vaddr, PAGE_SIZE);
}
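
/*
 * Illustrative use of fill_page_dma() (a sketch, not taken from this file):
 * pointing every entry of a freshly allocated 4K page table at the vm's
 * scratch page might look like
 *
 *     fill_page_dma(pt->base, vm->scratch[0]->encode, 512);
 *
 * with 512 == PAGE_SIZE / sizeof(u64), and where ->encode is assumed to
 * hold a pre-computed scratch PTE for this vm.
 */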

static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
    void *vaddr = __px_vaddr(scratch);
    u8 val;

    val = 0;
    if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
        val = POISON_FREE;

    memset(vaddr, val, scratch->base.size);
    drm_clflush_virt_range(vaddr, scratch->base.size);
}

int setup_scratch_page(struct i915_address_space *vm)
{
    unsigned long size;

    /*
     * In order to utilize 64K pages for an object with a size < 2M, we will
     * need to support a 64K scratch page, given that every 16th entry for a
     * page-table operating in 64K mode must point to a properly aligned 64K
     * region, including any PTEs which happen to point to scratch.
     *
     * This is only relevant for the 48b PPGTT where we support
     * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
     * scratch (read-only) between all vms, we create one 64K scratch page
     * for all.
     */
    size = I915_GTT_PAGE_SIZE_4K;
    if (i915_vm_is_4lvl(vm) &&
        HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
        size = I915_GTT_PAGE_SIZE_64K;

    do {
        struct drm_i915_gem_object *obj;

        obj = vm->alloc_scratch_dma(vm, size);
        if (IS_ERR(obj))
            goto skip;

        if (map_pt_dma(vm, obj))
            goto skip_obj;

        /* We need a single contiguous page for our scratch */
        if (obj->mm.page_sizes.sg < size)
            goto skip_obj;

        /* And it needs to be correspondingly aligned */
        if (__px_dma(obj) & (size - 1))
            goto skip_obj;

        /*
         * Use a non-zero scratch page for debugging.
         *
         * We want a value that should be reasonably obvious
         * to spot in the error state, while also causing a GPU hang
         * if executed. We prefer using a clear page in production, so
         * should it ever be accidentally used, the effect should be
         * fairly benign.
         */
        poison_scratch_page(obj);

        vm->scratch[0] = obj;
        vm->scratch_order = get_order(size);
        return 0;

skip_obj:
        i915_gem_object_put(obj);
skip:
        if (size == I915_GTT_PAGE_SIZE_4K)
            return -ENOMEM;

        /*
         * If we need 64K minimum GTT pages for device local-memory,
         * like on XEHPSDV, then we need to fail the allocation here,
         * otherwise we can't safely support the insertion of
         * local-memory pages for this vm, since the HW expects the
         * correct physical alignment and size when the page-table is
         * operating in 64K GTT mode, which includes any scratch PTEs,
         * since userspace can still touch them.
         */
        if (HAS_64K_PAGES(vm->i915))
            return -ENOMEM;

        size = I915_GTT_PAGE_SIZE_4K;
    } while (1);
}
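
/*
 * Worked example for the alignment check above (illustrative only): with
 * size == I915_GTT_PAGE_SIZE_64K == 0x10000, a scratch page at dma address
 * 0x1230000 passes (0x1230000 & 0xffff == 0), while one at 0x1238000 is
 * rejected, causing either a fall back to a 4K scratch page or, on
 * platforms with HAS_64K_PAGES(), an outright -ENOMEM.
 */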

void free_scratch(struct i915_address_space *vm)
{
    int i;

    for (i = 0; i <= vm->top; i++)
        i915_gem_object_put(vm->scratch[i]);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
    struct drm_i915_private *i915 = gt->i915;
    struct intel_uncore *uncore = gt->uncore;

    /*
     * This function is for GTT-related workarounds. It is called on driver
     * load and after a GPU reset, so you can place workarounds here even
     * if they get overwritten by a GPU reset.
     */
    /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
    if (IS_BROADWELL(i915))
        intel_uncore_write(uncore,
                   GEN8_L3_LRA_1_GPGPU,
                   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
    else if (IS_CHERRYVIEW(i915))
        intel_uncore_write(uncore,
                   GEN8_L3_LRA_1_GPGPU,
                   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
    else if (IS_GEN9_LP(i915))
        intel_uncore_write(uncore,
                   GEN8_L3_LRA_1_GPGPU,
                   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
    else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
        intel_uncore_write(uncore,
                   GEN8_L3_LRA_1_GPGPU,
                   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

    /*
     * To support 64K PTEs we need to first enable the use of the
     * Intermediate Page Size (IPS) bit of the PDE field via some magical
     * mmio, otherwise the page-walker will simply ignore the IPS bit. This
     * shouldn't be needed after GEN10.
     *
     * 64K pages were first introduced from BDW+, although technically they
     * only *work* from gen9+. For pre-BDW we instead have the option for
     * 32K pages, but we don't currently have any support for it in our
     * driver.
     */
    if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
        GRAPHICS_VER(i915) <= 10)
        intel_uncore_rmw(uncore,
                 GEN8_GAMW_ECO_DEV_RW_IA,
                 0,
                 GAMW_ECO_ENABLE_64K_IPS_FIELD);

    if (IS_GRAPHICS_VER(i915, 8, 11)) {
        bool can_use_gtt_cache = true;

        /*
         * According to the BSpec, if we use 2M/1G pages then we also
         * need to disable the GTT cache. At least on BDW we can see
         * visual corruption when using 2M pages without also disabling
         * the GTT cache.
         */
        if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
            can_use_gtt_cache = false;

        /* WaGttCachingOffByDefault */
        intel_uncore_write(uncore,
                   HSW_GTT_CACHE_EN,
                   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
        drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
                 intel_uncore_read(uncore,
                           HSW_GTT_CACHE_EN) == 0);
    }
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
    /* TGL doesn't support LLC or AGE settings */
    intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
    intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
    intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
    intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
    intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
    intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
    intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
    intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
    intel_uncore_write(uncore,
               GEN10_PAT_INDEX(0),
               GEN8_PPAT_WB | GEN8_PPAT_LLC);
    intel_uncore_write(uncore,
               GEN10_PAT_INDEX(1),
               GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
    intel_uncore_write(uncore,
               GEN10_PAT_INDEX(2),
               GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
    intel_uncore_write(uncore,
               GEN10_PAT_INDEX(3),
               GEN8_PPAT_UC);
    intel_uncore_write(uncore,
               GEN10_PAT_INDEX(4),
               GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
    intel_uncore_write(uncore,
               GEN10_PAT_INDEX(5),
               GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
    intel_uncore_write(uncore,
               GEN10_PAT_INDEX(6),
               GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
    intel_uncore_write(uncore,
               GEN10_PAT_INDEX(7),
               GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
    struct drm_i915_private *i915 = uncore->i915;
    u64 pat;

    pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |  /* for normal objects, no eLLC */
          GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |  /* for something pointing to ptes? */
          GEN8_PPAT(3, GEN8_PPAT_UC) |          /* Uncached objects, mostly for scanout */
          GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
          GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
          GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
          GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

    /* for scanout with eLLC */
    if (GRAPHICS_VER(i915) >= 9)
        pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
    else
        pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

    intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
    intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
    u64 pat;

    /*
     * Map WB on BDW to snooped on CHV.
     *
     * Only the snoop bit has meaning for CHV, the rest is
     * ignored.
     *
     * The hardware will never snoop for certain types of accesses:
     * - CPU GTT (GMADR->GGTT->no snoop->memory)
     * - PPGTT page tables
     * - some other special cycles
     *
     * As with BDW, we also need to consider the following for GT accesses:
     * "For GGTT, there is NO pat_sel[2:0] from the entry,
     * so RTL will always use the value corresponding to
     * pat_sel = 000".
     * Which means we must set the snoop bit in PAT entry 0
     * in order to keep the global status page working.
     */

    pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
          GEN8_PPAT(1, 0) |
          GEN8_PPAT(2, 0) |
          GEN8_PPAT(3, 0) |
          GEN8_PPAT(4, CHV_PPAT_SNOOP) |
          GEN8_PPAT(5, CHV_PPAT_SNOOP) |
          GEN8_PPAT(6, CHV_PPAT_SNOOP) |
          GEN8_PPAT(7, CHV_PPAT_SNOOP);

    intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
    intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
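
/*
 * Packing note for the two PAT helpers above (illustrative): GEN8_PPAT(i, x)
 * shifts the 8-bit attribute x into bits [8*i + 7:8*i] of the 64-bit PAT
 * value, so e.g. GEN8_PPAT(3, GEN8_PPAT_UC) lands in bits [31:24] and is
 * programmed through GEN8_PRIVATE_PAT_LO, while indices 4-7 end up in
 * GEN8_PRIVATE_PAT_HI.
 */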

void setup_private_pat(struct intel_uncore *uncore)
{
    struct drm_i915_private *i915 = uncore->i915;

    GEM_BUG_ON(GRAPHICS_VER(i915) < 8);

    if (GRAPHICS_VER(i915) >= 12)
        tgl_setup_private_ppat(uncore);
    else if (GRAPHICS_VER(i915) >= 11)
        icl_setup_private_ppat(uncore);
    else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
        chv_setup_private_ppat(uncore);
    else
        bdw_setup_private_ppat(uncore);
}

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
    struct drm_i915_gem_object *obj;
    struct i915_vma *vma;

    obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
    if (IS_ERR(obj))
        return ERR_CAST(obj);

    i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

    vma = i915_vma_instance(obj, vm, NULL);
    if (IS_ERR(vma)) {
        i915_gem_object_put(obj);
        return vma;
    }

    return vma;
}

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
    struct i915_vma *vma;
    int err;

    vma = __vm_create_scratch_for_read(vm, size);
    if (IS_ERR(vma))
        return vma;

    err = i915_vma_pin(vma, 0, 0,
               i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
    if (err) {
        i915_vma_put(vma);
        return ERR_PTR(err);
    }

    return vma;
}
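
/*
 * Usage sketch for the scratch-for-read helpers above (not part of this
 * file): callers own the returned vma and, once done reading back through
 * it, are expected to unpin (for the pinned variant) and drop the
 * reference, e.g.
 *
 *     i915_vma_unpin(vma);
 *     i915_vma_put(vma);
 *
 * See the gt selftests for the real call sites.
 */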

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif