// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "gem/i915_gem_lmem.h"

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

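/*
 * Allocate a page-table node plus an object of @sz bytes to back its
 * entries. The backing object is not CPU-mapped here; see
 * i915_vm_map_pt_stash(). Returns ERR_PTR(-ENOMEM) on failure.
 */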
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz)
{
    struct i915_page_table *pt;

    pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
    if (unlikely(!pt))
        return ERR_PTR(-ENOMEM);

    pt->base = vm->alloc_pt_dma(vm, sz);
    if (IS_ERR(pt->base)) {
        kfree(pt);
        return ERR_PTR(-ENOMEM);
    }

    pt->is_compact = false;
    atomic_set(&pt->used, 0);
    return pt;
}

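/*
 * Allocate the CPU-side shell of a page directory with room for
 * @count children; allocating the backing DMA object is left to the
 * caller. Returns NULL on allocation failure.
 */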
struct i915_page_directory *__alloc_pd(int count)
{
    struct i915_page_directory *pd;

    pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
    if (unlikely(!pd))
        return NULL;

    pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
    if (unlikely(!pd->entry)) {
        kfree(pd);
        return NULL;
    }

    spin_lock_init(&pd->lock);
    return pd;
}

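/*
 * Allocate a complete page directory: the shell with I915_PDES slots
 * plus the 4K page that will hold the hardware entries.
 */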
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
    struct i915_page_directory *pd;

    pd = __alloc_pd(I915_PDES);
    if (unlikely(!pd))
        return ERR_PTR(-ENOMEM);

    pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
    if (IS_ERR(pd->pt.base)) {
        kfree(pd->entry);
        kfree(pd);
        return ERR_PTR(-ENOMEM);
    }

    return pd;
}

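/*
 * Free a page table (@lvl == 0) or page directory (@lvl > 0) together
 * with its backing object. Directories are freed through their
 * embedded i915_page_table, which the BUILD_BUG_ON below requires to
 * sit at offset 0.
 */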
void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
    BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));

    if (lvl) {
        struct i915_page_directory *pd =
            container_of(pt, typeof(*pd), pt);
        kfree(pd->entry);
    }

    if (pt->base)
        i915_gem_object_put(pt->base);

    kfree(pt);
}

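/*
 * Write one 64-bit entry into the CPU mapping of a paging structure
 * and flush the cacheline so the update is visible to the GPU.
 */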
static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
        const unsigned short idx,
        const u64 encoded_entry)
{
    u64 * const vaddr = __px_vaddr(pdma);

    vaddr[idx] = encoded_entry;
    drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
}

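/*
 * Install @to at slot @idx of @pd: take a use-count reference on the
 * directory, record the CPU pointer and write the encoded DMA address
 * into the hardware page.
 */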
void
__set_pd_entry(struct i915_page_directory * const pd,
           const unsigned short idx,
           struct i915_page_table * const to,
           u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
    /* Each thread pre-pins the pd, and we may have a thread per pde. */
    GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);

    atomic_inc(px_used(pd));
    pd->entry[idx] = to;
    write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}

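/*
 * Point slot @idx back at the scratch page and drop the use count
 * taken in __set_pd_entry().
 */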
void
clear_pd_entry(struct i915_page_directory * const pd,
           const unsigned short idx,
           const struct drm_i915_gem_object * const scratch)
{
    GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

    write_dma_entry(px_base(pd), idx, scratch->encode);
    pd->entry[idx] = NULL;
    atomic_dec(px_used(pd));
}

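/*
 * Drop a use count on @pt. If it was the last one, clear its slot in
 * @pd under the directory lock and report that the caller may free
 * @pt. The atomic_add_unless() fast path avoids taking the lock while
 * other users remain.
 */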
bool
release_pd_entry(struct i915_page_directory * const pd,
         const unsigned short idx,
         struct i915_page_table * const pt,
         const struct drm_i915_gem_object * const scratch)
{
    bool free = false;

    if (atomic_add_unless(&pt->used, -1, 1))
        return false;

    spin_lock(&pd->lock);
    if (atomic_dec_and_test(&pt->used)) {
        clear_pd_entry(pd, idx, scratch);
        free = true;
    }
    spin_unlock(&pd->lock);

    return free;
}

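/*
 * Apply the GTT write workarounds and, on gen6/gen7 where ppGTT needs
 * an explicit hardware enable, turn it on.
 */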
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
    struct drm_i915_private *i915 = gt->i915;

    gtt_write_workarounds(gt);

    if (GRAPHICS_VER(i915) == 6)
        gen6_ppgtt_enable(gt);
    else if (GRAPHICS_VER(i915) == 7)
        gen7_ppgtt_enable(gt);

    return 0;
}

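/* Pick the ppGTT implementation matching the graphics version. */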
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
{
    if (GRAPHICS_VER(gt->i915) < 8)
        return gen6_ppgtt_create(gt);
    else
        return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
}

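/* Create a ppGTT and emit the creation tracepoint on success. */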
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
                     unsigned long lmem_pt_obj_flags)
{
    struct i915_ppgtt *ppgtt;

    ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
    if (IS_ERR(ppgtt))
        return ppgtt;

    trace_i915_ppgtt_create(&ppgtt->vm);

    return ppgtt;
}

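/*
 * Bind a vma into the ppGTT: on first bind, allocate the page tables
 * covering the range from the pre-filled stash, then write the PTEs
 * with the requested cache level and read-only/local-memory bits.
 */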
void ppgtt_bind_vma(struct i915_address_space *vm,
            struct i915_vm_pt_stash *stash,
            struct i915_vma_resource *vma_res,
            enum i915_cache_level cache_level,
            u32 flags)
{
    u32 pte_flags;

    if (!vma_res->allocated) {
        vm->allocate_va_range(vm, stash, vma_res->start,
                      vma_res->vma_size);
        vma_res->allocated = true;
    }

    /* Applicable to VLV, and gen8+ */
    pte_flags = 0;
    if (vma_res->bi.readonly)
        pte_flags |= PTE_READ_ONLY;
    if (vma_res->bi.lmem)
        pte_flags |= PTE_LM;

    vm->insert_entries(vm, vma_res, cache_level, pte_flags);
    wmb();
}

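/*
 * Tear down a binding: point the range back at scratch and invalidate
 * the TLB if the vma has one registered.
 */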
void ppgtt_unbind_vma(struct i915_address_space *vm,
              struct i915_vma_resource *vma_res)
{
    if (!vma_res->allocated)
        return;

    vm->clear_range(vm, vma_res->start, vma_res->vma_size);
    if (vma_res->tlb)
        vma_invalidate_tlb(vm, vma_res->tlb);
}

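/*
 * Worst-case number of pages needed at the level with the given
 * @shift to cover @size bytes: pad by two partial pages in case the
 * range turns out to be misaligned at both ends.
 */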
static unsigned long pd_count(u64 size, int shift)
{
    /* Beware later misalignment */
    return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
}

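/*
 * Preallocate everything that could be needed to grow the vm by @size
 * bytes: page tables at the bottom level, then directories for each
 * level above. Nothing is mapped yet; on failure the partially built
 * stash is freed.
 */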
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
               struct i915_vm_pt_stash *stash,
               u64 size)
{
    unsigned long count;
    int shift, n, pt_sz;

    shift = vm->pd_shift;
    if (!shift)
        return 0;

    pt_sz = stash->pt_sz;
    if (!pt_sz)
        pt_sz = I915_GTT_PAGE_SIZE_4K;
    else
        GEM_BUG_ON(!IS_DGFX(vm->i915));

    GEM_BUG_ON(!is_power_of_2(pt_sz));

    count = pd_count(size, shift);
    while (count--) {
        struct i915_page_table *pt;

        pt = alloc_pt(vm, pt_sz);
        if (IS_ERR(pt)) {
            i915_vm_free_pt_stash(vm, stash);
            return PTR_ERR(pt);
        }

        pt->stash = stash->pt[0];
        stash->pt[0] = pt;
    }

    for (n = 1; n < vm->top; n++) {
        shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
        count = pd_count(size, shift);
        while (count--) {
            struct i915_page_directory *pd;

            pd = alloc_pd(vm);
            if (IS_ERR(pd)) {
                i915_vm_free_pt_stash(vm, stash);
                return PTR_ERR(pd);
            }

            pd->pt.stash = stash->pt[1];
            stash->pt[1] = &pd->pt;
        }
    }

    return 0;
}

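/*
 * CPU-map the backing object of every table and directory in the
 * stash so their entries can be written later.
 */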
int i915_vm_map_pt_stash(struct i915_address_space *vm,
             struct i915_vm_pt_stash *stash)
{
    struct i915_page_table *pt;
    int n, err;

    for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
        for (pt = stash->pt[n]; pt; pt = pt->stash) {
            err = map_pt_dma_locked(vm, pt->base);
            if (err)
                return err;
        }
    }

    return 0;
}

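/* Free whatever is left unused in the stash. */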
void i915_vm_free_pt_stash(struct i915_address_space *vm,
               struct i915_vm_pt_stash *stash)
{
    struct i915_page_table *pt;
    int n;

    for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
        while ((pt = stash->pt[n])) {
            stash->pt[n] = pt->stash;
            free_px(vm, pt, n);
        }
    }
}

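/*
 * Common ppGTT construction: wire the vm to its gt and device, size
 * it from the platform's ppgtt_size and install the ppGTT vma ops.
 */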
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
        unsigned long lmem_pt_obj_flags)
{
    struct drm_i915_private *i915 = gt->i915;

    ppgtt->vm.gt = gt;
    ppgtt->vm.i915 = i915;
    ppgtt->vm.dma = i915->drm.dev;
    ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
    ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;

    dma_resv_init(&ppgtt->vm._resv);
    i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

    ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
    ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
}