// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "gem/i915_gem_domain.h"
#include "gem/i915_gem_internal.h"
#include "gt/gen8_ppgtt.h"

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"

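/*
 * struct i915_dpt - Display Page Table backing for one framebuffer
 * @vm: address space the framebuffer view is bound into
 * @obj: GEM object holding the page table entries
 * @vma: GGTT binding of @obj while the DPT is pinned
 * @iomem: CPU mapping of @obj used to write the PTEs
 */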
struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	GEM_BUG_ON(!i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

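/* DPT entries use the 64-bit gen8+ GTT PTE layout. */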
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

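/* Program a single PTE for the page at the given offset within the DPT. */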
static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level level,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, level, flags));
}

static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level level,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * The DPT is a flat, single-level table: one PTE per framebuffer
	 * page. Start at the PTE index corresponding to the vma's offset
	 * within the DPT and write one entry per DMA address in the
	 * backing store.
	 */
	i = vma_res->start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

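/*
 * Intentionally a no-op: the DPT has no scratch page, so stale entries are
 * left in place and simply overwritten by the next bind.
 */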
static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

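/*
 * Bind a vma into the DPT: encode the PTE flags and write one entry per
 * backing page via dpt_insert_entries().
 */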
static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	u32 pte_flags;

	if (vma_res->bound_flags)
		return;

	/* Build the PTE flags (read-only / local memory) for this binding. */
	pte_flags = 0;
	if (vm->has_read_only && vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, cache_level, pte_flags);

	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}

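/* Unbinding only clears the range, which is a no-op for the DPT. */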
static void dpt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}

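/* Called on the final i915_vm_put(); drops the PTE backing object. */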
static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

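/**
 * intel_dpt_pin - map the DPT backing object for PTE writes
 * @vm: the &struct i915_address_space of the DPT
 *
 * Bind the object holding the DPT PTEs into the GGTT (via the mappable
 * aperture if it lives in stolen memory) and set up a CPU mapping of it so
 * the PTEs can be written by dpt_insert_page()/dpt_insert_entries().
 *
 * Returns: the DPT vma (with a reference held) on success, or an ERR_PTR()
 * on failure.
 */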
struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
{
	struct drm_i915_private *i915 = vm->i915;
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	void __iomem *iomem;
	struct i915_gem_ww_ctx ww;
	u64 pin_flags = 0;
	int err;

	if (i915_gem_object_is_stolen(dpt->obj))
		pin_flags |= PIN_MAPPABLE;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	atomic_inc(&i915->gpu_error.pending_fb_pin);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(dpt->obj, &ww);
		if (err)
			continue;

		vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
						  pin_flags);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			continue;
		}

		iomem = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);

		if (IS_ERR(iomem)) {
			err = PTR_ERR(iomem);
			continue;
		}

		dpt->vma = vma;
		dpt->iomem = iomem;

		i915_vma_get(vma);
	}

	atomic_dec(&i915->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err ? ERR_PTR(err) : vma;
}

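/**
 * intel_dpt_unpin - release the DPT CPU mapping
 * @vm: the &struct i915_address_space of the DPT
 *
 * Drop the CPU mapping and the vma reference taken by intel_dpt_pin().
 */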
void intel_dpt_unpin(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vma_unpin_iomap(dpt->vma);
	i915_vma_put(dpt->vma);
}

/**
 * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
 * @i915: device instance
 *
 * Restore the memory mapping during system resume for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table. The content of these page
 * tables are not stored in the hibernation image during S4 and S3RST->S4
 * transitions, so here we reprogram the PTE entries in those tables.
 *
 * This function must be called after the GGTT mappings have been restored by
 * i915_ggtt_resume().
 */
void intel_dpt_resume(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

/**
 * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
 * @i915: device instance
 *
 * Suspend the memory mapping during system suspend for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table.
 *
 * This function must be called before the GGTT mappings are suspended by
 * i915_ggtt_suspend().
 */
void intel_dpt_suspend(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);

	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_suspend_vm(fb->dpt_vm);
	}

	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

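/**
 * intel_dpt_create - create the DPT address space for a framebuffer
 * @fb: the framebuffer that will be mapped through the DPT
 *
 * Allocate an object large enough to hold one PTE per framebuffer page,
 * preferring local memory, and initialize an &struct i915_address_space
 * that writes its entries into that object.
 *
 * Returns: the new address space, or an ERR_PTR() on failure.
 */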
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

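	/*
	 * The DPT needs one gen8 PTE per framebuffer page; remapped (POT
	 * stride) views can require more pages than the object itself covers.
	 */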
	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

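	/*
	 * Prefer local memory for the page table, fall back to stolen memory
	 * if there is a mappable aperture, and finally to system memory on
	 * parts without LMEM.
	 */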
	dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
		drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
		dpt_obj = i915_gem_object_create_internal(i915, size);
	}
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
	if (!ret) {
		ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
		i915_gem_object_unlock(dpt_obj);
	}
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = to_gt(i915);
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma = dpt_bind_vma;
	vm->vma_ops.unbind_vma = dpt_unbind_vma;

	vm->pte_encode = gen8_ggtt_pte_encode;

	dpt->obj = dpt_obj;

	return &dpt->vm;
}

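/**
 * intel_dpt_destroy - drop the framebuffer's reference to its DPT
 * @vm: the &struct i915_address_space of the DPT
 *
 * Puts the address space created by intel_dpt_create(); the backing object
 * is released from dpt_cleanup() once the last reference is gone.
 */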
void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vm_put(&dpt->vm);
}