0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016 #ifndef __INTEL_GTT_H__
0017 #define __INTEL_GTT_H__
0018
0019 #include <linux/io-mapping.h>
0020 #include <linux/kref.h>
0021 #include <linux/mm.h>
0022 #include <linux/pagevec.h>
0023 #include <linux/scatterlist.h>
0024 #include <linux/workqueue.h>
0025
0026 #include <drm/drm_mm.h>
0027
0028 #include "gt/intel_reset.h"
0029 #include "i915_selftest.h"
0030 #include "i915_vma_resource.h"
0031 #include "i915_vma_types.h"
0032 #include "i915_params.h"
0033 #include "intel_memory_region.h"
0034
/* Allocation flags for GTT page tables: allowed to fail, retries, no warning */
#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

/* Optional trace_printk() debugging, compiled out unless enabled in Kconfig */
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

/* Page sizes the GTT can map */
#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

/* Negating a power of two yields the mask selecting the aligned part */
#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

/* Fence (tiling) registers */
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6
0060
/* gen6 PTEs are 32 bit; gen8+ PTEs are 64 bit */
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

/* Number of PTEs per page of page table, given a PTE size */
#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)

/*
 * gen6: fold the high address bits (39:32) down into PTE bits 11:4,
 * as the low 12 bits of the page-aligned address are otherwise unused.
 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID REG_BIT(0)

#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID REG_BIT(0)
/* Number of PTEs addressed by a single PDE, given the PDE's shift */
#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
#define BYT_PTE_WRITEABLE REG_BIT(1)

/* PTE flag marking pages backed by device local memory (LMEM) */
#define GEN12_PPGTT_PTE_LM BIT_ULL(11)

#define GEN12_GGTT_PTE_LM BIT_ULL(1)

#define GEN12_PDE_64K BIT(6)
0096
0097
0098
0099
0100
/*
 * hsw: the cacheability control value is split across the PTE — bits 2:0
 * of the value go to PTE bits 3:1, and value bit 3 goes to PTE bit 11.
 */
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
/* hsw: like gen6, but only 7 high address bits (38:32) fold into 10:4 */
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)

/* Number of page-directory pointers in a 3-level (legacy 32b) gen8 PPGTT */
#define GEN8_3LVL_PDPES 4

/* Page attributes expressed with the x86 PAT encoding bits */
#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE 0
#define PPAT_CACHED _PAGE_PAT
#define PPAT_DISPLAY_ELLC _PAGE_PCD

#define CHV_PPAT_SNOOP REG_BIT(6)
#define GEN8_PPAT_AGE(x) ((x)<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
/* Position PPAT entry @x into slot @i of the 8-entry PPAT register image */
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))

#define GEN8_PAGE_PRESENT BIT_ULL(0)
#define GEN8_PAGE_RW BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
0148
enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

/* Iterate the DMA addresses of @__sgt in I915_GTT_PAGE_SIZE (4K) steps */
#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
0158
struct i915_page_table {
	struct drm_i915_gem_object *base;	/* backing store for the entries */
	union {
		atomic_t used;			/* number of entries in use */
		struct i915_page_table *stash;	/* next free pt when on a stash list */
	};
	bool is_compact;	/* uses compact (64K) PT layout */
};
0167
struct i915_page_directory {
	struct i915_page_table pt;	/* must be first: a pd is-a pt (see px_pt()) */
	spinlock_t lock;		/* protects entry[] */
	void **entry;			/* children: page tables or directories */
};
0173
/*
 * Compile-time type dispatch: evaluate @expr if @x has type @type (or its
 * const-qualified variant), otherwise fall through to @other. Used to build
 * the polymorphic px_*() accessors below.
 */
#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

/* Resolve an object/pt/pd pointer to its backing GEM object */
#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
/* DMA address of the backing store */
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
/* Kernel virtual address of the backing store */
#define px_vaddr(px) (__px_vaddr(px_base(px)))

/* Resolve a pt/pd pointer to its embedded struct i915_page_table */
#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)
0199
/*
 * Preallocated page-table structures, built up-front by
 * i915_vm_alloc_pt_stash() so that allocate_va_range() itself need not
 * allocate memory.
 */
struct i915_vm_pt_stash {
	/* chains of preallocated tables, linked via i915_page_table.stash;
	 * pt[0] holds page tables, pt[1] page directories
	 */
	struct i915_page_table *pt[2];
	/* size (bytes) of the backing object used for new page tables */
	int pt_sz;
};
0212
struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

};
0228
struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;	/* deferred release context */

	struct drm_mm mm;		/* range manager for this VA space */
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total;		/* size of the VA space, in bytes */
	u64 reserved;		/* size addr space reserved */
	/* minimum GTT alignment, indexed by intel_memory_type */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	/* scratch page/directories, one per page-table level */
	struct drm_i915_gem_object *scratch[4];

	/*
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/*
	 * List of vmas not yet bound or evicted.
	 */
	struct list_head unbound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	/* NOTE(review): presumably skips PTE rewrites when the vm is being
	 * torn down — confirm against users of this flag.
	 */
	bool skip_pte_rewrite:1;

	u8 top;			/* top page-table level */
	u8 pd_shift;
	u8 scratch_order;	/* allocation order of the scratch page */

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree of pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	/* Encode a DMA address + caching/flags into a PTE value */
	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags);
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level cache_level,
			       u32 flags);
	/* raw_* variants: NOTE(review) presumably bypass the normal
	 * bookkeeping/wakeref paths — confirm against implementations.
	 */
	void (*raw_insert_page)(struct i915_address_space *vm,
				dma_addr_t addr,
				u64 offset,
				enum i915_cache_level cache_level,
				u32 flags);
	void (*raw_insert_entries)(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   enum i915_cache_level cache_level,
				   u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	/* Invoke @fn on every page table covering [start, start + length) */
	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};
0332
0333
0334
0335
0336
0337
0338
0339
0340
/*
 * The Graphics Translation Table is the way in which GEN hardware translates
 * a virtual address in the GTT into a physical address.
 */
struct i915_ggtt {
	struct i915_address_space vm;	/* must be first (see i915_vm_to_ggtt()) */

	struct io_mapping iomap;	/* mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* end of mappable part of GGTT */

	/* "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/* PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	/*
	 * NOTE(review): pte_lost/probed_pte appear to track whether GGTT PTE
	 * contents survived suspend, so they can be restored on resume —
	 * confirm against i915_ggtt_mark_pte_lost()/i915_ggtt_resume_vm().
	 */
	bool pte_lost;

	/* PTE value sampled at probe time */
	u64 probed_pte;

	int mtrr;	/* MTRR covering the aperture, or negative if none */

	/* Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/* Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;	/* minimum GGTT offset for pinning — see users */

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/*
	 * List of all objects in gtt_space, currently mmaped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;		/* protects error_capture */
	struct drm_mm_node error_capture;	/* reserved node for error capture */
	struct drm_mm_node uc_fw;		/* reserved node for uC firmware */
};
0396
struct i915_ppgtt {
	struct i915_address_space vm;	/* must be first (see i915_vm_to_ppgtt()) */

	struct i915_page_directory *pd;	/* top-level page directory */
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
0411
static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	/* Nonzero iff the VA space exceeds 4 GiB, requiring 4 page-table levels */
	return (vm->total - 1) >> 32;
}
0417
static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	/* True when the scratch page was allocated with 64K granularity */
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
0423
0424 static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
0425 enum intel_memory_type type)
0426 {
0427
0428 if ((int)type >= ARRAY_SIZE(vm->min_alignment))
0429 type = INTEL_MEMORY_SYSTEM;
0430
0431 return vm->min_alignment[type];
0432 }
0433
/* Minimum GTT alignment required for @obj, based on its backing region */
static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	/* READ_ONCE: the region pointer may change under us — presumably
	 * due to object migration; treat a NULL region as system memory.
	 */
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}
0442
0443 static inline bool
0444 i915_vm_has_cache_coloring(struct i915_address_space *vm)
0445 {
0446 return i915_is_ggtt(vm) && vm->mm.color_adjust;
0447 }
0448
/* Downcast a vm to its containing GGTT; only valid when i915_is_ggtt(vm) */
static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	/* the container_of() below relies on vm being the first member */
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}
0456
/* Downcast a vm to its containing PPGTT; invalid for GGTT/DPT vms */
static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	/* the container_of() below relies on vm being the first member */
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}
0464
/* Acquire an additional reference; caller must already hold one */
static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}
0471
/* Try to acquire a reference; returns NULL if the vm is already dying */
static inline struct i915_address_space *
i915_vm_tryget(struct i915_address_space *vm)
{
	return kref_get_unless_zero(&vm->ref) ? vm : NULL;
}
0477
/* Sanity check that the vm still holds at least one reference */
static inline void assert_vm_alive(struct i915_address_space *vm)
{
	GEM_BUG_ON(!kref_read(&vm->ref));
}
0482
0483
0484
0485
0486
0487
0488
/*
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}
0494
void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

/* Release a reference on the vm; destruction runs once the count hits zero */
static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}
0503
0504
0505
0506
0507
/*
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference we want to put
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);
0515
0516 static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
0517 {
0518 const u32 mask = NUM_PTE(pde_shift) - 1;
0519
0520 return (address >> PAGE_SHIFT) & mask;
0521 }
0522
0523
0524
0525
0526
0527
/*
 * i915_pte_count - number of PTEs needed for [addr, addr + length) within
 * the page directory covering @addr. If the range crosses into the next
 * page directory, the count is clamped at the directory boundary; the
 * caller is expected to continue from there with a fresh call.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	/* range crosses a PDE boundary: count only the PTEs left in this PDE */
	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
0543
/* Index of the page-directory entry covering @addr at the given shift */
static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}
0548
/* Look up the n'th child page table of @pd (may be NULL if unallocated) */
static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}
0555
/* Look up the n'th child page directory of @pdp (may be NULL if unallocated) */
static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}
0562
/* DMA address of the n'th top-level page table of @ppgtt */
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	/* unallocated entries point at the scratch page for this level */
	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}
0570
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);
/* Default GGTT vma bind/unbind implementations (see i915_vma_ops) */
void intel_ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

/* GGTT probe/init/teardown entry points, called from driver load/unload */
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
0589
0590 static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
0591 {
0592 return ggtt->mappable_end > 0;
0593 }
0594
int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

/* GGTT suspend/resume: the *_vm variants operate on a single vm */
void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
/* Flag GGTT PTEs as (not) lost; pairs with the i915_ggtt.pte_lost state */
void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

/* Fill every 64b slot of the backing page with @v */
#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
/* Replicate a 32b value into both halves of each 64b slot */
#define fill32_px(px, v) do { \
	u64 v__ = lower_32_bits(v); \
	fill_px((px), v__ << 32 | v__); \
} while (0)

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

/* Allocators for page-table backing objects (system vs local memory) */
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

/* Free a page table (lvl 0) or page directory (lvl 1) and its backing store */
void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

/* Generic PPGTT vma bind/unbind implementations (see i915_vma_ops) */
void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

/* Pre-allocate the page tables needed to map @size bytes of VA */
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);
0688
0689 static inline struct sgt_dma {
0690 struct scatterlist *sg;
0691 dma_addr_t dma, max;
0692 } sgt_dma(struct i915_vma_resource *vma_res) {
0693 struct scatterlist *sg = vma_res->bi.pages->sgl;
0694 dma_addr_t addr = sg_dma_address(sg);
0695
0696 return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
0697 }
0698
0699 #endif