/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

/*
 * XXX: There is a prevalence of the assumption that we fit the
 * object's page count inside a 32bit _signed_ variable. Let's document
 * this and catch if we ever need to fix it. In the meantime, if you do
 * spot such a local variable, please consider fixing!
 *
 * Aside from our own locals (for which we have no excuse!):
 * - sg_table embeds unsigned int for num_pages
 * - get_user_pages*() mixed ints and longs
 */
#define GEM_CHECK_SIZE_OVERFLOW(sz) \
	GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)

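/*
 * Reject any size whose page count does not fit in a 32bit signed int
 * (see the note above) or that overflows drm_gem_object.size. The
 * dummy @obj local is never dereferenced; overflows_type() only
 * inspects its type.
 */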
static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (GEM_CHECK_SIZE_OVERFLOW(size))
		return true;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

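/*
 * Convert a pointer obtained under the RCU read lock into a full
 * reference. Returns NULL if the refcount has already dropped to zero,
 * i.e. the object is being destroyed and must not be used.
 */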
static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

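/*
 * Look up a GEM object by handle and acquire a full reference for the
 * caller. Returns NULL if no such handle exists (or the object is
 * already dying); release the reference with i915_gem_object_put().
 */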
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

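/*
 * Shadow the core DRM lookup as __deprecated so that any direct use of
 * drm_gem_object_lookup() in files including this header triggers a
 * compiler warning; the typed i915_gem_object_lookup() above should be
 * used instead.
 */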
__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

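/*
 * Lock the object's reservation, optionally as part of a ww acquire
 * context. On a freshly acquired lock with a ww context, the object is
 * referenced and added to the context's list so that backoff/fini can
 * unlock it later. -EALREADY (already locked by this context) is
 * folded into success; on -EDEADLK the contended object is stashed in
 * ww->contended for the backoff handler to relock first.
 */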
static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}

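/*
 * A minimal sketch of the intended ww locking pattern, assuming the
 * i915_gem_ww_ctx_init()/backoff()/fini() helpers from i915_gem_ww.h:
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... access obj under the lock ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */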
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

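/*
 * Fence tiling geometry: an X-tile is 512 bytes wide by 8 rows tall,
 * a Y-tile 128 bytes wide by 32 rows tall, hence the row heights used
 * below.
 */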
static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset, bool dma);

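/*
 * Convenience wrappers around __i915_gem_object_get_sg(): the first
 * iterates the CPU page view (obj->mm.get_page), the second the DMA
 * address view (obj->mm.get_dma_page) of the same backing store.
 */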
static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
}

static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
			   unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

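/*
 * Pin the backing pages, acquiring them first if necessary. The caller
 * must hold the object lock; the common case of already-pinned pages
 * is a single atomic increment.
 */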
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin is released, the mapping will be freed
 * whenever it is no longer required.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

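/*
 * Prepare the object's backing pages for CPU access, pinning them and
 * reporting through *needs_clflush which of the CLFLUSH_* flags below
 * the caller must honour around the access. Each successful prepare
 * must be paired with i915_gem_object_finish_access().
 */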
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

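/*
 * Reset the domain tracking for a fresh CPU write: all GPU-side caches
 * are considered stale, and if CPU writes to this object need a
 * clflush, remember that one is now owed.
 */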
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

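/*
 * The inline wrappers below test obj->frontbuffer first so that the
 * common case of no attached frontbuffer costs only an unlikely()
 * branch and no out-of-line call.
 */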
static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

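/*
 * Userptr objects rely on MMU notifiers to revoke access to the pages
 * behind them; without CONFIG_MMU_NOTIFIER the feature is compiled out
 * and the helpers below collapse into unreachable stubs.
 */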
#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* __I915_GEM_OBJECT_H__ */