/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

/* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
 * tend to go wrong with calls to WARN_ON() from a bunch of places, which
 * makes it hard to read the kernel log:
 */
#define GEM_WARN_ON(x)  WARN_RATELIMIT(x, "%s", __stringify(x))

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */
struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;

	/* For address spaces associated with a specific process, this
	 * will be non-NULL:
	 */
	struct pid *pid;

	/* @faults: the number of GPU hangs associated with this address space */
	int faults;

	/** @va_start: lowest possible address to allocate */
	uint64_t va_start;

	/** @va_size: the size of the address space (in bytes) */
	uint64_t va_size;
};

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace);

void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size);
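
/*
 * Example: a minimal sketch of address space creation and teardown.  The
 * mmu pointer and the VA range below are hypothetical; real callers (the
 * GPU or display probe paths) derive them from the hardware:
 *
 *	struct msm_gem_address_space *aspace;
 *
 *	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
 *			0x100000000ULL - SZ_16M);
 *	if (IS_ERR(aspace))
 *		return PTR_ERR(aspace);
 *
 *	... use aspace, taking extra refs via msm_gem_address_space_get() ...
 *
 *	msm_gem_address_space_put(aspace);
 */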

struct msm_fence_context;

struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;              /* is the vma mapped in the MMU? */
	int inuse;                /* pin count */
	uint32_t fence_mask;      /* bitmask of rings with a fence in @fence */
	uint32_t fence[MSM_GPU_MAX_RINGS];   /* last fence, per ring */
	struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS];
};

int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end);
bool msm_gem_vma_inuse(struct msm_gem_vma *vma);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
void msm_gem_unpin_vma(struct msm_gem_vma *vma);
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size);
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
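
/*
 * Example: the rough order in which a vma moves through its lifecycle
 * (a sketch only; locking, pinning and error handling elided, and the
 * vma/aspace/sgt variables are assumed to exist):
 *
 *	msm_gem_init_vma(aspace, vma, size, 0, U64_MAX);   // allocate an iova
 *	msm_gem_map_vma(aspace, vma, prot, sgt, size);     // map pages at iova
 *	...
 *	msm_gem_unpin_vma(vma);                            // drop the pin
 *	msm_gem_close_vma(aspace, vma);                    // release the iova
 */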

struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

	/**
	 * Is object on inactive_dontneed list (ie. counted in
	 * priv->shrinkable_count)?
	 */
	bool dontneed : 1;

	/**
	 * Is object evictable (ie. counted in priv->evictable_count)?
	 */
	bool evictable : 1;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/**
	 * Node in list of all objects (mainly for debugfs, protected by
	 * priv->obj_lock)
	 */
	struct list_head node;

	/**
	 * An object is either:
	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
	 *     (depending on purgeability status)
	 *  active   - on one of the gpu's active_list
	 *
	 * These lists are protected by mm_lock (which should be acquired
	 * before the per GEM object lock).  One should *not* hold mm_lock in
	 * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
	 */
	struct list_head mm_list;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vmas;    /* list of msm_gem_vma */

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffers:
	 */
	struct drm_mm_node *vram_node;

	char name[32]; /* Identifier to print for debugfs files */

	int active_count;
	int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
void msm_gem_unpin_locked(struct drm_gem_object *obj);
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_set_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
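
/*
 * Example: mapping a BO into an address space for GPU access (a minimal
 * sketch; obj and aspace are assumed to exist, and the get/unpin calls
 * must balance):
 *
 *	uint64_t iova;
 *	int ret;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *
 *	... hand iova to the GPU ...
 *
 *	msm_gem_unpin_iova(obj, aspace);
 */
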
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
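
/*
 * Example: CPU access via the kernel mapping (a sketch; the data/size
 * variables are hypothetical, and get/put calls must balance so that
 * vmap_count stays accurate):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, size);
 *	msm_gem_put_vaddr(obj);
 */
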
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace);
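
/*
 * Example: a kernel-internal BO, e.g. for a ringbuffer or firmware (a
 * sketch; the size and flags are illustrative only).  On success the BO
 * is pinned at *iova and CPU-visible at the returned vaddr:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, SZ_32K, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	msm_gem_kernel_put(bo, aspace);
 */
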
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
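
/*
 * Example (the format string works like printk's, e.g.):
 *
 *	msm_gem_object_set_name(bo, "ring%d", ring->id);
 */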

#ifdef CONFIG_DEBUG_FS
struct msm_gem_stats {
	struct {
		unsigned count;
		size_t size;
	} all, active, resident, purgeable, purged;
};

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}

static inline bool __must_check
msm_gem_trylock(struct drm_gem_object *obj)
{
	return dma_resv_trylock(obj->resv);
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}
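
/*
 * Example: these helpers just wrap the object's dma_resv lock (a sketch):
 *
 *	msm_gem_lock(obj);
 *	... touch state protected by the obj lock ...
 *	msm_gem_unlock(obj);
 *
 * or, in a syscall path that should remain interruptible:
 *
 *	ret = msm_gem_lock_interruptible(obj);
 *	if (ret)
 *		return ret;
 */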

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
	/*
	 * Destroying the object is a special case.. msm_gem_free_object()
	 * calls many things that WARN_ON if the obj lock is not held.  But
	 * acquiring the obj lock in msm_gem_free_object() can cause a
	 * locking order inversion between reservation_ww_class_mutex and
	 * fs_reclaim.
	 *
	 * This deadlock is not actually possible, because no one should
	 * be already holding the lock when msm_gem_free_object() is called.
	 * Unfortunately lockdep is not aware of this detail.  So when the
	 * refcount drops to zero, we pretend it is already locked.
	 */
	return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0);
}

static inline bool is_active(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return msm_obj->active_count;
}

/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
	return msm_obj->base.import_attach || msm_obj->pin_count;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!is_unpurgeable(msm_obj);
}

static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

static inline void mark_purgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(msm_obj->dontneed))
		return;

	priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->dontneed = true;
}

static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(!msm_obj->dontneed))
		return;

	priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
	GEM_WARN_ON(priv->shrinkable_count < 0);
	msm_obj->dontneed = false;
}

static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
	return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}

static inline void mark_evictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (GEM_WARN_ON(msm_obj->evictable))
		return;

	priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->evictable = true;
}

static inline void mark_unevictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (GEM_WARN_ON(!msm_obj->evictable))
		return;

	priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
	GEM_WARN_ON(priv->evictable_count < 0);
	msm_obj->evictable = false;
}

void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc):
 */
struct msm_gem_submit {
	struct drm_sched_job base;
	struct kref ref;
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;
	struct list_head node;   /* node in ring submit list */
	struct ww_acquire_ctx ticket;
	uint32_t seqno;          /* Sequence number of the submit on the ring */

	/* Hw fence, which is created when the scheduler executes the job, and
	 * is signaled when the hw finishes (via seqno write from cmdstream)
	 */
	struct dma_fence *hw_fence;

	/* Userspace visible fence, which is signaled by the scheduler after
	 * the hw_fence is signaled.
	 */
	struct dma_fence *user_fence;

	int fence_id;       /* key into queue->fence_idr */
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;    /* submitting process */
	bool fault_dumped;  /* Limit devcoredump dumping to one per submit */
	bool valid;         /* true if no cmdstream patching needed */
	bool in_rb;         /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	u32 ident;          /* An identifier for the submit, for logging */
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint64_t iova;
		uint32_t offset;/* in dwords */
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
		uint32_t nr_relocs;
		struct drm_msm_gem_submit_reloc *relocs;
	} *cmd;  /* array of size nr_cmds */
	struct {
		/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID	0x8000	/* is current addr in cmdstream correct/valid? */
#define BO_LOCKED	0x4000	/* obj lock is held */
#define BO_ACTIVE	0x2000	/* active refcnt is held */
#define BO_OBJ_PINNED	0x1000	/* obj (pages) is pinned and on active list */
#define BO_VMA_PINNED	0x0800	/* vma (virtual address) is pinned */
		uint32_t flags;
		union {
			struct msm_gem_object *obj;
			uint32_t handle;
		};
		uint64_t iova;
		struct msm_gem_vma *vma;
	} bos[];
};

static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
{
	return container_of(job, struct msm_gem_submit, base);
}

void __msm_gem_submit_destroy(struct kref *kref);

static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}
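
/*
 * Example: holding a submit across an async boundary (a sketch):
 *
 *	msm_gem_submit_get(submit);	// e.g. before handing off to a worker
 *	...
 *	msm_gem_submit_put(submit);	// the last put frees the submit
 */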

void msm_submit_retire(struct msm_gem_submit *submit);

/* helper to determine if a buffer in submit should be dumped, used for both
 * devcoredump and debugfs cmdstream dumping:
 */
static inline bool
should_dump(struct msm_gem_submit *submit, int idx)
{
	extern bool rd_full;
	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}

#endif /* __MSM_GEM_H__ */