#ifndef __MSM_RINGBUFFER_H__
#define __MSM_RINGBUFFER_H__

#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

#define rbmemptr(ring, member) \
	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))

#define rbmemptr_stats(ring, index, member) \
	(rbmemptr((ring), stats) + \
	((index) * sizeof(struct msm_gpu_submit_stats)) + \
	offsetof(struct msm_gpu_submit_stats, member))
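
/*
 * Example (illustrative, not part of the original header): rbmemptr()
 * yields the GPU iova of a field in the shared msm_rbmemptrs block, and
 * rbmemptr_stats() indexes into the per-submit stats array:
 *
 *	uint64_t fence_iova = rbmemptr(ring, fence);
 *	uint64_t cp_start_iova = rbmemptr_stats(ring, idx, cpcycles_start);
 *
 * where idx is a submit slot in [0, MSM_GPU_SUBMIT_STATS_COUNT).
 */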

struct msm_gpu_submit_stats {
	u64 cpcycles_start;
	u64 cpcycles_end;
	u64 alwayson_start;
	u64 alwayson_end;
};

#define MSM_GPU_SUBMIT_STATS_COUNT 64

struct msm_rbmemptrs {
	volatile uint32_t rptr;
	volatile uint32_t fence;

	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
	volatile u64 ttbr0;
};
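
/*
 * Sketch (an assumption, for illustration): the msm_rbmemptrs block lives
 * in memory shared with the GPU; the CP writes rptr/fence/stats back and
 * the CPU side reads them, along the lines of:
 *
 *	uint32_t completed = ring->memptrs->fence;
 *
 * The fields are volatile because they are updated by the GPU behind the
 * compiler's back.
 */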

struct msm_ringbuffer {
	struct msm_gpu *gpu;
	int id;
	struct drm_gem_object *bo;
	uint32_t *start, *end, *cur, *next;

	/*
	 * The job scheduler for this ring.
	 */
	struct drm_gpu_scheduler sched;

	/*
	 * List of in-flight submits on this ring.  Protected by submit_lock.
	 */
	struct list_head submits;
	spinlock_t submit_lock;

	uint64_t iova;
	uint32_t hangcheck_fence;
	struct msm_rbmemptrs *memptrs;
	uint64_t memptrs_iova;
	struct msm_fence_context *fctx;

	/*
	 * Protects preemption state and serializes wptr updates against
	 * preemption on this ring.
	 */
	spinlock_t preempt_lock;
};

struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
		void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);

/* ringbuffer helpers */

static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
	/*
	 * ring->next points at the next dword to be written; wrap back to
	 * the start of the buffer once the end is reached.
	 */
	if (ring->next == ring->end)
		ring->next = ring->start;
	*(ring->next++) = data;
}
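
/*
 * Illustrative usage sketch (the values are placeholders; real callers
 * use the adreno packet-building macros): a command packet is emitted as
 * a sequence of OUT_RING() writes:
 *
 *	OUT_RING(ring, pkt_header);
 *	OUT_RING(ring, payload);
 *
 * ring->next is not visible to the GPU until the flush path publishes it
 * (as ring->cur / the wptr), so a half-written packet is never executed.
 */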

#endif /* __MSM_RINGBUFFER_H__ */