0001
0002
0003
0004 #ifndef __A5XX_GPU_H__
0005 #define __A5XX_GPU_H__
0006
0007 #include "adreno_gpu.h"
0008
0009
0010 #undef ROP_COPY
0011 #undef ROP_XOR
0012
0013 #include "a5xx.xml.h"
0014
/*
 * Per-device state for an Adreno A5xx GPU, wrapping the generic
 * adreno_gpu base object.
 */
struct a5xx_gpu {
	struct adreno_gpu base;

	/* PM4 firmware buffer object and its GPU (iova) address */
	struct drm_gem_object *pm4_bo;
	uint64_t pm4_iova;

	/* PFP firmware buffer object and its GPU (iova) address */
	struct drm_gem_object *pfp_bo;
	uint64_t pfp_iova;

	/* GPMU (power management unit) firmware buffer, address and size */
	struct drm_gem_object *gpmu_bo;
	uint64_t gpmu_iova;
	uint32_t gpmu_dwords;

	/* Leakage value used by the limits-management code */
	uint32_t lm_leakage;

	/* Ring currently executing, and the ring a preemption targets */
	struct msm_ringbuffer *cur_ring;
	struct msm_ringbuffer *next_ring;

	/* Per-ring preemption records: BOs, counter BOs, CPU mappings, iovas */
	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
	struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
	struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];

	/* Current preempt_state (see enum preempt_state) and watchdog timer */
	atomic_t preempt_state;
	struct timer_list preempt_timer;

	/*
	 * Shadow buffer holding one uint32_t slot per ring (see shadowptr());
	 * NOTE(review): presumably a shadowed read pointer per ring — confirm
	 * against the a5xx ringbuffer code.
	 */
	struct drm_gem_object *shadow_bo;
	uint64_t shadow_iova;
	uint32_t *shadow;

	/* True when the CP WHERE_AM_I mechanism is usable — TODO confirm */
	bool has_whereami;
};
0048
/* Upcast from the embedded adreno_gpu base to the containing a5xx_gpu */
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)

#ifdef CONFIG_DEBUG_FS
void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
/*
 * States of the ringbuffer preemption state machine, stored in
 * a5xx_gpu::preempt_state.  Per a5xx_in_preempt(), only PREEMPT_NONE and
 * PREEMPT_ABORT count as "not preempting".  NOTE(review): individual state
 * descriptions below are inferred from the names — confirm against the
 * preemption implementation.
 */
enum preempt_state {
	PREEMPT_NONE = 0,	/* no preemption in progress */
	PREEMPT_START,		/* a preempt switch has been initiated */
	PREEMPT_ABORT,		/* switch abandoned (treated as idle) */
	PREEMPT_TRIGGERED,	/* hardware has been told to switch */
	PREEMPT_FAULTED,	/* the switch failed */
	PREEMPT_PENDING,	/* completion IRQ outstanding */
};
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
/*
 * Per-ring preemption record.  NOTE(review): the magic field and the
 * one-per-ring GEM allocation suggest this layout is shared with the CP
 * firmware and must not be reordered — confirm before changing.
 */
struct a5xx_preempt_record {
	uint32_t magic;		/* expected to hold A5XX_PREEMPT_RECORD_MAGIC */
	uint32_t info;
	uint32_t data;
	uint32_t cntl;
	uint32_t rptr;		/* saved ring read pointer */
	uint32_t wptr;		/* saved ring write pointer */
	uint64_t rptr_addr;	/* GPU address for rptr writeback */
	uint64_t rbase;		/* GPU base address of the ring */
	uint64_t counter;	/* GPU address of the counter buffer */
};
0117
0118
/* Value written to a5xx_preempt_record::magic to identify a valid record */
#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL

/*
 * Space reserved for each per-ring preemption record (64 KiB) —
 * NOTE(review): presumably larger than struct a5xx_preempt_record to leave
 * room for firmware-owned data; confirm against the allocation code.
 */
#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)

/* Size of the per-ring counter buffer: sixteen 4-byte slots */
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)


int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
0137
0138 static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
0139 uint32_t reg, uint32_t mask, uint32_t value)
0140 {
0141 while (usecs--) {
0142 udelay(1);
0143 if ((gpu_read(gpu, reg) & mask) == value)
0144 return 0;
0145 cpu_relax();
0146 }
0147
0148 return -ETIMEDOUT;
0149 }
0150
/*
 * GPU address of a ring's slot in the shadow buffer: one uint32_t per
 * ring, indexed by ring id from shadow_iova.
 */
#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
		((ring)->id * sizeof(uint32_t)))

bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);

/* Preemption lifecycle: setup, per-reset HW init, trigger, IRQ, teardown */
void a5xx_preempt_init(struct msm_gpu *gpu);
void a5xx_preempt_hw_init(struct msm_gpu *gpu);
void a5xx_preempt_trigger(struct msm_gpu *gpu);
void a5xx_preempt_irq(struct msm_gpu *gpu);
void a5xx_preempt_fini(struct msm_gpu *gpu);

void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
0164
0165
0166 static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
0167 {
0168 int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
0169
0170 return !(preempt_state == PREEMPT_NONE ||
0171 preempt_state == PREEMPT_ABORT);
0172 }
0173
0174 #endif