/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */

#ifndef _A6XX_GMU_H_
#define _A6XX_GMU_H_

#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"

struct a6xx_gmu_bo {
	struct drm_gem_object *obj;
	void *virt;
	size_t size;
	u64 iova;
};

/*
 * These define the different GMU boot states passed as boot_state to
 * a6xx_hfi_start() - they tell the GMU how much state has to be
 * (re)initialized on the way up.
 */

/* The GMU firmware is still resident - restart it without a full reload */
#define GMU_WARM_BOOT 0

/* Load the GMU firmware from scratch and boot it */
#define GMU_COLD_BOOT 1

/*
 * These define how much power management the GMU is allowed to do on its
 * own - the higher the level, the more the GMU controls without CPU help.
 */

/* The GMU does no power management on its own; the CPU drives everything */
#define GMU_IDLE_STATE_ACTIVE 0

/* The GMU can power collapse the SP/TP (SPTPRAC) block on its own */
#define GMU_IDLE_STATE_SPTP 2

/* The GMU can do full inter-frame power collapse (IFPC) on its own */
#define GMU_IDLE_STATE_IFPC 3

struct a6xx_gmu {
	struct device *dev;

	/* Serializes CPU communication with the GMU */
	struct mutex lock;

	struct msm_gem_address_space *aspace;

	void __iomem *mmio;
	void __iomem *rscc;

	int hfi_irq;
	int gmu_irq;

	struct device *gxpd;

	int idle_level;

	struct a6xx_gmu_bo hfi;
	struct a6xx_gmu_bo debug;
	struct a6xx_gmu_bo icache;
	struct a6xx_gmu_bo dcache;
	struct a6xx_gmu_bo dummy;
	struct a6xx_gmu_bo log;

	int nr_clocks;
	struct clk_bulk_data *clocks;
	struct clk *core_clk;
	struct clk *hub_clk;

	/* The GPU performance index currently in effect */
	int current_perf_index;

	int nr_gpu_freqs;
	unsigned long gpu_freqs[16];
	u32 gx_arc_votes[16];

	int nr_gmu_freqs;
	unsigned long gmu_freqs[4];
	u32 cx_arc_votes[4];

	unsigned long freq;

	struct a6xx_hfi_queue queues[2];

	bool initialized;
	bool hung;
	bool legacy;
};

/* GMU register offsets are dword-based, hence the "<< 2" byte conversion */
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
{
	return msm_readl(gmu->mmio + (offset << 2));
}

static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
	msm_writel(value, gmu->mmio + (offset << 2));
}

static inline void
gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
{
	memcpy_toio(gmu->mmio + (offset << 2), data, size);

	/* Make sure the block write posts before any following register I/O */
	wmb();
}

static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
	u32 val = gmu_read(gmu, reg);

	val &= ~mask;

	gmu_write(gmu, reg, val | or);
}
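
/*
 * Illustrative use of gmu_rmw() - the register name below is hypothetical,
 * not one defined by this driver. Clear the two-bit field at [1:0] and set
 * it to 2 in a single read-modify-write:
 *
 *	gmu_rmw(gmu, REG_A6XX_GMU_EXAMPLE, 0x3, 0x2);
 */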

static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
{
	u64 val;

	val = (u64) msm_readl(gmu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);

	return val;
}

#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
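
/*
 * Illustrative use of gmu_poll_timeout() with a hypothetical register and
 * bit: poll every 100us until bit 0 is set, giving up after 5000us.
 * readl_poll_timeout() returns 0 on success or -ETIMEDOUT on failure:
 *
 *	u32 val;
 *	int ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_EXAMPLE_STATUS, val,
 *		val & BIT(0), 100, 5000);
 */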

static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
{
	return msm_readl(gmu->rscc + (offset << 2));
}

static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
	msm_writel(value, gmu->rscc + (offset << 2));
}

#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
		interval, timeout)

/*
 * These are the available OOB (out of band) requests to the GMU. "Out of
 * band" means the CPU talks to the GMU directly via dedicated interrupt
 * bits instead of going through the HFI queues: the CPU sets a request bit,
 * waits for the GMU to raise the matching acknowledgment bit, and clears
 * the state when it is done. OOB requests are used to keep the GMU/GPU
 * awake across critical sequences.
 */
enum a6xx_gmu_oob_state {
	/*
	 * Tell the GMU that a boot or slumber transition has started; which
	 * of the two operations is in progress is communicated separately.
	 */
	GMU_OOB_BOOT_SLUMBER = 0,
	/*
	 * Ask the GMU to keep the GPU powered up while the CPU accesses GPU
	 * registers in a critical section.
	 */
	GMU_OOB_GPU_SET,
	/*
	 * Request a GPU power level change (DCVS) on behalf of the CPU.
	 */
	GMU_OOB_DCVS_SET,
	/*
	 * Keep the GPU on so the CPU can read the GPU performance counters.
	 */
	GMU_OOB_PERFCOUNTER_SET,
};
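
/*
 * Sketch of the handshake these values drive (illustrative; the actual
 * request/ack bit positions for each OOB state depend on the GMU
 * generation, and the register names are assumed from the a6xx register
 * map rather than defined here):
 *
 *	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, BIT(request));
 *	gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
 *		val & BIT(ack), 100, 1000000);
 *	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, BIT(ack));
 */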

void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);

bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);

#endif