0001
0002
0003
0004
0005 #include "dpu_hwio.h"
0006 #include "dpu_hw_catalog.h"
0007 #include "dpu_hw_vbif.h"
0008
0009 #define VBIF_VERSION 0x0000
0010 #define VBIF_CLK_FORCE_CTRL0 0x0008
0011 #define VBIF_CLK_FORCE_CTRL1 0x000C
0012 #define VBIF_QOS_REMAP_00 0x0020
0013 #define VBIF_QOS_REMAP_01 0x0024
0014 #define VBIF_QOS_REMAP_10 0x0028
0015 #define VBIF_QOS_REMAP_11 0x002C
0016 #define VBIF_WRITE_GATHER_EN 0x00AC
0017 #define VBIF_IN_RD_LIM_CONF0 0x00B0
0018 #define VBIF_IN_RD_LIM_CONF1 0x00B4
0019 #define VBIF_IN_RD_LIM_CONF2 0x00B8
0020 #define VBIF_IN_WR_LIM_CONF0 0x00C0
0021 #define VBIF_IN_WR_LIM_CONF1 0x00C4
0022 #define VBIF_IN_WR_LIM_CONF2 0x00C8
0023 #define VBIF_OUT_RD_LIM_CONF0 0x00D0
0024 #define VBIF_OUT_WR_LIM_CONF0 0x00D4
0025 #define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
0026 #define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
0027 #define VBIF_XIN_PND_ERR 0x0190
0028 #define VBIF_XIN_SRC_ERR 0x0194
0029 #define VBIF_XIN_CLR_ERR 0x019C
0030 #define VBIF_XIN_HALT_CTRL0 0x0200
0031 #define VBIF_XIN_HALT_CTRL1 0x0204
0032 #define VBIF_XINL_QOS_RP_REMAP_000 0x0550
0033 #define VBIF_XINL_QOS_LVL_REMAP_000(vbif) (VBIF_XINL_QOS_RP_REMAP_000 + (vbif)->cap->qos_rp_remap_size)
0034
0035 static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
0036 u32 *pnd_errors, u32 *src_errors)
0037 {
0038 struct dpu_hw_blk_reg_map *c;
0039 u32 pnd, src;
0040
0041 if (!vbif)
0042 return;
0043 c = &vbif->hw;
0044 pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
0045 src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
0046
0047 if (pnd_errors)
0048 *pnd_errors = pnd;
0049 if (src_errors)
0050 *src_errors = src;
0051
0052 DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
0053 }
0054
0055 static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
0056 u32 xin_id, u32 value)
0057 {
0058 struct dpu_hw_blk_reg_map *c;
0059 u32 reg_off;
0060 u32 bit_off;
0061 u32 reg_val;
0062
0063
0064
0065
0066
0067 if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
0068 return;
0069
0070 c = &vbif->hw;
0071
0072 if (xin_id >= 8) {
0073 xin_id -= 8;
0074 reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
0075 } else {
0076 reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
0077 }
0078 bit_off = (xin_id & 0x7) * 4;
0079 reg_val = DPU_REG_READ(c, reg_off);
0080 reg_val &= ~(0x7 << bit_off);
0081 reg_val |= (value & 0x7) << bit_off;
0082 DPU_REG_WRITE(c, reg_off, reg_val);
0083 }
0084
0085 static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
0086 u32 xin_id, bool rd, u32 limit)
0087 {
0088 struct dpu_hw_blk_reg_map *c = &vbif->hw;
0089 u32 reg_val;
0090 u32 reg_off;
0091 u32 bit_off;
0092
0093 if (rd)
0094 reg_off = VBIF_IN_RD_LIM_CONF0;
0095 else
0096 reg_off = VBIF_IN_WR_LIM_CONF0;
0097
0098 reg_off += (xin_id / 4) * 4;
0099 bit_off = (xin_id % 4) * 8;
0100 reg_val = DPU_REG_READ(c, reg_off);
0101 reg_val &= ~(0xFF << bit_off);
0102 reg_val |= (limit) << bit_off;
0103 DPU_REG_WRITE(c, reg_off, reg_val);
0104 }
0105
0106 static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
0107 u32 xin_id, bool rd)
0108 {
0109 struct dpu_hw_blk_reg_map *c = &vbif->hw;
0110 u32 reg_val;
0111 u32 reg_off;
0112 u32 bit_off;
0113 u32 limit;
0114
0115 if (rd)
0116 reg_off = VBIF_IN_RD_LIM_CONF0;
0117 else
0118 reg_off = VBIF_IN_WR_LIM_CONF0;
0119
0120 reg_off += (xin_id / 4) * 4;
0121 bit_off = (xin_id % 4) * 8;
0122 reg_val = DPU_REG_READ(c, reg_off);
0123 limit = (reg_val >> bit_off) & 0xFF;
0124
0125 return limit;
0126 }
0127
0128 static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
0129 u32 xin_id, bool enable)
0130 {
0131 struct dpu_hw_blk_reg_map *c = &vbif->hw;
0132 u32 reg_val;
0133
0134 reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
0135
0136 if (enable)
0137 reg_val |= BIT(xin_id);
0138 else
0139 reg_val &= ~BIT(xin_id);
0140
0141 DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
0142 }
0143
0144 static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
0145 u32 xin_id)
0146 {
0147 struct dpu_hw_blk_reg_map *c = &vbif->hw;
0148 u32 reg_val;
0149
0150 reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
0151
0152 return (reg_val & BIT(xin_id)) ? true : false;
0153 }
0154
0155 static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
0156 u32 xin_id, u32 level, u32 remap_level)
0157 {
0158 struct dpu_hw_blk_reg_map *c;
0159 u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;
0160
0161 if (!vbif)
0162 return;
0163
0164 c = &vbif->hw;
0165
0166 reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(vbif);
0167 reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
0168 reg_shift = (xin_id & 0x7) * 4;
0169
0170 reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
0171 reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);
0172
0173 mask = 0x7 << reg_shift;
0174
0175 reg_val &= ~mask;
0176 reg_val |= (remap_level << reg_shift) & mask;
0177
0178 reg_val_lvl &= ~mask;
0179 reg_val_lvl |= (remap_level << reg_shift) & mask;
0180
0181 DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
0182 DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
0183 }
0184
0185 static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
0186 {
0187 struct dpu_hw_blk_reg_map *c;
0188 u32 reg_val;
0189
0190 if (!vbif || xin_id >= MAX_XIN_COUNT)
0191 return;
0192
0193 c = &vbif->hw;
0194
0195 reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
0196 reg_val |= BIT(xin_id);
0197 DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
0198 }
0199
0200 static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
0201 unsigned long cap)
0202 {
0203 ops->set_limit_conf = dpu_hw_set_limit_conf;
0204 ops->get_limit_conf = dpu_hw_get_limit_conf;
0205 ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
0206 ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
0207 if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
0208 ops->set_qos_remap = dpu_hw_set_qos_remap;
0209 ops->set_mem_type = dpu_hw_set_mem_type;
0210 ops->clear_errors = dpu_hw_clear_errors;
0211 ops->set_write_gather_en = dpu_hw_set_write_gather_en;
0212 }
0213
0214 static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
0215 const struct dpu_mdss_cfg *m,
0216 void __iomem *addr,
0217 struct dpu_hw_blk_reg_map *b)
0218 {
0219 int i;
0220
0221 for (i = 0; i < m->vbif_count; i++) {
0222 if (vbif == m->vbif[i].id) {
0223 b->blk_addr = addr + m->vbif[i].base;
0224 b->log_mask = DPU_DBG_MASK_VBIF;
0225 return &m->vbif[i];
0226 }
0227 }
0228
0229 return ERR_PTR(-EINVAL);
0230 }
0231
0232 struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
0233 void __iomem *addr,
0234 const struct dpu_mdss_cfg *m)
0235 {
0236 struct dpu_hw_vbif *c;
0237 const struct dpu_vbif_cfg *cfg;
0238
0239 c = kzalloc(sizeof(*c), GFP_KERNEL);
0240 if (!c)
0241 return ERR_PTR(-ENOMEM);
0242
0243 cfg = _top_offset(idx, m, addr, &c->hw);
0244 if (IS_ERR_OR_NULL(cfg)) {
0245 kfree(c);
0246 return ERR_PTR(-EINVAL);
0247 }
0248
0249
0250
0251
0252 c->idx = idx;
0253 c->cap = cfg;
0254 _setup_vbif_ops(&c->ops, c->cap->features);
0255
0256
0257
0258 return c;
0259 }
0260
/*
 * dpu_hw_vbif_destroy - free a context allocated by dpu_hw_vbif_init.
 * @vbif: context to free; NULL is a safe no-op (kfree(NULL) is allowed)
 */
void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
{
	kfree(vbif);
}