#include <drm/drm_blend.h>
#include <drm/drm_print.h>
#include "d71_dev.h"
#include "malidp_io.h"

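/*
 * Read, decode and clear the LPU (layer processing unit) interrupt sources,
 * translating the raw status bits into KOMEDA_EVENT_ and KOMEDA_ERR_ flags.
 */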
static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
{
	u32 __iomem *reg = d71_pipeline->lpu_addr;
	u32 status, raw_status;
	u64 evts = 0ULL;

	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
	if (raw_status & LPU_IRQ_IBSY)
		evts |= KOMEDA_EVENT_IBSY;
	if (raw_status & LPU_IRQ_EOW)
		evts |= KOMEDA_EVENT_EOW;
	if (raw_status & LPU_IRQ_OVR)
		evts |= KOMEDA_EVENT_OVR;

	if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY | LPU_IRQ_OVR)) {
		u32 restore = 0, tbu_status;

		status = malidp_read32(reg, BLK_STATUS);
		if (status & LPU_STATUS_AXIE) {
			restore |= LPU_STATUS_AXIE;
			evts |= KOMEDA_ERR_AXIE;
		}
		if (status & LPU_STATUS_ACE0) {
			restore |= LPU_STATUS_ACE0;
			evts |= KOMEDA_ERR_ACE0;
		}
		if (status & LPU_STATUS_ACE1) {
			restore |= LPU_STATUS_ACE1;
			evts |= KOMEDA_ERR_ACE1;
		}
		if (status & LPU_STATUS_ACE2) {
			restore |= LPU_STATUS_ACE2;
			evts |= KOMEDA_ERR_ACE2;
		}
		if (status & LPU_STATUS_ACE3) {
			restore |= LPU_STATUS_ACE3;
			evts |= KOMEDA_ERR_ACE3;
		}
		if (status & LPU_STATUS_FEMPTY) {
			restore |= LPU_STATUS_FEMPTY;
			evts |= KOMEDA_EVENT_EMPTY;
		}
		if (status & LPU_STATUS_FFULL) {
			restore |= LPU_STATUS_FFULL;
			evts |= KOMEDA_EVENT_FULL;
		}

		if (restore != 0)
			malidp_write32_mask(reg, BLK_STATUS, restore, 0);

		restore = 0;

		tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
		if (tbu_status & LPU_TBU_STATUS_TCF) {
			restore |= LPU_TBU_STATUS_TCF;
			evts |= KOMEDA_ERR_TCF;
		}
		if (tbu_status & LPU_TBU_STATUS_TTNG) {
			restore |= LPU_TBU_STATUS_TTNG;
			evts |= KOMEDA_ERR_TTNG;
		}
		if (tbu_status & LPU_TBU_STATUS_TITR) {
			restore |= LPU_TBU_STATUS_TITR;
			evts |= KOMEDA_ERR_TITR;
		}
		if (tbu_status & LPU_TBU_STATUS_TEMR) {
			restore |= LPU_TBU_STATUS_TEMR;
			evts |= KOMEDA_ERR_TEMR;
		}
		if (tbu_status & LPU_TBU_STATUS_TTF) {
			restore |= LPU_TBU_STATUS_TTF;
			evts |= KOMEDA_ERR_TTF;
		}
		if (restore != 0)
			malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
	}

	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
	return evts;
}

static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
{
	u32 __iomem *reg = d71_pipeline->cu_addr;
	u32 status, raw_status;
	u64 evts = 0ULL;

	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
	if (raw_status & CU_IRQ_OVR)
		evts |= KOMEDA_EVENT_OVR;

	if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
		status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
		if (status & CU_STATUS_CPE)
			evts |= KOMEDA_ERR_CPE;
		if (status & CU_STATUS_ZME)
			evts |= KOMEDA_ERR_ZME;
		if (status & CU_STATUS_CFGE)
			evts |= KOMEDA_ERR_CFGE;
		if (status)
			malidp_write32_mask(reg, BLK_STATUS, status, 0);
	}

	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);

	return evts;
}

static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
{
	u32 __iomem *reg = d71_pipeline->dou_addr;
	u32 status, raw_status;
	u64 evts = 0ULL;

	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
	if (raw_status & DOU_IRQ_PL0)
		evts |= KOMEDA_EVENT_VSYNC;
	if (raw_status & DOU_IRQ_UND)
		evts |= KOMEDA_EVENT_URUN;

	if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
		u32 restore = 0;

		status = malidp_read32(reg, BLK_STATUS);
		if (status & DOU_STATUS_DRIFTTO) {
			restore |= DOU_STATUS_DRIFTTO;
			evts |= KOMEDA_ERR_DRIFTTO;
		}
		if (status & DOU_STATUS_FRAMETO) {
			restore |= DOU_STATUS_FRAMETO;
			evts |= KOMEDA_ERR_FRAMETO;
		}
		if (status & DOU_STATUS_TETO) {
			restore |= DOU_STATUS_TETO;
			evts |= KOMEDA_ERR_TETO;
		}
		if (status & DOU_STATUS_CSCE) {
			restore |= DOU_STATUS_CSCE;
			evts |= KOMEDA_ERR_CSCE;
		}

		if (restore != 0)
			malidp_write32_mask(reg, BLK_STATUS, restore, 0);
	}

	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
	return evts;
}

static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
{
	u64 evts = 0ULL;

	if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
		evts |= get_lpu_event(d71_pipeline);

	if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
		evts |= get_cu_event(d71_pipeline);

	if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
		evts |= get_dou_event(d71_pipeline);

	return evts;
}

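/*
 * Top-level interrupt handler: read the global (GCU) interrupt status and
 * dispatch to the per-pipeline LPU/CU/DOU decoders, accumulating the decoded
 * events in 'evts' for the core driver to act on.
 */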
static irqreturn_t
d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 status, gcu_status, raw_status;

	gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);

	if (gcu_status & GLB_IRQ_STATUS_GCU) {
		raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
		if (raw_status & GCU_IRQ_CVAL0)
			evts->pipes[0] |= KOMEDA_EVENT_FLIP;
		if (raw_status & GCU_IRQ_CVAL1)
			evts->pipes[1] |= KOMEDA_EVENT_FLIP;
		if (raw_status & GCU_IRQ_ERR) {
			status = malidp_read32(d71->gcu_addr, BLK_STATUS);
			if (status & GCU_STATUS_MERR) {
				evts->global |= KOMEDA_ERR_MERR;
				malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
						    GCU_STATUS_MERR, 0);
			}
		}

		malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
	}

	if (gcu_status & GLB_IRQ_STATUS_PIPE0)
		evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);

	if (gcu_status & GLB_IRQ_STATUS_PIPE1)
		evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);

	return IRQ_RETVAL(gcu_status);
}

#define ENABLED_GCU_IRQS	(GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
				 GCU_IRQ_MODE | GCU_IRQ_ERR)
#define ENABLED_LPU_IRQS	(LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
#define ENABLED_CU_IRQS		(CU_IRQ_OVR | CU_IRQ_ERR)
#define ENABLED_DOU_IRQS	(DOU_IRQ_UND | DOU_IRQ_ERR)

static int d71_enable_irq(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	struct d71_pipeline *pipe;
	u32 i;

	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
			    ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
	for (i = 0; i < d71->num_pipelines; i++) {
		pipe = d71->pipes[i];
		malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
				    ENABLED_CU_IRQS, ENABLED_CU_IRQS);
		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
				    ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
				    ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
	}
	return 0;
}

static int d71_disable_irq(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	struct d71_pipeline *pipe;
	u32 i;

	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
	for (i = 0; i < d71->num_pipelines; i++) {
		pipe = d71->pipes[i];
		malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
				    ENABLED_CU_IRQS, 0);
		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
				    ENABLED_LPU_IRQS, 0);
		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
				    ENABLED_DOU_IRQS, 0);
	}
	return 0;
}

static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
{
	struct d71_dev *d71 = mdev->chip_data;
	struct d71_pipeline *pipe = d71->pipes[master_pipe];

	malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
			    DOU_IRQ_PL0, on ? DOU_IRQ_PL0 : 0);
}

static int to_d71_opmode(int core_mode)
{
	switch (core_mode) {
	case KOMEDA_MODE_DISP0:
		return DO0_ACTIVE_MODE;
	case KOMEDA_MODE_DISP1:
		return DO1_ACTIVE_MODE;
	case KOMEDA_MODE_DUAL_DISP:
		return DO01_ACTIVE_MODE;
	case KOMEDA_MODE_INACTIVE:
		return INACTIVE_MODE;
	default:
		WARN(1, "Unknown operation mode");
		return INACTIVE_MODE;
	}
}

static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 opmode = to_d71_opmode(new_mode);
	int ret;

	malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode);

	ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
			   100, 1000, 10000);

	return ret;
}

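/*
 * Write CVAL for the master pipeline so the hardware picks up the newly
 * programmed (shadow) configuration.
 */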
static void d71_flush(struct komeda_dev *mdev,
		      int master_pipe, u32 active_pipes)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 reg_offset = (master_pipe == 0) ?
			 GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1;

	malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL);
}

static int d71_reset(struct d71_dev *d71)
{
	u32 __iomem *gcu = d71->gcu_addr;
	int ret;

	malidp_write32_mask(gcu, BLK_CONTROL,
			    GCU_CONTROL_SRST, GCU_CONTROL_SRST);

	ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
			   100, 1000, 10000);

	return ret;
}

void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
{
	int i;

	blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO);
	if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED)
		return;

	blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO);

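	/* read the valid input and output ids of this block */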
	for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++)
		blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0);
	for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++)
		blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0);
}

static void d71_cleanup(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;

	if (!d71)
		return;

	devm_kfree(mdev->dev, d71);
	mdev->chip_data = NULL;
}

static int d71_enum_resources(struct komeda_dev *mdev)
{
	struct d71_dev *d71;
	struct komeda_pipeline *pipe;
	struct block_header blk;
	u32 __iomem *blk_base;
	u32 i, value, offset;
	int err;

	d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
	if (!d71)
		return -ENOMEM;

	mdev->chip_data = d71;
	d71->mdev = mdev;
	d71->gcu_addr = mdev->reg_base;
	d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);

	err = d71_reset(d71);
	if (err) {
		DRM_ERROR("Failed to reset d71 device.\n");
		goto err_cleanup;
	}

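	/* probe the GCU: block and pipeline counts come from the core info */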
	value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO);
	d71->num_blocks = value & 0xFF;
	d71->num_pipelines = (value >> 8) & 0x7;

	if (d71->num_pipelines > D71_MAX_PIPELINE) {
		DRM_ERROR("d71 supports %d pipelines, but got: %d.\n",
			  D71_MAX_PIPELINE, d71->num_pipelines);
		err = -EINVAL;
		goto err_cleanup;
	}

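	/*
	 * Only the legacy HW has a separate PERIPH block; on newer HW the
	 * same configuration data is exposed through the GCU registers.
	 */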
	value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
	if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH)
		d71->periph_addr = NULL;

	if (d71->periph_addr) {
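		/* probe the PERIPHERAL block configuration (legacy HW) */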
		value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);

		d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
		d71->max_vsize = 4096;
		d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
		d71->supports_dual_link = !!(value & PERIPH_SPLIT_EN);
		d71->integrates_tbu = !!(value & PERIPH_TBU_EN);
	} else {
		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID0);
		d71->max_line_size = GCU_MAX_LINE_SIZE(value);
		d71->max_vsize = GCU_MAX_NUM_LINES(value);

		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID1);
		d71->num_rich_layers = GCU_NUM_RICH_LAYERS(value);
		d71->supports_dual_link = GCU_DISPLAY_SPLIT_EN(value);
		d71->integrates_tbu = GCU_DISPLAY_TBU_EN(value);
	}

	for (i = 0; i < d71->num_pipelines; i++) {
		pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
					   &d71_pipeline_funcs);
		if (IS_ERR(pipe)) {
			err = PTR_ERR(pipe);
			goto err_cleanup;
		}

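		/*
		 * D71 does not flush component disables to HW while the
		 * display output is being turned off, so disabling all
		 * pipeline components together with the display output in a
		 * single flush can leave stale state in HW. Work around this
		 * with a two-phase disable: first disable the components
		 * listed below while the display is still on, then turn off
		 * the display output itself.
		 */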
		value = KOMEDA_PIPELINE_IMPROCS |
			BIT(KOMEDA_COMPONENT_TIMING_CTRLR);

		pipe->standalone_disabled_comps = value;

		d71->pipes[i] = to_d71_pipeline(pipe);
	}

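	/*
	 * Walk the register blocks that follow the GCU and probe each one.
	 * num_blocks counts the GCU plus all valid and reserved blocks, so
	 * start at block 1 to skip the GCU itself.
	 */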
	i = 1;
	offset = D71_BLOCK_SIZE;
	while (i < d71->num_blocks) {
		blk_base = mdev->reg_base + (offset >> 2);

		d71_read_block_header(blk_base, &blk);
		if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) {
			err = d71_probe_block(d71, &blk, blk_base);
			if (err)
				goto err_cleanup;
		}

		i++;
		offset += D71_BLOCK_SIZE;
	}

	DRM_DEBUG("total %d (out of %d) blocks are found.\n",
		  i, d71->num_blocks);

	return 0;

err_cleanup:
	d71_cleanup(mdev);
	return err;
}

#define __HW_ID(__group, __format) \
	((((__group) & 0x7) << 3) | ((__format) & 0x7))

#define RICH		KOMEDA_FMT_RICH_LAYER
#define SIMPLE		KOMEDA_FMT_SIMPLE_LAYER
#define RICH_SIMPLE	(KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER)
#define RICH_WB		(KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER)
#define RICH_SIMPLE_WB	(RICH_SIMPLE | KOMEDA_FMT_WB_LAYER)

#define Rot_0		DRM_MODE_ROTATE_0
#define Flip_H_V	(DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0)
#define Rot_ALL_H_V	(DRM_MODE_ROTATE_MASK | Flip_H_V)

#define LYT_NM		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16)
#define LYT_WB		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
#define LYT_NM_WB	(LYT_NM | LYT_WB)

#define AFB_TH		AFBC(_TILED | _SPARSE)
#define AFB_TH_SC_YTR	AFBC(_TILED | _SC | _SPARSE | _YTR)
#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT)

static struct komeda_format_caps d71_format_caps_table[] = {
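	/* HW_ID | fourcc | layer_types | rotations | afbc_layouts | afbc_features */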
	{__HW_ID(0, 0), DRM_FORMAT_ARGB2101010, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
	{__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
	{__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS},
	{__HW_ID(0, 2), DRM_FORMAT_RGBA1010102, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
	{__HW_ID(0, 3), DRM_FORMAT_BGRA1010102, RICH_SIMPLE_WB, Flip_H_V, 0, 0},

	{__HW_ID(1, 0), DRM_FORMAT_ARGB8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
	{__HW_ID(1, 1), DRM_FORMAT_ABGR8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
	{__HW_ID(1, 1), DRM_FORMAT_ABGR8888, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS},
	{__HW_ID(1, 2), DRM_FORMAT_RGBA8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
	{__HW_ID(1, 3), DRM_FORMAT_BGRA8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},

	{__HW_ID(2, 0), DRM_FORMAT_XRGB8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
	{__HW_ID(2, 1), DRM_FORMAT_XBGR8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
	{__HW_ID(2, 2), DRM_FORMAT_RGBX8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
	{__HW_ID(2, 3), DRM_FORMAT_BGRX8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},

	{__HW_ID(3, 0), DRM_FORMAT_RGB888, RICH_SIMPLE_WB, Rot_0, 0, 0},
	{__HW_ID(3, 1), DRM_FORMAT_BGR888, RICH_SIMPLE_WB, Rot_0, 0, 0},
	{__HW_ID(3, 1), DRM_FORMAT_BGR888, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS},

	{__HW_ID(4, 0), DRM_FORMAT_RGBA5551, RICH_SIMPLE, Flip_H_V, 0, 0},
	{__HW_ID(4, 1), DRM_FORMAT_ABGR1555, RICH_SIMPLE, Flip_H_V, 0, 0},
	{__HW_ID(4, 1), DRM_FORMAT_ABGR1555, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR},
	{__HW_ID(4, 2), DRM_FORMAT_RGB565, RICH_SIMPLE, Flip_H_V, 0, 0},
	{__HW_ID(4, 3), DRM_FORMAT_BGR565, RICH_SIMPLE, Flip_H_V, 0, 0},
	{__HW_ID(4, 3), DRM_FORMAT_BGR565, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR},
	{__HW_ID(4, 4), DRM_FORMAT_R8, SIMPLE, Rot_0, 0, 0},

	{__HW_ID(5, 1), DRM_FORMAT_YUYV, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH},
	{__HW_ID(5, 2), DRM_FORMAT_YUYV, RICH, Flip_H_V, 0, 0},
	{__HW_ID(5, 3), DRM_FORMAT_UYVY, RICH, Flip_H_V, 0, 0},
	{__HW_ID(5, 6), DRM_FORMAT_NV12, RICH, Flip_H_V, 0, 0},
	{__HW_ID(5, 6), DRM_FORMAT_YUV420_8BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH},
	{__HW_ID(5, 7), DRM_FORMAT_YUV420, RICH, Flip_H_V, 0, 0},

	{__HW_ID(6, 6), DRM_FORMAT_X0L2, RICH, Flip_H_V, 0, 0},
	{__HW_ID(6, 7), DRM_FORMAT_P010, RICH, Flip_H_V, 0, 0},
	{__HW_ID(6, 7), DRM_FORMAT_YUV420_10BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH},
};

static bool d71_format_mod_supported(const struct komeda_format_caps *caps,
				     u32 layer_type, u64 modifier, u32 rot)
{
	u64 layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;

	if ((layout == AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) &&
	    drm_rotation_90_or_270(rot)) {
		DRM_DEBUG_ATOMIC("D71 doesn't support ROT90 for WB-AFBC.\n");
		return false;
	}

	return true;
}

static void d71_init_fmt_tbl(struct komeda_dev *mdev)
{
	struct komeda_format_caps_table *table = &mdev->fmt_tbl;

	table->format_caps = d71_format_caps_table;
	table->format_mod_supported = d71_format_mod_supported;
	table->n_formats = ARRAY_SIZE(d71_format_caps_table);
}

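/*
 * Switch the GCU into TBU_CONNECT mode, wait until the translation units of
 * all pipelines report as connected, then set LPU_TBU_CTRL_TLBPEN on each
 * pipeline's LPU.
 */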
static int d71_connect_iommu(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 __iomem *reg = d71->gcu_addr;
	u32 check_bits = (d71->num_pipelines == 2) ?
			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
	int i, ret;

	if (!d71->integrates_tbu)
		return -1;

	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_CONNECT_MODE);

	ret = dp_wait_cond(has_bits(check_bits, malidp_read32(reg, BLK_STATUS)),
			   100, 1000, 1000);
	if (ret < 0) {
		DRM_ERROR("timed out connecting to TCU!\n");
		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
		return ret;
	}

	for (i = 0; i < d71->num_pipelines; i++)
		malidp_write32_mask(d71->pipes[i]->lpu_addr, LPU_TBU_CONTROL,
				    LPU_TBU_CTRL_TLBPEN, LPU_TBU_CTRL_TLBPEN);
	return 0;
}

static int d71_disconnect_iommu(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 __iomem *reg = d71->gcu_addr;
	u32 check_bits = (d71->num_pipelines == 2) ?
			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
	int ret;

	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_DISCONNECT_MODE);

	ret = dp_wait_cond(((malidp_read32(reg, BLK_STATUS) & check_bits) == 0),
			   100, 1000, 1000);
	if (ret < 0) {
		DRM_ERROR("timed out disconnecting from TCU!\n");
		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
	}

	return ret;
}

static const struct komeda_dev_funcs d71_chip_funcs = {
	.init_format_table = d71_init_fmt_tbl,
	.enum_resources = d71_enum_resources,
	.cleanup = d71_cleanup,
	.irq_handler = d71_irq_handler,
	.enable_irq = d71_enable_irq,
	.disable_irq = d71_disable_irq,
	.on_off_vblank = d71_on_off_vblank,
	.change_opmode = d71_change_opmode,
	.flush = d71_flush,
	.connect_iommu = d71_connect_iommu,
	.disconnect_iommu = d71_disconnect_iommu,
	.dump_register = d71_dump,
};

const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
	const struct komeda_dev_funcs *funcs;
	u32 product_id;

	chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);

	product_id = MALIDP_CORE_ID_PRODUCT_ID(chip->core_id);

	switch (product_id) {
	case MALIDP_D71_PRODUCT_ID:
	case MALIDP_D32_PRODUCT_ID:
		funcs = &d71_chip_funcs;
		break;
	default:
		DRM_ERROR("Unsupported product: 0x%x\n", product_id);
		return NULL;
	}

	chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
	chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO);
	chip->bus_width = D71_BUS_WIDTH_16_BYTES;

	return funcs;
}