#include "mdp5_kms.h"
#include "mdp5_ctl.h"
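
/*
 * CTL - MDP Control Pool Manager
 *
 * A CTL block programs the complete data path for one display pipeline:
 * which pipes feed which layer mixer stage, which interface is driven, and
 * which hardware blocks get flushed. CTLs are a shared resource, so this
 * manager owns the pool, tracks BUSY/BOOKED status, hands CTLs out per
 * interface, and supports pairing two CTLs for single FLUSH on dual-DSI
 * configurations.
 */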

#define CTL_STAT_BUSY   0x1
#define CTL_STAT_BOOKED 0x2

struct mdp5_ctl {
        struct mdp5_ctl_manager *ctlm;

        u32 id;

        /* CTL status bitmask (CTL_STAT_BUSY | CTL_STAT_BOOKED) */
        u32 status;

        bool encoder_enabled;

        /* FLUSH bits queued up until the next commit with start == true */
        u32 flush_mask;

        /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
        spinlock_t hw_lock;
        u32 reg_offset;

        /* FLUSH bits that also require the CTL registers to be flushed */
        u32 pending_ctl_trigger;

        bool cursor_on;

        /* true if this CTL has FLUSH bits pending for single FLUSH */
        bool flush_pending;

        struct mdp5_ctl *pair;  /* paired CTL to be flushed together */
};

struct mdp5_ctl_manager {
        struct drm_device *dev;

        /* number of Layer Mixers / CTLs in this hw config: */
        u32 nlm;
        u32 nctl;

        /* filters out FLUSH bits not present in the current hardware */
        u32 flush_hw_mask;

        /* state for single FLUSH (paired CTLs) */
        bool single_flush_supported;
        u32 single_flush_pending_mask;

        /* pool of CTLs + lock to protect resource allocation (ctls[i].status) */
        spinlock_t pool_lock;
        struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
        struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

        return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
        struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

        (void)ctl->reg_offset;  /* reg_offset is not used for addressing yet */
        mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
        struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

        (void)ctl->reg_offset;  /* reg_offset is not used for addressing yet */
        return mdp5_read(mdp5_kms, reg);
}

static void set_display_intf(struct mdp5_kms *mdp5_kms,
                             struct mdp5_interface *intf)
{
        unsigned long flags;
        u32 intf_sel;

        spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
        intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

        switch (intf->num) {
        case 0:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
                break;
        case 1:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
                break;
        case 2:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
                break;
        case 3:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
                break;
        default:
                BUG();
                break;
        }

        mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
        spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}

static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
        unsigned long flags;
        struct mdp5_interface *intf = pipeline->intf;
        u32 ctl_op = 0;

        if (!mdp5_cfg_intf_is_virtual(intf->type))
                ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

        switch (intf->type) {
        case INTF_DSI:
                if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
                        ctl_op |= MDP5_CTL_OP_CMD_MODE;
                break;

        case INTF_WB:
                if (intf->mode == MDP5_INTF_WB_MODE_LINE)
                        ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
                break;

        default:
                break;
        }

        if (pipeline->r_mixer)
                ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
                          MDP5_CTL_OP_PACK_3D(1);

        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
        spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
        struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
        struct mdp5_interface *intf = pipeline->intf;

        /* virtual interfaces (e.g. writeback) have no physical INTF to program */
        if (!mdp5_cfg_intf_is_virtual(intf->type))
                set_display_intf(mdp5_kms, intf);

        set_ctl_op(ctl, pipeline);

        return 0;
}

static bool start_signal_needed(struct mdp5_ctl *ctl,
                                struct mdp5_pipeline *pipeline)
{
        struct mdp5_interface *intf = pipeline->intf;

        if (!ctl->encoder_enabled)
                return false;

        switch (intf->type) {
        case INTF_WB:
                return true;
        case INTF_DSI:
                return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
        default:
                return false;
        }
}
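
/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For non-continuous data paths (writeback, DSI command mode), a START
 * signal must be written to kick off the programmed operation.
 */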
static void send_start_signal(struct mdp5_ctl *ctl)
{
        unsigned long flags;

        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
        spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
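
/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @ctl:      the CTL instance
 * @pipeline: the encoder's INTF + MIXER configuration
 * @enabled:  true when the encoder is ready for data streaming, false otherwise
 *
 * The encoder state is needed to decide whether a START signal (data path
 * kickoff) must be sent.
 */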
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
                               struct mdp5_pipeline *pipeline,
                               bool enabled)
{
        struct mdp5_interface *intf = pipeline->intf;

        if (WARN_ON(!ctl))
                return -EINVAL;

        ctl->encoder_enabled = enabled;
        DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");

        if (start_signal_needed(ctl, pipeline))
                send_start_signal(ctl);

        return 0;
}
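
/*
 * Note:
 * CTL registers are only applied in hardware after a flush; the required
 * flush bits are recorded in ctl->pending_ctl_trigger and picked up by the
 * next mdp5_ctl_commit() call.
 */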
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
                        int cursor_id, bool enable)
{
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        unsigned long flags;
        u32 blend_cfg;
        struct mdp5_hw_mixer *mixer = pipeline->mixer;

        if (WARN_ON(!mixer)) {
                DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
                              ctl->id);
                return -EINVAL;
        }

        if (pipeline->r_mixer) {
                DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
                return -EINVAL;
        }

        spin_lock_irqsave(&ctl->hw_lock, flags);

        blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));

        if (enable)
                blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
        else
                blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

        ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
        ctl->cursor_on = enable;

        spin_unlock_irqrestore(&ctl->hw_lock, flags);

        ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

        return 0;
}

static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
                              enum mdp_mixer_stage_id stage)
{
        switch (pipe) {
        case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
        case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
        case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
        case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
        case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
        case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
        case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
        case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
        case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
        case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
        case SSPP_CURSOR0:
        case SSPP_CURSOR1:
        default: return 0;
        }
}

static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
                                  enum mdp_mixer_stage_id stage)
{
        if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
                return 0;

        switch (pipe) {
        case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
        case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
        case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
        case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
        case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
        case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
        case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
        case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
        case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
        case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
        case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
        case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
        default: return 0;
        }
}

static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
        unsigned long flags;
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        int i;

        spin_lock_irqsave(&ctl->hw_lock, flags);

        for (i = 0; i < ctl_mgr->nlm; i++) {
                ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
                ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
        }

        spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

#define PIPE_LEFT  0
#define PIPE_RIGHT 1
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
                   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
                   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
                   u32 stage_cnt, u32 ctl_blend_op_flags)
{
        struct mdp5_hw_mixer *mixer = pipeline->mixer;
        struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
        unsigned long flags;
        u32 blend_cfg = 0, blend_ext_cfg = 0;
        u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
        int i, start_stage;

        mdp5_ctl_reset_blend_regs(ctl);

        if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
                start_stage = STAGE0;
                blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
                if (r_mixer)
                        r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
        } else {
                start_stage = STAGE_BASE;
        }

        for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
                blend_cfg |=
                        mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
                        mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
                blend_ext_cfg |=
                        mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
                        mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
                if (r_mixer) {
                        r_blend_cfg |=
                                mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
                                mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
                        r_blend_ext_cfg |=
                                mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
                                mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
                }
        }

        spin_lock_irqsave(&ctl->hw_lock, flags);
        if (ctl->cursor_on)
                blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

        ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
        ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
                  blend_ext_cfg);
        if (r_mixer) {
                ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
                          r_blend_cfg);
                ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
                          r_blend_ext_cfg);
        }
        spin_unlock_irqrestore(&ctl->hw_lock, flags);

        ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
        if (r_mixer)
                ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

        DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
            blend_cfg, blend_ext_cfg);
        if (r_mixer)
                DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
                    r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

        return 0;
}

u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
        if (intf->type == INTF_WB)
                return MDP5_CTL_FLUSH_WB;

        switch (intf->num) {
        case 0: return MDP5_CTL_FLUSH_TIMING_0;
        case 1: return MDP5_CTL_FLUSH_TIMING_1;
        case 2: return MDP5_CTL_FLUSH_TIMING_2;
        case 3: return MDP5_CTL_FLUSH_TIMING_3;
        default: return 0;
        }
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
        switch (cursor_id) {
        case 0: return MDP5_CTL_FLUSH_CURSOR_0;
        case 1: return MDP5_CTL_FLUSH_CURSOR_1;
        default: return 0;
        }
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
        switch (pipe) {
        case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
        case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
        case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
        case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
        case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
        case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
        case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
        case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
        case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
        case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
        case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
        case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
        default: return 0;
        }
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
        switch (lm) {
        case 0: return MDP5_CTL_FLUSH_LM0;
        case 1: return MDP5_CTL_FLUSH_LM1;
        case 2: return MDP5_CTL_FLUSH_LM2;
        case 3: return MDP5_CTL_FLUSH_LM3;
        case 4: return MDP5_CTL_FLUSH_LM4;
        case 5: return MDP5_CTL_FLUSH_LM5;
        default: return 0;
        }
}

static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
                        u32 flush_mask)
{
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
        (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

        /* on some targets the cursor has no FLUSH bit; flush it via the LM bit */
        if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
                sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);

        return sw_mask;
}

static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
                                 u32 *flush_id)
{
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

        if (ctl->pair) {
                DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
                ctl->flush_pending = true;
                ctl_mgr->single_flush_pending_mask |= (*flush_mask);
                *flush_mask = 0;

                if (ctl->pair->flush_pending) {
                        *flush_id = min_t(u32, ctl->id, ctl->pair->id);
                        *flush_mask = ctl_mgr->single_flush_pending_mask;

                        ctl->flush_pending = false;
                        ctl->pair->flush_pending = false;
                        ctl_mgr->single_flush_pending_mask = 0;

                        DBG("Single FLUSH mask %x,ID %d", *flush_mask,
                            *flush_id);
                }
        }
}
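
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * @ctl:        the CTL instance
 * @pipeline:   the encoder's INTF + MIXER configuration
 * @flush_mask: bitmask of display controller hw blocks to flush
 * @start:      if true, write the FLUSH register now and send the START
 *              signal if one is needed
 *
 * The FLUSH register tells the hardware that a set of double-buffered
 * registers is fully programmed and safe to latch. Bits not backed by this
 * hardware revision are remapped by fix_sw_flush(), and paired CTLs are
 * merged into a single FLUSH by fix_for_single_flush(). When @start is
 * false, the bits are only accumulated in ctl->flush_mask.
 *
 * Return: the bitmask of blocks flushed (or queued) for this CTL.
 */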
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
                    struct mdp5_pipeline *pipeline,
                    u32 flush_mask, bool start)
{
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        unsigned long flags;
        u32 flush_id = ctl->id;
        u32 curr_ctl_flush_mask;

        VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

        if (ctl->pending_ctl_trigger & flush_mask) {
                flush_mask |= MDP5_CTL_FLUSH_CTL;
                ctl->pending_ctl_trigger = 0;
        }

        flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

        flush_mask &= ctl_mgr->flush_hw_mask;

        curr_ctl_flush_mask = flush_mask;

        fix_for_single_flush(ctl, &flush_mask, &flush_id);

        if (!start) {
                ctl->flush_mask |= flush_mask;
                return curr_ctl_flush_mask;
        } else {
                flush_mask |= ctl->flush_mask;
                ctl->flush_mask = 0;
        }

        if (flush_mask) {
                spin_lock_irqsave(&ctl->hw_lock, flags);
                ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
                spin_unlock_irqrestore(&ctl->hw_lock, flags);
        }

        if (start_signal_needed(ctl, pipeline))
                send_start_signal(ctl);

        return curr_ctl_flush_mask;
}

u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
        return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
        return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}
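
/*
 * mdp5_ctl_pair() - associate (or dissociate) two booked CTLs for single FLUSH
 */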
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
        struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
        struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

        /* do nothing silently if the hw doesn't support single FLUSH */
        if (!ctl_mgr->single_flush_supported)
                return 0;

        if (!enable) {
                ctlx->pair = NULL;
                ctly->pair = NULL;
                mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
                return 0;
        } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
                DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
                return -EINVAL;
        } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
                DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
                return -EINVAL;
        }

        ctlx->pair = ctly;
        ctly->pair = ctlx;

        mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
                   MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

        return 0;
}
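
/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL when @intf_num is 1 or 2, an unbooked one for
 * other interfaces. If no CTL is available in the preferred category, fall
 * back to the other one.
 *
 * Return: the first free CTL, or NULL if the pool is exhausted.
 */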
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
                                   int intf_num)
{
        struct mdp5_ctl *ctl = NULL;
        const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
        u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
        unsigned long flags;
        int c;

        spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

        /* search the preferred category first */
        for (c = 0; c < ctl_mgr->nctl; c++)
                if ((ctl_mgr->ctls[c].status & checkm) == match)
                        goto found;

        dev_warn(ctl_mgr->dev->dev,
                 "fall back to the other CTL category for INTF %d!\n", intf_num);

        match ^= CTL_STAT_BOOKED;
        for (c = 0; c < ctl_mgr->nctl; c++)
                if ((ctl_mgr->ctls[c].status & checkm) == match)
                        goto found;

        DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
        goto unlock;

found:
        ctl = &ctl_mgr->ctls[c];
        ctl->status |= CTL_STAT_BUSY;
        ctl->pending_ctl_trigger = 0;
        DBG("CTL %d allocated", ctl->id);

unlock:
        spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
        return ctl;
}

void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
        unsigned long flags;
        int c;

        for (c = 0; c < ctl_mgr->nctl; c++) {
                struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

                spin_lock_irqsave(&ctl->hw_lock, flags);
                ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
                spin_unlock_irqrestore(&ctl->hw_lock, flags);
        }
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
        kfree(ctl_mgr);
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
                void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
        struct mdp5_ctl_manager *ctl_mgr;
        const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
        int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
        unsigned dsi_cnt = 0;
        const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
        unsigned long flags;
        int c, ret;

        ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
        if (!ctl_mgr) {
                DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
                ret = -ENOMEM;
                goto fail;
        }

        if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
                DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
                              ctl_cfg->count);
                ret = -ENOSPC;
                goto fail;
        }

        /* initialize the CTL manager: */
        ctl_mgr->dev = dev;
        ctl_mgr->nlm = hw_cfg->lm.count;
        ctl_mgr->nctl = ctl_cfg->count;
        ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
        spin_lock_init(&ctl_mgr->pool_lock);

        /* initialize each CTL of the pool: */
        spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
        for (c = 0; c < ctl_mgr->nctl; c++) {
                struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

                if (WARN_ON(!ctl_cfg->base[c])) {
                        DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
                        ret = -EINVAL;
                        spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
                        goto fail;
                }
                ctl->ctlm = ctl_mgr;
                ctl->id = c;
                ctl->reg_offset = ctl_cfg->base[c];
                ctl->status = 0;
                spin_lock_init(&ctl->hw_lock);
        }
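
        /*
         * If this hw revision (>= v3) drives more than one DSI interface,
         * the two pipelines can share a single FLUSH: book CTL0 and CTL1
         * so they can later be paired by mdp5_ctl_pair().
         */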
        for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
                if (hw_cfg->intf.connect[c] == INTF_DSI)
                        dsi_cnt++;
        if ((rev >= 3) && (dsi_cnt > 1)) {
                ctl_mgr->single_flush_supported = true;
                /* reserve CTL0/CTL1 for INTF1/INTF2 */
                ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
                ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
        }
        spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
        DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

        return ctl_mgr;

fail:
        if (ctl_mgr)
                mdp5_ctlm_destroy(ctl_mgr);

        return ERR_PTR(ret);
}