0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/sort.h>
0009
0010 #include <drm/drm_atomic.h>
0011 #include <drm/drm_blend.h>
0012 #include <drm/drm_mode.h>
0013 #include <drm/drm_crtc.h>
0014 #include <drm/drm_flip_work.h>
0015 #include <drm/drm_fourcc.h>
0016 #include <drm/drm_probe_helper.h>
0017 #include <drm/drm_vblank.h>
0018
0019 #include "mdp5_kms.h"
0020 #include "msm_gem.h"
0021
0022 #define CURSOR_WIDTH 64
0023 #define CURSOR_HEIGHT 64
0024
/* Per-CRTC driver state for an MDP5 layer-mixer (LM) backed CRTC. */
struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	/* protects access to the shared layer-mixer (LM) registers */
	spinlock_t lm_lock;

	/* if not NULL, a page-flip event is pending delivery at vblank */
	struct drm_pending_vblank_event *event;

	/* CTL flush bits written at the last commit (see crtc_flush_all()) */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	/* bitmask of PENDING_* work to complete in the vblank irq handler */
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	/* signalled when a ping-pong done irq arrives (command mode) */
	struct completion pp_completion;

	/* true when the legacy LM cursor (not a cursor plane) is in use */
	bool lm_cursor_enabled;

	struct {
		/* protects the LM cursor registers and the fields below */
		spinlock_t lock;

		/* current cursor buffer being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		int x, y;
	} cursor;
};
0066 #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
0067
0068 static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
0069
0070 static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
0071 {
0072 struct msm_drm_private *priv = crtc->dev->dev_private;
0073 return to_mdp5_kms(to_mdp_kms(priv->kms));
0074 }
0075
0076 static void request_pending(struct drm_crtc *crtc, uint32_t pending)
0077 {
0078 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
0079
0080 atomic_or(pending, &mdp5_crtc->pending);
0081 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
0082 }
0083
0084 static void request_pp_done_pending(struct drm_crtc *crtc)
0085 {
0086 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
0087 reinit_completion(&mdp5_crtc->pp_completion);
0088 }
0089
0090 static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
0091 {
0092 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
0093 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
0094 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
0095 bool start = !mdp5_cstate->defer_start;
0096
0097 mdp5_cstate->defer_start = false;
0098
0099 DBG("%s: flush=%08x", crtc->name, flush_mask);
0100
0101 return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
0102 }
0103
0104
0105
0106
0107
0108
0109 static u32 crtc_flush_all(struct drm_crtc *crtc)
0110 {
0111 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
0112 struct mdp5_hw_mixer *mixer, *r_mixer;
0113 struct drm_plane *plane;
0114 uint32_t flush_mask = 0;
0115
0116
0117 if (WARN_ON(!mdp5_cstate->ctl))
0118 return 0;
0119
0120 drm_atomic_crtc_for_each_plane(plane, crtc) {
0121 if (!plane->state->visible)
0122 continue;
0123 flush_mask |= mdp5_plane_get_flush(plane);
0124 }
0125
0126 mixer = mdp5_cstate->pipeline.mixer;
0127 flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
0128
0129 r_mixer = mdp5_cstate->pipeline.r_mixer;
0130 if (r_mixer)
0131 flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
0132
0133 return crtc_flush(crtc, flush_mask);
0134 }
0135
0136
0137 static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
0138 {
0139 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
0140 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
0141 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
0142 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
0143 struct drm_device *dev = crtc->dev;
0144 struct drm_pending_vblank_event *event;
0145 unsigned long flags;
0146
0147 spin_lock_irqsave(&dev->event_lock, flags);
0148 event = mdp5_crtc->event;
0149 if (event) {
0150 mdp5_crtc->event = NULL;
0151 DBG("%s: send event: %p", crtc->name, event);
0152 drm_crtc_send_vblank_event(crtc, event);
0153 }
0154 spin_unlock_irqrestore(&dev->event_lock, flags);
0155
0156 if (ctl && !crtc->state->enable) {
0157
0158 mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
0159
0160
0161 }
0162 }
0163
0164 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
0165 {
0166 struct mdp5_crtc *mdp5_crtc =
0167 container_of(work, struct mdp5_crtc, unref_cursor_work);
0168 struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
0169 struct msm_kms *kms = &mdp5_kms->base.base;
0170
0171 msm_gem_unpin_iova(val, kms->aspace);
0172 drm_gem_object_put(val);
0173 }
0174
0175 static void mdp5_crtc_destroy(struct drm_crtc *crtc)
0176 {
0177 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
0178
0179 drm_crtc_cleanup(crtc);
0180 drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
0181
0182 kfree(mdp5_crtc);
0183 }
0184
0185 static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
0186 {
0187 switch (stage) {
0188 case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
0189 case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
0190 case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
0191 case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
0192 case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
0193 case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
0194 case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
0195 default:
0196 return 0;
0197 }
0198 }
0199
0200
0201
0202
0203 #define PIPE_LEFT 0
0204 #define PIPE_RIGHT 1
0205
0206
0207
0208
0209
0210
0211
0212
/*
 * blend_setup() - program LM blend registers and the CTL blend config for
 * all visible planes of this CRTC, ordered by their assigned stage.
 *
 * If no plane sits at STAGE_BASE, border color is enabled as the base layer.
 * When a right mixer is present, the same registers are mirrored to it.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
/* index into the per-stage LM blend register arrays */
#define blender(stage) ((stage) - STAGE0)

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl may already be released when the CRTC is shutting down: */
	if (!ctl)
		goto out;

	/* Collect the pipes of every visible plane, indexed by stage. */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
				mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e. the plane comprises of 2
		 * hwpipes, then stage the right pipe on both mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		/* no base layer -> fall back to border color */
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* Program the per-stage blend op and constant alphas. */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		/* default: constant FG/BG alpha */
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		/* drm alpha is 16-bit; hardware takes 8-bit */
		fg_alpha = pstates[i]->base.alpha >> 8;
		bg_alpha = 0xFF - fg_alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable &&
		    pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
			/* premultiplied: BG weighted by FG pixel alpha */
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable &&
			   pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
			/* coverage: both FG and BG weighted by FG pixel alpha */
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			/* mirror the blend setup on the right mixer */
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	/* OR the accumulated FG-alpha bits into BLEND_COLOR_OUT. */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
0363
/*
 * Program the layer-mixer output size for the CRTC's adjusted mode. With a
 * right mixer, each mixer takes half of the horizontal resolution and the
 * SPLIT_LEFT_RIGHT bit is set on the right mixer.
 */
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));

	/* each mixer covers half the width in dual-mixer configs */
	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
		   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
		   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* the left mixer is never the RIGHT half of a split */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* mark the right mixer as the RIGHT half of the split */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
0412
0413 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
0414 {
0415 struct drm_device *dev = crtc->dev;
0416 struct drm_encoder *encoder;
0417
0418 drm_for_each_encoder(encoder, dev)
0419 if (encoder->crtc == crtc)
0420 return encoder;
0421
0422 return NULL;
0423 }
0424
0425 static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
0426 bool in_vblank_irq,
0427 int *vpos, int *hpos,
0428 ktime_t *stime, ktime_t *etime,
0429 const struct drm_display_mode *mode)
0430 {
0431 unsigned int pipe = crtc->index;
0432 struct drm_encoder *encoder;
0433 int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
0434
0435
0436 encoder = get_encoder_from_crtc(crtc);
0437 if (!encoder) {
0438 DRM_ERROR("no encoder found for crtc %d\n", pipe);
0439 return false;
0440 }
0441
0442 vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
0443 vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
0444
0445
0446
0447
0448
0449
0450
0451 vactive_start = vsw + vbp + 1;
0452
0453 vactive_end = vactive_start + mode->crtc_vdisplay;
0454
0455
0456 vfp_end = mode->crtc_vtotal;
0457
0458 if (stime)
0459 *stime = ktime_get();
0460
0461 line = mdp5_encoder_get_linecount(encoder);
0462
0463 if (line < vactive_start)
0464 line -= vactive_start;
0465 else if (line > vactive_end)
0466 line = line - vfp_end - vactive_start;
0467 else
0468 line -= vactive_start;
0469
0470 *vpos = line;
0471 *hpos = 0;
0472
0473 if (etime)
0474 *etime = ktime_get();
0475
0476 return true;
0477 }
0478
0479 static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
0480 {
0481 struct drm_encoder *encoder;
0482
0483 encoder = get_encoder_from_crtc(crtc);
0484 if (!encoder)
0485 return 0;
0486
0487 return mdp5_encoder_get_framecount(encoder);
0488 }
0489
/*
 * Disable the CRTC: quiesce vblank handling, unregister irqs, drop the
 * runtime-pm reference, and deliver any event left on an inactive state.
 */
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power-collapse */
	drm_crtc_vblank_off(crtc);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	/* no vblank will fire on an inactive CRTC, so send the event now */
	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp5_crtc->event);
		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
	}

	mdp5_crtc->enabled = false;
}
0523
0524 static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
0525 {
0526 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
0527 struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
0528 u32 count;
0529
0530 count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
0531 drm_crtc_set_max_vblank_count(crtc, count);
0532
0533 drm_crtc_vblank_on(crtc);
0534 }
0535
/*
 * Enable the CRTC: take a runtime-pm reference, restore LM cursor and mode
 * register state (lost across power collapse), and register irqs.
 */
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
		/*
		 * Restore LM cursor state, as it would have been lost if we
		 * went through a power collapse.
		 */
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			/* no cursor buffer pinned: make sure it stays off */
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

	/* Restore vblank irq handling after power-collapse */
	mdp5_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}
0583
/*
 * (Re)assign hardware mixers to the CRTC's pipeline in the new atomic
 * state, and derive the per-state irq masks / command-mode flag from the
 * pipeline's interface.
 *
 * Returns 0 on success or a negative errno from mixer assign/release.
 */
static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
				    struct drm_crtc_state *new_crtc_state,
				    bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
		to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	/* no mixer yet, or the left/right layout changed -> reassign */
	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		/* release the previously held mixer(s) back to the pool */
		ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (ret)
			return ret;

		if (old_r_mixer) {
			ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (ret)
				return ret;

			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check())
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}
0650
/* Pair of a plane and its mdp5 state, used for zpos sorting in
 * mdp5_crtc_atomic_check(). */
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};
0655
0656 static int pstate_cmp(const void *a, const void *b)
0657 {
0658 struct plane_state *pa = (struct plane_state *)a;
0659 struct plane_state *pb = (struct plane_state *)b;
0660 return pa->state->base.normalized_zpos - pb->state->base.normalized_zpos;
0661 }
0662
0663
0664 static bool is_fullscreen(struct drm_crtc_state *cstate,
0665 struct drm_plane_state *pstate)
0666 {
0667 return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
0668 ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
0669 ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
0670 }
0671
0672 static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
0673 struct drm_crtc_state *new_crtc_state,
0674 struct drm_plane_state *bpstate)
0675 {
0676 struct mdp5_crtc_state *mdp5_cstate =
0677 to_mdp5_crtc_state(new_crtc_state);
0678
0679
0680
0681
0682
0683 if (mdp5_cstate->pipeline.r_mixer)
0684 return STAGE0;
0685
0686
0687
0688
0689 if (!is_fullscreen(new_crtc_state, bpstate))
0690 return STAGE0;
0691
0692 return STAGE_BASE;
0693 }
0694
/*
 * Atomic check: collect visible planes, decide whether a right mixer is
 * needed, (re)assign mixers, sort planes by zpos and assign a blend stage
 * to each. Returns -EINVAL when the planes don't fit the mixer's stages.
 */
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct mdp5_plane_state *mdp5_pstate =
			to_mdp5_plane_state(pstate);

		if (!pstate->visible)
			continue;

		/* NOTE(review): pstates[] holds STAGE_MAX + 1 entries but
		 * cnt is not bounds-checked here — presumably the number of
		 * visible planes can never exceed the stage count; verify. */
		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		/* command-mode panels need dirtyfb notifications on planes */
		mdp5_pstate->needs_dirtyfb =
			intf->mode == MDP5_INTF_DSI_MODE_COMMAND;

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early when there is nothing to do */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * we need a right hwmixer if the mode's width is greater than a
	 * single LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			      cnt, start);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		/* the cursor plane always goes on the topmost stage */
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
		    pstates[i].plane->name,
		    pstates[i].state->stage);
	}

	return 0;
}
0791
/* Nothing to prepare before plane updates; kept only for tracing. */
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	DBG("%s: begin", crtc->name);
}
0797
/*
 * Atomic flush: stash the page-flip event, program blending, flush all
 * planes/mixers through the CTL, and arm the vblank irq that will deliver
 * the event and complete pending work.
 */
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	/* take ownership of the event; it is sent from the vblank irq */
	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(), then we
	 * don't have anything to flush. This can happen on the disable path.
	 */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/*
	 * In command mode, the pp-done completion must be re-armed before
	 * flushing, so the subsequent wait sees this frame's pp-done irq.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* copy this frame's irq masks out of the atomic state */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}
0844
0845 static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
0846 {
0847 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
0848 uint32_t xres = crtc->mode.hdisplay;
0849 uint32_t yres = crtc->mode.vdisplay;
0850
0851
0852
0853
0854
0855
0856
0857
0858
0859
0860
0861
0862
0863
0864
0865
0866
0867
0868
0869
0870 if (mdp5_crtc->cursor.x >= 0)
0871 *roi_w = min(mdp5_crtc->cursor.width, xres -
0872 mdp5_crtc->cursor.x);
0873 else
0874 *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
0875 if (mdp5_crtc->cursor.y >= 0)
0876 *roi_h = min(mdp5_crtc->cursor.height, yres -
0877 mdp5_crtc->cursor.y);
0878 else
0879 *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
0880 }
0881
/*
 * Program the LM cursor registers from the cached cursor state. The caller
 * must hold mdp5_crtc->cursor.lock (asserted below).
 */
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, src_x, src_y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

	/* cursor is always ARGB8888 -> stride = width * bytes-per-pixel */
	stride = width * info->cpp[0];

	get_roi(crtc, &roi_w, &roi_h);

	/*
	 * When the cursor position is negative, clamp the on-screen start to
	 * 0 and instead offset the source read position into the image.
	 */
	if (mdp5_crtc->cursor.x < 0) {
		src_x = abs(mdp5_crtc->cursor.x);
		x = 0;
	} else {
		src_x = 0;
	}
	if (mdp5_crtc->cursor.y < 0) {
		src_y = abs(mdp5_crtc->cursor.y);
		y = 0;
	} else {
		src_y = 0;
	}
	DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
	    crtc->name, x, y, roi_w, roi_h, src_x, src_y);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
		   MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
		   MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
		   MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
		   MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
		   MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
		   MDP5_LM_CURSOR_START_XY_Y_START(y) |
		   MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
		   MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
		   MDP5_LM_CURSOR_XY_SRC_X(src_x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
		   mdp5_crtc->cursor.iova);

	/* enable blending with per-pixel cursor alpha */
	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}
0949
/*
 * Legacy LM cursor set: pin the new cursor bo, program the cursor registers
 * and enable/disable the cursor through the CTL. The old bo is unref'd via
 * flip-work once the new cursor has scanned out. A handle of 0 turns the
 * cursor off.
 */
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
				struct drm_file *file, uint32_t handle,
				uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	struct mdp5_ctl *ctl;
	int ret;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	bool cursor_enable = true;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_set is deprecated with cursor planes\n");
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		mdp5_crtc->cursor.iova = 0;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	/* pin the bo so the hardware can scan it out */
	ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
				       &mdp5_crtc->cursor.iova);
	if (ret) {
		drm_gem_object_put(cursor_bo);
		return -EINVAL;
	}

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	mdp5_crtc_restore_cursor(crtc);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
			      cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_sync(&pdev->dev);
	if (old_bo) {
		/* queue unref of the old bo; the vblank irq drains it */
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}
1038
/*
 * Legacy LM cursor move: clamp the position so at most one full cursor
 * width/height hangs off the top/left edge, reprogram the cursor registers
 * and flush.
 */
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* nothing to do if the CRTC is disabled */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* accept negative x/y up to the full cursor size off-screen */
	mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
	mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);

	get_roi(crtc, &roi_w, &roi_h);

	/* hardware must be powered to touch the cursor registers */
	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}
1082
1083 static void
1084 mdp5_crtc_atomic_print_state(struct drm_printer *p,
1085 const struct drm_crtc_state *state)
1086 {
1087 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
1088 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
1089 struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
1090
1091 if (WARN_ON(!pipeline))
1092 return;
1093
1094 if (mdp5_cstate->ctl)
1095 drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));
1096
1097 drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
1098 pipeline->mixer->name : "(null)");
1099
1100 if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
1101 drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
1102 pipeline->r_mixer->name : "(null)");
1103
1104 drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
1105 }
1106
1107 static struct drm_crtc_state *
1108 mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
1109 {
1110 struct mdp5_crtc_state *mdp5_cstate;
1111
1112 if (WARN_ON(!crtc->state))
1113 return NULL;
1114
1115 mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
1116 sizeof(*mdp5_cstate), GFP_KERNEL);
1117 if (!mdp5_cstate)
1118 return NULL;
1119
1120 __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
1121
1122 return &mdp5_cstate->base;
1123 }
1124
/* Release a duplicated/reset mdp5 CRTC state. */
static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	/* release the base-state references before freeing the wrapper */
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(mdp5_cstate);
}
1133
1134 static void mdp5_crtc_reset(struct drm_crtc *crtc)
1135 {
1136 struct mdp5_crtc_state *mdp5_cstate =
1137 kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
1138
1139 if (crtc->state)
1140 mdp5_crtc_destroy_state(crtc, crtc->state);
1141
1142 __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
1143 }
1144
/* CRTC funcs used when a cursor plane exists: no legacy LM cursor hooks. */
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
1158
/* CRTC funcs used when no dedicated cursor plane exists: the legacy
 * cursor_set/cursor_move hooks drive the mixer (LM) hardware cursor
 * instead (lm_cursor_enabled is set in mdp5_crtc_init()).
 */
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
1174
/* Atomic modeset helper hooks; registered via drm_crtc_helper_add()
 * in mdp5_crtc_init().
 */
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
	.get_scanout_position = mdp5_crtc_get_scanout_position,
};
1184
1185 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
1186 {
1187 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
1188 struct drm_crtc *crtc = &mdp5_crtc->base;
1189 struct msm_drm_private *priv = crtc->dev->dev_private;
1190 unsigned pending;
1191
1192 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
1193
1194 pending = atomic_xchg(&mdp5_crtc->pending, 0);
1195
1196 if (pending & PENDING_FLIP) {
1197 complete_flip(crtc, NULL);
1198 }
1199
1200 if (pending & PENDING_CURSOR)
1201 drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
1202 }
1203
1204 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
1205 {
1206 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
1207
1208 DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
1209 }
1210
/* Ping-pong done irq: wakes every waiter blocked in
 * mdp5_crtc_wait_for_pp_done().
 */
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
						   pp_done);

	complete_all(&mdp5_crtc->pp_completion);
}
1218
1219 static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
1220 {
1221 struct drm_device *dev = crtc->dev;
1222 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
1223 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1224 int ret;
1225
1226 ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
1227 msecs_to_jiffies(50));
1228 if (ret == 0)
1229 dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
1230 mdp5_cstate->pipeline.mixer->lm);
1231 }
1232
/*
 * Wait (up to 50ms) for the hardware to consume the flush bits recorded
 * in flushed_mask, i.e. for the last committed configuration to take
 * effect at vblank.  Used for video-mode pipelines.
 */
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* No CTL attached to this CRTC: nothing was flushed, nothing to wait for. */
	if (!ctl)
		return;

	/* Hold a vblank reference so vblank irqs keep firing and waking
	 * the per-crtc vblank waitqueue we sleep on below.
	 */
	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
				 ((mdp5_ctl_get_commit_status(ctl) &
				   mdp5_crtc->flushed_mask) == 0),
				 msecs_to_jiffies(50));
	/* wait_event_timeout() returns 0 on timeout */
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	/* Cleared even on timeout so a stuck flush can't wedge later commits. */
	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
1260
1261 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
1262 {
1263 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
1264 return mdp5_crtc->vblank.irqmask;
1265 }
1266
/* Program the CTL with the pipeline from the CRTC's current state. */
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* NOTE(review): presumably syncs the irq mask to hw before the
	 * pipeline is (re)programmed — confirm against mdp_irq_update().
	 */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}
1277
1278 struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
1279 {
1280 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1281
1282 return mdp5_cstate->ctl;
1283 }
1284
1285 struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
1286 {
1287 struct mdp5_crtc_state *mdp5_cstate;
1288
1289 if (WARN_ON(!crtc))
1290 return ERR_PTR(-EINVAL);
1291
1292 mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1293
1294 return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
1295 ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
1296 }
1297
1298 struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
1299 {
1300 struct mdp5_crtc_state *mdp5_cstate;
1301
1302 if (WARN_ON(!crtc))
1303 return ERR_PTR(-EINVAL);
1304
1305 mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1306
1307 return &mdp5_cstate->pipeline;
1308 }
1309
1310 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
1311 {
1312 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1313
1314 if (mdp5_cstate->cmd_mode)
1315 mdp5_crtc_wait_for_pp_done(crtc);
1316 else
1317 mdp5_crtc_wait_for_flush_done(crtc);
1318 }
1319
1320
1321 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
1322 struct drm_plane *plane,
1323 struct drm_plane *cursor_plane, int id)
1324 {
1325 struct drm_crtc *crtc = NULL;
1326 struct mdp5_crtc *mdp5_crtc;
1327
1328 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
1329 if (!mdp5_crtc)
1330 return ERR_PTR(-ENOMEM);
1331
1332 crtc = &mdp5_crtc->base;
1333
1334 mdp5_crtc->id = id;
1335
1336 spin_lock_init(&mdp5_crtc->lm_lock);
1337 spin_lock_init(&mdp5_crtc->cursor.lock);
1338 init_completion(&mdp5_crtc->pp_completion);
1339
1340 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
1341 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
1342 mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
1343
1344 mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
1345
1346 drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
1347 cursor_plane ?
1348 &mdp5_crtc_no_lm_cursor_funcs :
1349 &mdp5_crtc_funcs, NULL);
1350
1351 drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
1352 "unref cursor", unref_cursor_worker);
1353
1354 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
1355
1356 return crtc;
1357 }