Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2013 Red Hat
0004  * Author: Rob Clark <robdclark@gmail.com>
0005  */
0006 
0007 #include <drm/drm_crtc.h>
0008 #include <drm/drm_flip_work.h>
0009 #include <drm/drm_mode.h>
0010 #include <drm/drm_probe_helper.h>
0011 #include <drm/drm_vblank.h>
0012 
0013 #include "mdp4_kms.h"
0014 #include "msm_gem.h"
0015 
/* Per-crtc driver state, embedding the drm_crtc base object. */
struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];		/* "<dma>:<ovlp>", built in mdp4_crtc_init() */
	int id;
	int ovlp;		/* overlay unit index, used for REG_MDP4_OVLP_* */
	enum mdp4_dma dma;	/* DMA engine feeding this crtc */
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	/* HW-cursor state; all fields below are protected by cursor.lock,
	 * which is taken with irqs saved (updated from both ioctl and irq
	 * paths).
	 */
	struct {
		spinlock_t lock;
		bool stale;	/* true when next_bo/next_iova await scan-out */
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;


	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
0061 
0062 static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
0063 {
0064     struct msm_drm_private *priv = crtc->dev->dev_private;
0065     return to_mdp4_kms(to_mdp_kms(priv->kms));
0066 }
0067 
0068 static void request_pending(struct drm_crtc *crtc, uint32_t pending)
0069 {
0070     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0071 
0072     atomic_or(pending, &mdp4_crtc->pending);
0073     mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
0074 }
0075 
0076 static void crtc_flush(struct drm_crtc *crtc)
0077 {
0078     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0079     struct mdp4_kms *mdp4_kms = get_kms(crtc);
0080     struct drm_plane *plane;
0081     uint32_t flush = 0;
0082 
0083     drm_atomic_crtc_for_each_plane(plane, crtc) {
0084         enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
0085         flush |= pipe2flush(pipe_id);
0086     }
0087 
0088     flush |= ovlp2flush(mdp4_crtc->ovlp);
0089 
0090     DBG("%s: flush=%08x", mdp4_crtc->name, flush);
0091 
0092     mdp4_crtc->flushed_mask = flush;
0093 
0094     mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
0095 }
0096 
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	/* NOTE(review): 'file' is not referenced below — the pending event is
	 * sent unconditionally.  Confirm whether the preclose path still
	 * needs per-file cancel filtering, or whether the parameter can go.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		/* claim the event under event_lock so concurrent callers
		 * send it at most once */
		mdp4_crtc->event = NULL;
		DBG("%s: send event: %p", mdp4_crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
0114 
0115 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
0116 {
0117     struct mdp4_crtc *mdp4_crtc =
0118         container_of(work, struct mdp4_crtc, unref_cursor_work);
0119     struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
0120     struct msm_kms *kms = &mdp4_kms->base.base;
0121 
0122     msm_gem_unpin_iova(val, kms->aspace);
0123     drm_gem_object_put(val);
0124 }
0125 
0126 static void mdp4_crtc_destroy(struct drm_crtc *crtc)
0127 {
0128     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0129 
0130     drm_crtc_cleanup(crtc);
0131     drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
0132 
0133     kfree(mdp4_crtc);
0134 }
0135 
/* statically (for now) map planes to mixer stage (z-order): */
/* All RGB pipes share the base stage (0); VG pipes each get their own
 * blend stage 1..4.  Index is enum mdp4_pipe; value indexes stages[]
 * in setup_mixer() and alpha[] in blend_setup().
 */
static const int idxs[] = {
		[VG1]  = 1,
		[VG2]  = 2,
		[RGB1] = 0,
		[RGB2] = 0,
		[RGB3] = 0,
		[VG3]  = 3,
		[VG4]  = 4,

};
0147 
0148 /* setup mixer config, for which we need to consider all crtc's and
0149  * the planes attached to them
0150  *
0151  * TODO may possibly need some extra locking here
0152  */
0153 static void setup_mixer(struct mdp4_kms *mdp4_kms)
0154 {
0155     struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
0156     struct drm_crtc *crtc;
0157     uint32_t mixer_cfg = 0;
0158     static const enum mdp_mixer_stage_id stages[] = {
0159             STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
0160     };
0161 
0162     list_for_each_entry(crtc, &config->crtc_list, head) {
0163         struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0164         struct drm_plane *plane;
0165 
0166         drm_atomic_crtc_for_each_plane(plane, crtc) {
0167             enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
0168             int idx = idxs[pipe_id];
0169             mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
0170                     pipe_id, stages[idx]);
0171         }
0172     }
0173 
0174     mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
0175 }
0176 
0177 static void blend_setup(struct drm_crtc *crtc)
0178 {
0179     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0180     struct mdp4_kms *mdp4_kms = get_kms(crtc);
0181     struct drm_plane *plane;
0182     int i, ovlp = mdp4_crtc->ovlp;
0183     bool alpha[4]= { false, false, false, false };
0184 
0185     mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
0186     mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
0187     mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
0188     mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
0189 
0190     drm_atomic_crtc_for_each_plane(plane, crtc) {
0191         enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
0192         int idx = idxs[pipe_id];
0193         if (idx > 0) {
0194             const struct mdp_format *format =
0195                     to_mdp_format(msm_framebuffer_format(plane->state->fb));
0196             alpha[idx-1] = format->alpha_enable;
0197         }
0198     }
0199 
0200     for (i = 0; i < 4; i++) {
0201         uint32_t op;
0202 
0203         if (alpha[i]) {
0204             op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
0205                     MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
0206                     MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
0207         } else {
0208             op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
0209                     MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
0210         }
0211 
0212         mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
0213         mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
0214         mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
0215         mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
0216         mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
0217         mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
0218         mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
0219         mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
0220     }
0221 
0222     setup_mixer(mdp4_kms);
0223 }
0224 
/* drm_crtc_helper_funcs::mode_set_nofb — program DMA and overlay unit
 * dimensions from the adjusted mode; no framebuffer involved.
 */
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT,
			mdp4_crtc->name, DRM_MODE_ARG(mode));

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	/* overlay unit covers the full display; stride 0 as data comes
	 * from the pipes, not a linear buffer: */
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	/* NOTE(review): magic quantization value for the external (DMA_E)
	 * interface — meaning not derivable from this file; see hw docs. */
	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}
0266 
/* drm_crtc_helper_funcs::atomic_disable — quiesce vblank handling and
 * irqs, then drop the hw power reference.  Ordering matters: vblank and
 * the err irq must be torn down before mdp4_disable().
 */
static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	/* double-disable would unbalance the mdp4_enable/disable refcount */
	if (WARN_ON(!mdp4_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
	mdp4_disable(mdp4_kms);

	mdp4_crtc->enabled = false;
}
0286 
/* drm_crtc_helper_funcs::atomic_enable — power up, restore vblank/irq
 * handling, and flush the initial state.  Mirror image of
 * mdp4_crtc_atomic_disable(); power must come up before touching irqs
 * or registers.
 */
static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	/* double-enable would unbalance the mdp4_enable/disable refcount */
	if (WARN_ON(mdp4_crtc->enabled))
		return;

	mdp4_enable(mdp4_kms);

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);

	crtc_flush(crtc);

	mdp4_crtc->enabled = true;
}
0309 
0310 static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
0311         struct drm_atomic_state *state)
0312 {
0313     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0314     DBG("%s: check", mdp4_crtc->name);
0315     // TODO anything else to check?
0316     return 0;
0317 }
0318 
0319 static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
0320                    struct drm_atomic_state *state)
0321 {
0322     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0323     DBG("%s: begin", mdp4_crtc->name);
0324 }
0325 
/* drm_crtc_helper_funcs::atomic_flush — latch the pending vblank event,
 * program blending, flush the hw, and arm the vblank irq that will
 * deliver the event via complete_flip().
 */
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

	/* a previous flip's event should already have been sent */
	WARN_ON(mdp4_crtc->event);

	/* take ownership of the event under event_lock; complete_flip()
	 * reads mdp4_crtc->event under the same lock */
	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}
0346 
0347 #define CURSOR_WIDTH 64
0348 #define CURSOR_HEIGHT 64
0349 
/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint64_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take a obj ref + iova ref when we start scanning out: */
			drm_gem_object_get(next_bo);
			/* NOTE(review): return value ignored — the bo was
			 * already pinned in mdp4_crtc_cursor_set(), so this
			 * presumably only bumps the pin count; confirm it
			 * cannot fail here (we are in irq context). */
			msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj rev when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	/* x/y position is double buffered and safe to write any time */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
0402 
0403 static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
0404         struct drm_file *file_priv, uint32_t handle,
0405         uint32_t width, uint32_t height)
0406 {
0407     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0408     struct mdp4_kms *mdp4_kms = get_kms(crtc);
0409     struct msm_kms *kms = &mdp4_kms->base.base;
0410     struct drm_device *dev = crtc->dev;
0411     struct drm_gem_object *cursor_bo, *old_bo;
0412     unsigned long flags;
0413     uint64_t iova;
0414     int ret;
0415 
0416     if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
0417         DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
0418         return -EINVAL;
0419     }
0420 
0421     if (handle) {
0422         cursor_bo = drm_gem_object_lookup(file_priv, handle);
0423         if (!cursor_bo)
0424             return -ENOENT;
0425     } else {
0426         cursor_bo = NULL;
0427     }
0428 
0429     if (cursor_bo) {
0430         ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
0431         if (ret)
0432             goto fail;
0433     } else {
0434         iova = 0;
0435     }
0436 
0437     spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
0438     old_bo = mdp4_crtc->cursor.next_bo;
0439     mdp4_crtc->cursor.next_bo   = cursor_bo;
0440     mdp4_crtc->cursor.next_iova = iova;
0441     mdp4_crtc->cursor.width     = width;
0442     mdp4_crtc->cursor.height    = height;
0443     mdp4_crtc->cursor.stale     = true;
0444     spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
0445 
0446     if (old_bo) {
0447         /* drop our previous reference: */
0448         drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
0449     }
0450 
0451     request_pending(crtc, PENDING_CURSOR);
0452 
0453     return 0;
0454 
0455 fail:
0456     drm_gem_object_put(cursor_bo);
0457     return ret;
0458 }
0459 
0460 static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
0461 {
0462     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0463     unsigned long flags;
0464 
0465     spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
0466     mdp4_crtc->cursor.x = x;
0467     mdp4_crtc->cursor.y = y;
0468     spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
0469 
0470     crtc_flush(crtc);
0471     request_pending(crtc, PENDING_CURSOR);
0472 
0473     return 0;
0474 }
0475 
/* crtc vtable: atomic helpers for modeset/flip, driver-specific legacy
 * cursor hooks (mdp4 uses a dedicated hw cursor, not a cursor plane).
 */
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
};
0488 
/* atomic-helper callbacks implemented by this file */
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
	.atomic_enable = mdp4_crtc_atomic_enable,
	.atomic_disable = mdp4_crtc_atomic_disable,
};
0497 
0498 static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
0499 {
0500     struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
0501     struct drm_crtc *crtc = &mdp4_crtc->base;
0502     struct msm_drm_private *priv = crtc->dev->dev_private;
0503     unsigned pending;
0504 
0505     mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
0506 
0507     pending = atomic_xchg(&mdp4_crtc->pending, 0);
0508 
0509     if (pending & PENDING_FLIP) {
0510         complete_flip(crtc, NULL);
0511     }
0512 
0513     if (pending & PENDING_CURSOR) {
0514         update_cursor(crtc);
0515         drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
0516     }
0517 }
0518 
0519 static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
0520 {
0521     struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
0522     struct drm_crtc *crtc = &mdp4_crtc->base;
0523     DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
0524     crtc_flush(crtc);
0525 }
0526 
/* Block (up to 50ms) until the hw has consumed the flush bits recorded
 * by the last crtc_flush().  The vblank reference keeps the vblank irq
 * (which wakes the queue) alive while we wait.
 */
static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int ret;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	/* hw clears its OVERLAY_FLUSH bits once the flush has taken
	 * effect; wait_event_timeout() returns 0 on timeout */
	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
			mdp4_crtc->flushed_mask),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);

	mdp4_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
0549 
0550 uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
0551 {
0552     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0553     return mdp4_crtc->vblank.irqmask;
0554 }
0555 
0556 /* set dma config, ie. the format the encoder wants. */
0557 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
0558 {
0559     struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
0560     struct mdp4_kms *mdp4_kms = get_kms(crtc);
0561 
0562     mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
0563 }
0564 
/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	/* INTF_SEL is a single register shared by all DMA engines, so
	 * read-modify-write only the field for our engine.
	 * NOTE(review): no locking visible here against concurrent
	 * mdp4_crtc_set_intf() calls on other crtcs — presumably
	 * serialized by the caller; confirm.
	 */
	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	/* DSI video vs command mode is a separate, mutually exclusive
	 * pair of bits in the same register: */
	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	/* mixer assignment changed, so re-program layer-mixer routing */
	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}
0605 
/* Wait until the last commit has been consumed by the hardware. */
void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command mode CRTC to wait for
	 * other event.
	 */
	mdp4_crtc_wait_for_flush_done(crtc);
}
0614 
/* indexed by enum mdp4_dma; used only to build the crtc debug name */
static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};
0618 
0619 /* initialize crtc */
0620 struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
0621         struct drm_plane *plane, int id, int ovlp_id,
0622         enum mdp4_dma dma_id)
0623 {
0624     struct drm_crtc *crtc = NULL;
0625     struct mdp4_crtc *mdp4_crtc;
0626 
0627     mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
0628     if (!mdp4_crtc)
0629         return ERR_PTR(-ENOMEM);
0630 
0631     crtc = &mdp4_crtc->base;
0632 
0633     mdp4_crtc->id = id;
0634 
0635     mdp4_crtc->ovlp = ovlp_id;
0636     mdp4_crtc->dma = dma_id;
0637 
0638     mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
0639     mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
0640 
0641     mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
0642     mdp4_crtc->err.irq = mdp4_crtc_err_irq;
0643 
0644     snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
0645             dma_names[dma_id], ovlp_id);
0646 
0647     spin_lock_init(&mdp4_crtc->cursor.lock);
0648 
0649     drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
0650             "unref cursor", unref_cursor_worker);
0651 
0652     drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
0653                   NULL);
0654     drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
0655 
0656     return crtc;
0657 }