Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (c) 2015 MediaTek Inc.
0004  */
0005 
0006 #include <linux/clk.h>
0007 #include <linux/dma-mapping.h>
0008 #include <linux/mailbox_controller.h>
0009 #include <linux/pm_runtime.h>
0010 #include <linux/soc/mediatek/mtk-cmdq.h>
0011 #include <linux/soc/mediatek/mtk-mmsys.h>
0012 #include <linux/soc/mediatek/mtk-mutex.h>
0013 
0014 #include <asm/barrier.h>
0015 
0016 #include <drm/drm_atomic.h>
0017 #include <drm/drm_atomic_helper.h>
0018 #include <drm/drm_plane_helper.h>
0019 #include <drm/drm_probe_helper.h>
0020 #include <drm/drm_vblank.h>
0021 
0022 #include "mtk_drm_drv.h"
0023 #include "mtk_drm_crtc.h"
0024 #include "mtk_drm_ddp_comp.h"
0025 #include "mtk_drm_gem.h"
0026 #include "mtk_drm_plane.h"
0027 
/*
 * struct mtk_drm_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @pending_needs_vblank: a vblank event was armed by the last commit and must
 *                        be delivered at the next vblank
 * @event: pending vblank event, set in mtk_drm_crtc_atomic_begin()
 * @planes: array of drm_plane structures, one for each hardware layer
 * @layer_nr: number of entries in @planes
 * @pending_planes: whether any plane has pending changes to be applied
 * @pending_async_planes: whether any plane has pending asynchronous changes
 * @cmdq_client: mailbox client used to submit CMDQ command packets
 * @cmdq_handle: pre-created CMDQ packet, reused for every flush
 * @cmdq_event: CMDQ event the packet waits for before updating registers
 * @cmdq_vblank_cnt: watchdog counter; a submitted packet is expected to
 *                   execute within this many vblanks (see mtk_crtc_ddp_irq())
 * @cb_blocking_queue: waitqueue woken from ddp_cmdq_cb() when a packet is done
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc
 * @hw_lock: serializes mtk_drm_crtc_update_config() against itself
 * @config_updating: true while an update is being pushed to the hardware;
 *                   suppresses page-flip completion in the vblank handler
 */
struct mtk_drm_crtc {
	struct drm_crtc			base;
	bool				enabled;

	bool				pending_needs_vblank;
	struct drm_pending_vblank_event *event;

	struct drm_plane		*planes;
	unsigned int			layer_nr;
	bool				pending_planes;
	bool				pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client		cmdq_client;
	struct cmdq_pkt			cmdq_handle;
	u32				cmdq_event;
	u32				cmdq_vblank_cnt;
	wait_queue_head_t		cb_blocking_queue;
#endif

	struct device			*mmsys_dev;
	struct mtk_mutex		*mutex;
	unsigned int			ddp_comp_nr;
	struct mtk_ddp_comp		**ddp_comp;

	/* lock for display hardware access */
	struct mutex			hw_lock;
	bool				config_updating;
};
0070 
/*
 * struct mtk_crtc_state - MediaTek private CRTC state.
 * @base: base drm_crtc_state; kept as first member so the container_of() in
 *        to_mtk_crtc_state() works.
 * @pending_config: true when the members below hold a new mode that still has
 *                  to be written to the hardware.
 * @pending_width: hdisplay of the pending mode.
 * @pending_height: vdisplay of the pending mode.
 * @pending_vrefresh: vertical refresh rate of the pending mode.
 */
struct mtk_crtc_state {
	struct drm_crtc_state		base;

	bool				pending_config;
	unsigned int			pending_width;
	unsigned int			pending_height;
	unsigned int			pending_vrefresh;
};
0079 
/* Upcast from the embedded drm_crtc to the MediaTek CRTC wrapper. */
static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_drm_crtc, base);
}
0084 
/* Upcast from the embedded drm_crtc_state to the MediaTek CRTC state. */
static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}
0089 
/*
 * Complete a page flip: send the pending vblank event to userspace and drop
 * the vblank reference taken in mtk_drm_crtc_atomic_begin().  Uses the
 * irqsave spinlock variant because this runs from the vblank IRQ path.
 */
static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
	drm_crtc_vblank_put(crtc);
	mtk_crtc->event = NULL;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
0101 
/*
 * Per-vblank housekeeping: report the vblank to the DRM core and, unless a
 * configuration update is still in flight (config_updating), deliver any
 * page-flip event armed by the last commit.
 */
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	drm_crtc_handle_vblank(&mtk_crtc->base);
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_drm_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
}
0110 
0111 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
0112 static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
0113                    size_t size)
0114 {
0115     struct device *dev;
0116     dma_addr_t dma_addr;
0117 
0118     pkt->va_base = kzalloc(size, GFP_KERNEL);
0119     if (!pkt->va_base) {
0120         kfree(pkt);
0121         return -ENOMEM;
0122     }
0123     pkt->buf_size = size;
0124     pkt->cl = (void *)client;
0125 
0126     dev = client->chan->mbox->dev;
0127     dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
0128                   DMA_TO_DEVICE);
0129     if (dma_mapping_error(dev, dma_addr)) {
0130         dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
0131         kfree(pkt->va_base);
0132         kfree(pkt);
0133         return -ENOMEM;
0134     }
0135 
0136     pkt->pa_base = dma_addr;
0137 
0138     return 0;
0139 }
0140 
0141 static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
0142 {
0143     struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
0144 
0145     dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
0146              DMA_TO_DEVICE);
0147     kfree(pkt->va_base);
0148     kfree(pkt);
0149 }
0150 #endif
0151 
/*
 * drm_crtc_funcs::destroy - undo mtk_drm_crtc_create(): release the mutex
 * stream, the CMDQ packet and mailbox channel, detach the vblank callbacks
 * and unregister the CRTC from DRM.
 */
static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);

	if (mtk_crtc->cmdq_client.chan) {
		mbox_free_channel(mtk_crtc->cmdq_client.chan);
		mtk_crtc->cmdq_client.chan = NULL;
	}
#endif

	/* Unhook the per-component vblank callbacks registered at create time. */
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp;

		comp = mtk_crtc->ddp_comp[i];
		mtk_ddp_comp_unregister_vblank_cb(comp);
	}

	drm_crtc_cleanup(crtc);
}
0176 
/*
 * drm_crtc_funcs::reset - throw away any current CRTC state and install a
 * fresh zeroed one.
 */
static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	/*
	 * base is the first member of struct mtk_crtc_state, so when
	 * crtc->state is NULL this passes NULL to kfree(), a no-op.
	 */
	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
0191 
0192 static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
0193 {
0194     struct mtk_crtc_state *state;
0195 
0196     state = kmalloc(sizeof(*state), GFP_KERNEL);
0197     if (!state)
0198         return NULL;
0199 
0200     __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
0201 
0202     WARN_ON(state->base.crtc != crtc);
0203     state->base.crtc = crtc;
0204     state->pending_config = false;
0205 
0206     return &state->base;
0207 }
0208 
/*
 * drm_crtc_funcs::atomic_destroy_state - release a state previously created
 * by mtk_drm_crtc_duplicate_state().
 */
static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}
0215 
/* drm_crtc_helper_funcs::mode_fixup - accept every mode unmodified. */
static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}
0223 
/*
 * drm_crtc_helper_funcs::mode_set_nofb - latch the new display mode into the
 * private CRTC state; it is written to the hardware by the next
 * mtk_crtc_ddp_config() pass, which triggers on pending_config.
 */
static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}
0234 
0235 static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
0236 {
0237     int ret;
0238     int i;
0239 
0240     for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
0241         ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
0242         if (ret) {
0243             DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
0244             goto err;
0245         }
0246     }
0247 
0248     return 0;
0249 err:
0250     while (--i >= 0)
0251         mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
0252     return ret;
0253 }
0254 
0255 static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
0256 {
0257     int i;
0258 
0259     for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
0260         mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
0261 }
0262 
/*
 * Map a plane back to the ddp component that implements it.
 *
 * Planes are stored consecutively in mtk_crtc->planes in path order, so the
 * plane's array index is compared against the running total of each
 * component's layer count.  On success *local_layer receives the layer index
 * within the returned component; on failure a WARN fires and NULL is
 * returned.
 */
static
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
						struct drm_plane *plane,
						unsigned int *local_layer)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	unsigned int local_index = plane - mtk_crtc->planes;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);
	return NULL;
}
0285 
0286 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
/*
 * Mailbox RX callback: runs once the CMDQ hardware thread has executed the
 * flush packet submitted by mtk_drm_crtc_update_config().  On success, clear
 * the pending-config flags that were deliberately left set while the packet
 * was in flight, reset the vblank watchdog counter and wake anyone waiting
 * in mtk_drm_crtc_atomic_disable().
 */
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct cmdq_cb_data *data = mssg;
	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
	struct mtk_crtc_state *state;
	unsigned int i;

	/* Packet reported an error: leave all flags pending. */
	if (data->sta < 0)
		return;

	state = to_mtk_crtc_state(mtk_crtc->base.state);

	state->pending_config = false;

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}

	/* Packet executed in time: disarm the watchdog and release waiters. */
	mtk_crtc->cmdq_vblank_cnt = 0;
	wake_up(&mtk_crtc->cb_blocking_queue);
}
0329 #endif
0330 
/*
 * Bring up the full display path of this CRTC: power domain, mutex and
 * component clocks, mmsys routing between the components, per-component mode
 * configuration, and an initial configuration pass over all planes.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * acquired so far is released again.
 */
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	/*
	 * Use the smallest non-zero bpc advertised by any connector driven by
	 * this CRTC, starting from the MTK_MAX_BPC ceiling.
	 */
	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	/*
	 * Route each component to the next one in the path, and attach every
	 * component (including the last, which has no downstream neighbour)
	 * to the mutex stream.
	 */
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
				      mtk_crtc->ddp_comp[i]->id,
				      mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_add_comp(mtk_crtc->mutex,
				   mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		/*
		 * NOTE(review): the second component in the path gets its
		 * background-color input enabled - presumably the blending
		 * stage; confirm against the ddp component docs.
		 */
		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);
		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}
0423 
/*
 * Tear down the display path set up by mtk_crtc_ddp_hw_init(): stop the
 * components, detach them from the mutex stream, break the mmsys links,
 * drop clocks and the power domain, and deliver a pending event if the CRTC
 * is going inactive.
 */
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		/* Mirrors the bgclr_in_on done for component 1 at hw init. */
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_mutex_remove_comp(mtk_crtc->mutex,
				      mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
					 mtk_crtc->ddp_comp[i]->id,
					 mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_remove_comp(mtk_crtc->mutex,
				      mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	/* CRTC is being switched off: complete the commit's event now. */
	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}
0460 
/*
 * Apply pending CRTC and plane configuration to the hardware.  With
 * cmdq_handle == NULL registers are written immediately and the pending
 * flags are cleared here; with a CMDQ packet the commands are only appended
 * and the flags stay set until ddp_cmdq_cb() confirms execution.
 */
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		/* New mode: reconfigure the first component in the path. */
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		if (!cmdq_handle)
			state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.async_config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_async_planes = false;
	}
}
0535 
/*
 * Collect dirty plane state and push the resulting configuration to the
 * hardware - through shadow registers, a CMDQ packet, or (when neither is
 * available) from the next vblank IRQ via mtk_crtc_ddp_irq().  Serialized
 * against itself by hw_lock; config_updating suppresses page-flip completion
 * while the update is in progress.
 */
static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
				       bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;

	mutex_lock(&mtk_crtc->hw_lock);
	mtk_crtc->config_updating = true;
	if (needs_vblank)
		mtk_crtc->pending_needs_vblank = true;

	/* Promote each plane's dirty flag to a pending-config flag. */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	if (priv->data->shadow_register) {
		/* Hardware latches the writes at the next vblank. */
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		/* Drop any packet still queued before building a new one. */
		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
		cmdq_handle->cmd_buf_size = 0;
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_finalize(cmdq_handle);
		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
		/*
		 * CMDQ command should execute in next 3 vblank.
		 * One vblank interrupt before send message (occasionally)
		 * and one vblank interrupt after cmdq done,
		 * so it's timeout after 3 vblank interrupt.
		 * If it fail to execute in next 3 vblank, timeout happen.
		 */
		mtk_crtc->cmdq_vblank_cnt = 3;

		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
	}
#endif
	mtk_crtc->config_updating = false;
	mutex_unlock(&mtk_crtc->hw_lock);
}
0605 
/*
 * Vblank callback registered with the path's first component.  Without
 * shadow registers or CMDQ the pending configuration is written to the
 * hardware here, in vblank context; with CMDQ this only ticks down the
 * watchdog armed in mtk_drm_crtc_update_config() and complains if the packet
 * failed to execute within 3 vblanks.
 */
static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
		mtk_crtc_ddp_config(crtc, NULL);
	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
			  drm_crtc_index(&mtk_crtc->base));
#else
	if (!priv->data->shadow_register)
		mtk_crtc_ddp_config(crtc, NULL);
#endif
	mtk_drm_finish_page_flip(mtk_crtc);
}
0624 
0625 static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
0626 {
0627     struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
0628     struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
0629 
0630     mtk_ddp_comp_enable_vblank(comp);
0631 
0632     return 0;
0633 }
0634 
0635 static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
0636 {
0637     struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
0638     struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
0639 
0640     mtk_ddp_comp_disable_vblank(comp);
0641 }
0642 
/*
 * Validate a plane state against the ddp component that owns the plane.
 * Returns 0 when the plane maps to no component (nothing to check) or the
 * component's verdict otherwise.
 */
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			     struct mtk_plane_state *state)
{
	struct mtk_ddp_comp *comp;
	unsigned int layer;

	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &layer);
	if (!comp)
		return 0;

	return mtk_ddp_comp_layer_check(comp, layer, state);
}
0654 
0655 void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
0656                    struct drm_atomic_state *state)
0657 {
0658     struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
0659 
0660     if (!mtk_crtc->enabled)
0661         return;
0662 
0663     mtk_drm_crtc_update_config(mtk_crtc, false);
0664 }
0665 
/*
 * drm_crtc_helper_funcs::atomic_enable - resume the first component's power
 * domain, bring up the whole display path and enable vblank handling.  On
 * any failure the CRTC simply stays disabled (enabled remains false).
 */
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = pm_runtime_resume_and_get(comp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
		return;
	}

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		pm_runtime_put(comp->dev);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}
0690 
/*
 * drm_crtc_helper_funcs::atomic_disable - disable all planes, wait until the
 * hardware has actually stopped scanning them out (via CMDQ completion
 * and/or one vblank), then tear the path down and drop the power domain.
 */
static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i, ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_drm_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* Wait for planes to be disabled by cmdq */
	if (mtk_crtc->cmdq_client.chan)
		wait_event_timeout(mtk_crtc->cb_blocking_queue,
				   mtk_crtc->cmdq_vblank_cnt == 0,
				   msecs_to_jiffies(500));
#endif
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	ret = pm_runtime_put(comp->dev);
	if (ret < 0)
		DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret);

	mtk_crtc->enabled = false;
}
0732 
/*
 * drm_crtc_helper_funcs::atomic_begin - move the commit's vblank event into
 * mtk_crtc->event and take a vblank reference for it; the event is sent by
 * mtk_drm_crtc_finish_page_flip() at a later vblank.
 */
static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (mtk_crtc->event && mtk_crtc_state->base.event)
		DRM_ERROR("new event while there is still a pending event\n");

	if (mtk_crtc_state->base.event) {
		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		mtk_crtc->event = mtk_crtc_state->base.event;
		mtk_crtc_state->base.event = NULL;
	}
}
0751 
/*
 * drm_crtc_helper_funcs::atomic_flush - push gamma/CTM tables when color
 * management changed, then kick the hardware configuration; a vblank event
 * is requested iff atomic_begin armed one.
 */
static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (crtc->state->color_mgmt_changed)
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
	mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}
0765 
/* CRTC vtable: generic atomic helpers plus the MTK vblank on/off hooks. */
static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= mtk_drm_crtc_destroy,
	.reset			= mtk_drm_crtc_reset,
	.atomic_duplicate_state	= mtk_drm_crtc_duplicate_state,
	.atomic_destroy_state	= mtk_drm_crtc_destroy_state,
	.enable_vblank		= mtk_drm_crtc_enable_vblank,
	.disable_vblank		= mtk_drm_crtc_disable_vblank,
};
0776 
/* Atomic helper callbacks implementing the mode-set/enable/flush sequence. */
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup	= mtk_drm_crtc_mode_fixup,
	.mode_set_nofb	= mtk_drm_crtc_mode_set_nofb,
	.atomic_begin	= mtk_drm_crtc_atomic_begin,
	.atomic_flush	= mtk_drm_crtc_atomic_flush,
	.atomic_enable	= mtk_drm_crtc_atomic_enable,
	.atomic_disable	= mtk_drm_crtc_atomic_disable,
};
0785 
/*
 * Register the CRTC with the DRM core, bound to the primary and cursor
 * planes found in mtk_crtc->planes (created earlier by
 * mtk_drm_crtc_init_comp_planes()).  @pipe is currently unused here.
 */
static int mtk_drm_crtc_init(struct drm_device *drm,
			     struct mtk_drm_crtc *mtk_crtc,
			     unsigned int pipe)
{
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}
0814 
0815 static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
0816                     int comp_idx)
0817 {
0818     struct mtk_ddp_comp *comp;
0819 
0820     if (comp_idx > 1)
0821         return 0;
0822 
0823     comp = mtk_crtc->ddp_comp[comp_idx];
0824     if (!comp->funcs)
0825         return 0;
0826 
0827     if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
0828         return 0;
0829 
0830     return mtk_ddp_comp_layer_nr(comp);
0831 }
0832 
0833 static inline
0834 enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
0835                         unsigned int num_planes)
0836 {
0837     if (plane_idx == 0)
0838         return DRM_PLANE_TYPE_PRIMARY;
0839     else if (plane_idx == (num_planes - 1))
0840         return DRM_PLANE_TYPE_CURSOR;
0841     else
0842         return DRM_PLANE_TYPE_OVERLAY;
0843 
0844 }
0845 
/*
 * Create one drm_plane per hardware layer of the path component at
 * @comp_idx, appending them to mtk_crtc->planes and advancing
 * mtk_crtc->layer_nr.  Returns 0 on success or the first mtk_plane_init()
 * error.
 */
static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
					 struct mtk_drm_crtc *mtk_crtc,
					 int comp_idx, int pipe)
{
	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		/* layer_nr doubles as global plane index across components. */
		ret = mtk_plane_init(drm_dev,
				&mtk_crtc->planes[mtk_crtc->layer_nr],
				BIT(pipe),
				mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
							num_planes),
				mtk_ddp_comp_supported_rotations(comp));
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}
0868 
0869 int mtk_drm_crtc_create(struct drm_device *drm_dev,
0870             const enum mtk_ddp_comp_id *path, unsigned int path_len)
0871 {
0872     struct mtk_drm_private *priv = drm_dev->dev_private;
0873     struct device *dev = drm_dev->dev;
0874     struct mtk_drm_crtc *mtk_crtc;
0875     unsigned int num_comp_planes = 0;
0876     int pipe = priv->num_pipes;
0877     int ret;
0878     int i;
0879     bool has_ctm = false;
0880     uint gamma_lut_size = 0;
0881 
0882     if (!path)
0883         return 0;
0884 
0885     for (i = 0; i < path_len; i++) {
0886         enum mtk_ddp_comp_id comp_id = path[i];
0887         struct device_node *node;
0888         struct mtk_ddp_comp *comp;
0889 
0890         node = priv->comp_node[comp_id];
0891         comp = &priv->ddp_comp[comp_id];
0892 
0893         if (!node) {
0894             dev_info(dev,
0895                  "Not creating crtc %d because component %d is disabled or missing\n",
0896                  pipe, comp_id);
0897             return 0;
0898         }
0899 
0900         if (!comp->dev) {
0901             dev_err(dev, "Component %pOF not initialized\n", node);
0902             return -ENODEV;
0903         }
0904     }
0905 
0906     mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
0907     if (!mtk_crtc)
0908         return -ENOMEM;
0909 
0910     mtk_crtc->mmsys_dev = priv->mmsys_dev;
0911     mtk_crtc->ddp_comp_nr = path_len;
0912     mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
0913                         sizeof(*mtk_crtc->ddp_comp),
0914                         GFP_KERNEL);
0915     if (!mtk_crtc->ddp_comp)
0916         return -ENOMEM;
0917 
0918     mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
0919     if (IS_ERR(mtk_crtc->mutex)) {
0920         ret = PTR_ERR(mtk_crtc->mutex);
0921         dev_err(dev, "Failed to get mutex: %d\n", ret);
0922         return ret;
0923     }
0924 
0925     for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
0926         enum mtk_ddp_comp_id comp_id = path[i];
0927         struct mtk_ddp_comp *comp;
0928 
0929         comp = &priv->ddp_comp[comp_id];
0930         mtk_crtc->ddp_comp[i] = comp;
0931 
0932         if (comp->funcs) {
0933             if (comp->funcs->gamma_set)
0934                 gamma_lut_size = MTK_LUT_SIZE;
0935 
0936             if (comp->funcs->ctm_set)
0937                 has_ctm = true;
0938         }
0939 
0940         mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq,
0941                         &mtk_crtc->base);
0942     }
0943 
0944     for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
0945         num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);
0946 
0947     mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
0948                     sizeof(struct drm_plane), GFP_KERNEL);
0949 
0950     for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
0951         ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
0952                             pipe);
0953         if (ret)
0954             return ret;
0955     }
0956 
0957     ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
0958     if (ret < 0)
0959         return ret;
0960 
0961     if (gamma_lut_size)
0962         drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
0963     drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
0964     priv->num_pipes++;
0965     mutex_init(&mtk_crtc->hw_lock);
0966 
0967 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
0968     mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
0969     mtk_crtc->cmdq_client.client.tx_block = false;
0970     mtk_crtc->cmdq_client.client.knows_txdone = true;
0971     mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
0972     mtk_crtc->cmdq_client.chan =
0973             mbox_request_channel(&mtk_crtc->cmdq_client.client,
0974                          drm_crtc_index(&mtk_crtc->base));
0975     if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
0976         dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
0977             drm_crtc_index(&mtk_crtc->base));
0978         mtk_crtc->cmdq_client.chan = NULL;
0979     }
0980 
0981     if (mtk_crtc->cmdq_client.chan) {
0982         ret = of_property_read_u32_index(priv->mutex_node,
0983                          "mediatek,gce-events",
0984                          drm_crtc_index(&mtk_crtc->base),
0985                          &mtk_crtc->cmdq_event);
0986         if (ret) {
0987             dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
0988                 drm_crtc_index(&mtk_crtc->base));
0989             mbox_free_channel(mtk_crtc->cmdq_client.chan);
0990             mtk_crtc->cmdq_client.chan = NULL;
0991         } else {
0992             ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
0993                               &mtk_crtc->cmdq_handle,
0994                               PAGE_SIZE);
0995             if (ret) {
0996                 dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
0997                     drm_crtc_index(&mtk_crtc->base));
0998                 mbox_free_channel(mtk_crtc->cmdq_client.chan);
0999                 mtk_crtc->cmdq_client.chan = NULL;
1000             }
1001         }
1002 
1003         /* for sending blocking cmd in crtc disable */
1004         init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
1005     }
1006 #endif
1007     return 0;
1008 }