0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/delay.h>
0009 #include <linux/interconnect.h>
0010 #include <linux/of_irq.h>
0011
0012 #include <drm/drm_debugfs.h>
0013 #include <drm/drm_drv.h>
0014 #include <drm/drm_file.h>
0015 #include <drm/drm_vblank.h>
0016
0017 #include "msm_drv.h"
0018 #include "msm_gem.h"
0019 #include "msm_mmu.h"
0020 #include "mdp5_kms.h"
0021
/*
 * kms_funcs.hw_init: one-time MDP5 hardware bring-up.
 *
 * Clears the display interface selection register and resets the CTL
 * manager's state, with a runtime PM reference held so the register
 * accesses hit powered hardware.  Always returns 0.
 */
static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(dev);

	/*
	 * Write 0 to DISP_INTF_SEL under resource_lock, which serializes
	 * access to registers shared with other contexts.  Presumably this
	 * de-selects all interfaces before the CTL reset below — confirm
	 * against the MDP5 register documentation.
	 */
	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	pm_runtime_put_sync(dev);

	return 0;
}
0064
0065
0066
0067
0068
0069
0070
0071
0072 struct mdp5_global_state *
0073 mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
0074 {
0075 return to_mdp5_global_state(mdp5_kms->glob_state.state);
0076 }
0077
0078
0079
0080
0081
/*
 * Get the mdp5 global state for the atomic commit described by @s.
 *
 * Takes glob_state_lock under the commit's acquire context, so lock
 * contention is resolved through the normal drm_modeset_lock backoff
 * protocol: this may return ERR_PTR(-EDEADLK), in which case the caller
 * must drop its locks and retry the commit.
 */
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	/* duplicated-for-commit state, created on first access in this commit */
	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_mdp5_global_state(priv_state);
}
0099
0100 static struct drm_private_state *
0101 mdp5_global_duplicate_state(struct drm_private_obj *obj)
0102 {
0103 struct mdp5_global_state *state;
0104
0105 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
0106 if (!state)
0107 return NULL;
0108
0109 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
0110
0111 return &state->base;
0112 }
0113
/* Private-object hook: free a global state duplicated above. */
static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	kfree(to_mdp5_global_state(state));
}
0121
/* vtable for the glob_state atomic private object */
static const struct drm_private_state_funcs mdp5_global_state_funcs = {
	.atomic_duplicate_state = mdp5_global_duplicate_state,
	.atomic_destroy_state = mdp5_global_destroy_state,
};
0126
/*
 * Allocate the initial global state and register it with the atomic
 * framework as a private object.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): on the -ENOMEM path glob_state_lock has been initialized
 * but glob_state has not; verify that error unwinding (mdp5_destroy())
 * copes with a never-initialized private object.
 */
static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
	struct mdp5_global_state *state;

	drm_modeset_lock_init(&mdp5_kms->glob_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* back-pointer used by consumers of the global state */
	state->mdp5_kms = mdp5_kms;

	drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
				    &state->base,
				    &mdp5_global_state_funcs);
	return 0;
}
0144
0145 static void mdp5_enable_commit(struct msm_kms *kms)
0146 {
0147 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
0148 pm_runtime_get_sync(&mdp5_kms->pdev->dev);
0149 }
0150
0151 static void mdp5_disable_commit(struct msm_kms *kms)
0152 {
0153 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
0154 pm_runtime_put_sync(&mdp5_kms->pdev->dev);
0155 }
0156
0157 static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
0158 {
0159 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
0160 struct mdp5_global_state *global_state;
0161
0162 global_state = mdp5_get_existing_global_state(mdp5_kms);
0163
0164 if (mdp5_kms->smp)
0165 mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
0166 }
0167
/* kms_funcs.flush_commit: no-op on mdp5. */
static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* intentionally empty */
}
0172
/*
 * kms_funcs.wait_flush: block until every CRTC in @crtc_mask has latched
 * its pending update.
 */
static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
		mdp5_crtc_wait_for_commit_done(crtc);
}
0181
0182 static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
0183 {
0184 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
0185 struct mdp5_global_state *global_state;
0186
0187 global_state = mdp5_get_existing_global_state(mdp5_kms);
0188
0189 if (mdp5_kms->smp)
0190 mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
0191 }
0192
0193 static int mdp5_set_split_display(struct msm_kms *kms,
0194 struct drm_encoder *encoder,
0195 struct drm_encoder *slave_encoder,
0196 bool is_cmd_mode)
0197 {
0198 if (is_cmd_mode)
0199 return mdp5_cmd_encoder_set_split_display(encoder,
0200 slave_encoder);
0201 else
0202 return mdp5_vid_encoder_set_split_display(encoder,
0203 slave_encoder);
0204 }
0205
0206 static void mdp5_destroy(struct platform_device *pdev);
0207
/*
 * kms_funcs.destroy: tear down everything built by mdp5_kms_init().
 *
 * Frees the hardware mixers and pipes, detaches and releases the GEM
 * address space, destroys the base mdp_kms state and finally the
 * per-device mdp5 state via mdp5_destroy().
 */
static void mdp5_kms_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_gem_address_space *aspace = kms->aspace;
	int i;

	for (i = 0; i < mdp5_kms->num_hwmixers; i++)
		mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);

	for (i = 0; i < mdp5_kms->num_hwpipes; i++)
		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);

	if (aspace) {
		/* detach the MMU before dropping our reference */
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	mdp_kms_destroy(&mdp5_kms->base);
	mdp5_destroy(mdp5_kms->pdev);
}
0228
0229 #ifdef CONFIG_DEBUG_FS
0230 static int smp_show(struct seq_file *m, void *arg)
0231 {
0232 struct drm_info_node *node = (struct drm_info_node *) m->private;
0233 struct drm_device *dev = node->minor->dev;
0234 struct msm_drm_private *priv = dev->dev_private;
0235 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
0236 struct drm_printer p = drm_seq_file_printer(m);
0237
0238 if (!mdp5_kms->smp) {
0239 drm_printf(&p, "no SMP pool\n");
0240 return 0;
0241 }
0242
0243 mdp5_smp_dump(mdp5_kms->smp, &p);
0244
0245 return 0;
0246 }
0247
/* debugfs entries registered under the DRM minor's debugfs directory */
static struct drm_info_list mdp5_debugfs_list[] = {
		{"smp", smp_show },
};
0251
/* kms_funcs.debugfs_init: register the mdp5 debugfs files.  Always 0. */
static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	drm_debugfs_create_files(mdp5_debugfs_list,
				 ARRAY_SIZE(mdp5_debugfs_list),
				 minor->debugfs_root, minor);

	return 0;
}
0260 #endif
0261
/* mdp5 implementation of the mdp_kms_funcs / msm_kms_funcs vtables */
static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init = mdp5_hw_init,
		.irq_preinstall = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall = mdp5_irq_uninstall,
		.irq = mdp5_irq,
		.enable_vblank = mdp5_enable_vblank,
		.disable_vblank = mdp5_disable_vblank,
		.flush_commit = mdp5_flush_commit,
		.enable_commit = mdp5_enable_commit,
		.disable_commit = mdp5_disable_commit,
		.prepare_commit = mdp5_prepare_commit,
		.wait_flush = mdp5_wait_flush,
		.complete_commit = mdp5_complete_commit,
		.get_format = mdp_get_format,
		.set_split_display = mdp5_set_split_display,
		.destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
		.debugfs_init = mdp5_kms_debugfs_init,
#endif
	},
	.set_irqmask = mdp5_set_irqmask,
};
0286
/*
 * Runtime-suspend helper: drop one enable reference and gate all MDP5
 * clocks.  The optional clocks (lut/tbu/tbu_rt) may have been left NULL
 * by get_clk(); clk_disable_unprepare() treats NULL as a no-op.
 * Always returns 0.
 */
static int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count--;
	/* underflow here means an unbalanced enable/disable pairing */
	WARN_ON(mdp5_kms->enable_count < 0);

	clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
	clk_disable_unprepare(mdp5_kms->tbu_clk);
	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}
0303
/*
 * Runtime-resume helper: take one enable reference and ungate all MDP5
 * clocks.  NULL optional clocks are no-ops for clk_prepare_enable().
 * Always returns 0.
 */
static int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count++;

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	clk_prepare_enable(mdp5_kms->lut_clk);
	clk_prepare_enable(mdp5_kms->tbu_clk);
	clk_prepare_enable(mdp5_kms->tbu_rt_clk);

	return 0;
}
0319
0320 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
0321 struct mdp5_interface *intf,
0322 struct mdp5_ctl *ctl)
0323 {
0324 struct drm_device *dev = mdp5_kms->dev;
0325 struct drm_encoder *encoder;
0326
0327 encoder = mdp5_encoder_init(dev, intf, ctl);
0328 if (IS_ERR(encoder)) {
0329 DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
0330 return encoder;
0331 }
0332
0333 return encoder;
0334 }
0335
0336 static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
0337 {
0338 const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
0339 const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
0340 int id = 0, i;
0341
0342 for (i = 0; i < intf_cnt; i++) {
0343 if (intfs[i] == INTF_DSI) {
0344 if (intf_num == i)
0345 return id;
0346
0347 id++;
0348 }
0349 }
0350
0351 return -EINVAL;
0352 }
0353
/*
 * Create the encoder for one hardware interface and wire it up to the
 * matching msm sub-driver (HDMI or DSI).
 *
 * Interfaces whose sub-driver is absent (priv->hdmi / priv->dsi[] NULL)
 * are skipped silently; eDP is skipped with an info message.  HDMI and
 * DSI each allocate a CTL from the CTL manager before constructing the
 * encoder; DSI additionally resolves the interface number to a DSI core
 * index and, after modeset init succeeds, programs command vs. video
 * mode on the encoder.
 *
 * Returns 0 on success or skip, negative errno on failure.
 */
static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf->type) {
	case INTF_eDP:
		/* eDP is not supported by this driver */
		DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
		break;
	case INTF_HDMI:
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
		break;
	case INTF_DSI:
	{
		const struct mdp5_cfg_hw *hw_cfg =
			mdp5_cfg_get_hw_config(mdp5_kms->cfg);
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
				      intf->num);
			ret = -EINVAL;
			break;
		}

		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (!ret)
			mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));

		break;
	}
	default:
		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
0428
/*
 * Construct the KMS object tree: encoders (via the interfaces), planes
 * (one per hardware pipe) and CRTCs, then set every encoder's
 * possible_crtcs mask.
 *
 * Returns 0 on success, negative errno on failure; partially constructed
 * objects are cleaned up by the caller's mdp5_kms_destroy() path.
 */
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int num_crtcs;
	int i, ret, pi = 0, ci = 0;
	struct drm_plane *primary[MAX_BASES] = { NULL };
	struct drm_plane *cursor[MAX_BASES] = { NULL };
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	/* Construct encoders and connectors for every active interface */
	for (i = 0; i < mdp5_kms->num_intfs; i++) {
		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
		if (ret)
			goto fail;
	}

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	/*
	 * A CRTC needs both an encoder to feed and a hw mixer to drive it,
	 * so create only as many CRTCs as the smaller of the two counts.
	 */
	num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);

	/*
	 * Construct one plane per hardware pipe: the first num_crtcs pipes
	 * become primary planes, cursor-capable pipes become cursor planes,
	 * and the rest become overlays.
	 */
	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane;
		enum drm_plane_type type;

		if (i < num_crtcs)
			type = DRM_PLANE_TYPE_PRIMARY;
		else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
			type = DRM_PLANE_TYPE_CURSOR;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		plane = mdp5_plane_init(dev, type);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
			goto fail;
		}

		if (type == DRM_PLANE_TYPE_PRIMARY)
			primary[pi++] = plane;
		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor[ci++] = plane;
	}

	/* Construct the CRTCs, pairing each with a primary/cursor plane */
	for (i = 0; i < num_crtcs; i++) {
		struct drm_crtc *crtc;

		crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/*
	 * Now that the CRTC count is known, allow every encoder to pair
	 * with any CRTC.
	 */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;

fail:
	return ret;
}
0515
/*
 * Read the MDP5 HW_VERSION register (with the device powered via runtime
 * PM) and decode it into @major/@minor, logging the result.
 */
static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
				 u32 *major, u32 *minor)
{
	struct device *dev = &mdp5_kms->pdev->dev;
	u32 version;

	pm_runtime_get_sync(dev);
	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
	pm_runtime_put_sync(dev);

	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);

	DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}
0531
0532 static int get_clk(struct platform_device *pdev, struct clk **clkp,
0533 const char *name, bool mandatory)
0534 {
0535 struct device *dev = &pdev->dev;
0536 struct clk *clk = msm_clk_get(pdev, name);
0537 if (IS_ERR(clk) && mandatory) {
0538 DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
0539 return PTR_ERR(clk);
0540 }
0541 if (IS_ERR(clk))
0542 DBG("skipping %s", name);
0543 else
0544 *clkp = clk;
0545
0546 return 0;
0547 }
0548
0549 static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);
0550
0551 static int mdp5_kms_init(struct drm_device *dev)
0552 {
0553 struct msm_drm_private *priv = dev->dev_private;
0554 struct platform_device *pdev;
0555 struct mdp5_kms *mdp5_kms;
0556 struct mdp5_cfg *config;
0557 struct msm_kms *kms;
0558 struct msm_gem_address_space *aspace;
0559 int irq, i, ret;
0560
0561 ret = mdp5_init(to_platform_device(dev->dev), dev);
0562
0563
0564 kms = priv->kms;
0565 if (!kms)
0566 return -ENOMEM;
0567
0568 mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
0569 pdev = mdp5_kms->pdev;
0570
0571 ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
0572 if (ret) {
0573 DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n");
0574 goto fail;
0575 }
0576
0577 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
0578 if (!irq) {
0579 ret = -EINVAL;
0580 DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
0581 goto fail;
0582 }
0583
0584 kms->irq = irq;
0585
0586 config = mdp5_cfg_get_config(mdp5_kms->cfg);
0587
0588
0589
0590
0591
0592 pm_runtime_get_sync(&pdev->dev);
0593 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
0594 if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
0595 !config->hw->intf.base[i])
0596 continue;
0597 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
0598
0599 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
0600 }
0601 mdelay(16);
0602
0603 aspace = msm_kms_init_aspace(mdp5_kms->dev);
0604 if (IS_ERR(aspace)) {
0605 ret = PTR_ERR(aspace);
0606 goto fail;
0607 }
0608
0609 kms->aspace = aspace;
0610
0611 pm_runtime_put_sync(&pdev->dev);
0612
0613 ret = modeset_init(mdp5_kms);
0614 if (ret) {
0615 DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
0616 goto fail;
0617 }
0618
0619 dev->mode_config.min_width = 0;
0620 dev->mode_config.min_height = 0;
0621 dev->mode_config.max_width = 0xffff;
0622 dev->mode_config.max_height = 0xffff;
0623
0624 dev->max_vblank_count = 0;
0625 dev->vblank_disable_immediate = true;
0626
0627 return 0;
0628 fail:
0629 if (kms)
0630 mdp5_kms_destroy(kms);
0631
0632 return ret;
0633 }
0634
/*
 * Free the per-device mdp5 state created by mdp5_init(), in roughly the
 * reverse order of creation.  Each sub-object is NULL-checked, so this is
 * safe on a partially-initialized instance.
 *
 * NOTE(review): drm_atomic_private_obj_fini() runs unconditionally —
 * confirm glob_state was initialized on every path that can reach here
 * (see mdp5_global_obj_init()'s -ENOMEM path).
 */
static void mdp5_destroy(struct platform_device *pdev)
{
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
	int i;

	if (mdp5_kms->ctlm)
		mdp5_ctlm_destroy(mdp5_kms->ctlm);
	if (mdp5_kms->smp)
		mdp5_smp_destroy(mdp5_kms->smp);
	if (mdp5_kms->cfg)
		mdp5_cfg_destroy(mdp5_kms->cfg);

	for (i = 0; i < mdp5_kms->num_intfs; i++)
		kfree(mdp5_kms->intfs[i]);

	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);

	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}
0656
/*
 * Allocate and register @cnt hardware pipes of one class.
 * @pipes:   pipe enum ids for this class
 * @offsets: per-pipe register base offsets from the hw config
 * @caps:    capability flags shared by all pipes of this class
 *
 * Appends each pipe to mdp5_kms->hwpipes and assigns its running index.
 * Returns 0, or the PTR_ERR of the first pipe that failed to init (pipes
 * constructed before the failure remain registered for later cleanup).
 */
static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
			   const enum mdp5_pipe *pipes, const uint32_t *offsets,
			   uint32_t caps)
{
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < cnt; i++) {
		struct mdp5_hw_pipe *hwpipe;

		hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
		if (IS_ERR(hwpipe)) {
			ret = PTR_ERR(hwpipe);
			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
				      pipe2name(pipes[i]), ret);
			return ret;
		}
		hwpipe->idx = mdp5_kms->num_hwpipes;
		mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
	}

	return 0;
}
0680
0681 static int hwpipe_init(struct mdp5_kms *mdp5_kms)
0682 {
0683 static const enum mdp5_pipe rgb_planes[] = {
0684 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
0685 };
0686 static const enum mdp5_pipe vig_planes[] = {
0687 SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
0688 };
0689 static const enum mdp5_pipe dma_planes[] = {
0690 SSPP_DMA0, SSPP_DMA1,
0691 };
0692 static const enum mdp5_pipe cursor_planes[] = {
0693 SSPP_CURSOR0, SSPP_CURSOR1,
0694 };
0695 const struct mdp5_cfg_hw *hw_cfg;
0696 int ret;
0697
0698 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
0699
0700
0701 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
0702 hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
0703 if (ret)
0704 return ret;
0705
0706
0707 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
0708 hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
0709 if (ret)
0710 return ret;
0711
0712
0713 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
0714 hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
0715 if (ret)
0716 return ret;
0717
0718
0719 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
0720 cursor_planes, hw_cfg->pipe_cursor.base,
0721 hw_cfg->pipe_cursor.caps);
0722 if (ret)
0723 return ret;
0724
0725 return 0;
0726 }
0727
/*
 * Construct one mdp5_hw_mixer per layer-mixer instance in the hw config
 * and register it in mdp5_kms->hwmixers.  Returns 0 or the PTR_ERR of the
 * first mixer that failed to init.
 */
static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	for (i = 0; i < hw_cfg->lm.count; i++) {
		struct mdp5_hw_mixer *mixer;

		mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
		if (IS_ERR(mixer)) {
			ret = PTR_ERR(mixer);
			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
				      i, ret);
			return ret;
		}

		mixer->idx = mdp5_kms->num_hwmixers;
		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
	}

	return 0;
}
0753
0754 static int interface_init(struct mdp5_kms *mdp5_kms)
0755 {
0756 struct drm_device *dev = mdp5_kms->dev;
0757 const struct mdp5_cfg_hw *hw_cfg;
0758 const enum mdp5_intf_type *intf_types;
0759 int i;
0760
0761 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
0762 intf_types = hw_cfg->intf.connect;
0763
0764 for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
0765 struct mdp5_interface *intf;
0766
0767 if (intf_types[i] == INTF_DISABLED)
0768 continue;
0769
0770 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
0771 if (!intf) {
0772 DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
0773 return -ENOMEM;
0774 }
0775
0776 intf->num = i;
0777 intf->type = intf_types[i];
0778 intf->mode = MDP5_INTF_MODE_NONE;
0779 intf->idx = mdp5_kms->num_intfs;
0780 mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
0781 }
0782
0783 return 0;
0784 }
0785
/*
 * Allocate and populate the per-device mdp5_kms state: MMIO mapping,
 * clocks, runtime PM, hw-revision-specific config, SMP/CTL managers and
 * the pipe/mixer/interface tables.  On success publishes the kms pointer
 * through priv->kms for mdp5_kms_init() to pick up.
 *
 * Returns 0 or a negative errno; on failure everything constructed so far
 * is released via mdp5_destroy().
 */
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	u32 major, minor;
	int ret;

	mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms) {
		ret = -ENOMEM;
		goto fail;
	}

	platform_set_drvdata(pdev, mdp5_kms);

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp5_kms->dev = dev;
	mdp5_kms->pdev = pdev;

	ret = mdp5_global_obj_init(mdp5_kms);
	if (ret)
		goto fail;

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
	if (IS_ERR(mdp5_kms->mmio)) {
		ret = PTR_ERR(mdp5_kms->mmio);
		goto fail;
	}

	/* mandatory clocks: */
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
	if (ret)
		goto fail;

	/* optional clocks: */
	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
	get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
	get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);

	/*
	 * Set a safe default core clock rate before the device is powered
	 * to read the hw revision; the proper per-SoC rate is applied once
	 * the config is known (below).
	 */
	clk_set_rate(mdp5_kms->core_clk, 200000000);

	pm_runtime_enable(&pdev->dev);
	mdp5_kms->rpm_enabled = true;

	read_mdp_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		/* NULL it so mdp5_destroy() skips the cfg teardown */
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	mdp5_kms->caps = config->hw->mdp.caps;

	/* now that the config is known, raise core clock to its real max */
	clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);

	/* SMP (shared memory pool) exists only on some revisions */
	if (mdp5_kms->caps & MDP_CAP_SMP) {
		mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
		if (IS_ERR(mdp5_kms->smp)) {
			ret = PTR_ERR(mdp5_kms->smp);
			mdp5_kms->smp = NULL;
			goto fail;
		}
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	ret = hwpipe_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = hwmixer_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = interface_init(mdp5_kms);
	if (ret)
		goto fail;

	/* publish the kms pointer for mdp5_kms_init() */
	priv->kms = &mdp5_kms->base.base;

	return 0;
fail:
	if (mdp5_kms)
		mdp5_destroy(pdev);
	return ret;
}
0902
/*
 * Request the display->memory interconnect paths and cast an initial
 * bandwidth vote so scanout has guaranteed memory bandwidth.
 *
 * The paths belong to the parent MDSS device's DT node, hence
 * pdev->dev.parent.  Returns 0, or the error from the first (primary)
 * path lookup; the secondary paths are best-effort.
 */
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
	struct device *mdss_dev = pdev->dev.parent;
	struct icc_path *path0 = of_icc_get(mdss_dev, "mdp0-mem");
	struct icc_path *path1 = of_icc_get(mdss_dev, "mdp1-mem");
	struct icc_path *path_rot = of_icc_get(mdss_dev, "rotator-mem");

	if (IS_ERR(path0))
		return PTR_ERR(path0);

	if (!path0) {
		/*
		 * of_icc_get() returns NULL when the interconnect API is
		 * disabled or the DT has no paths; treat that as non-fatal
		 * but warn, since there is no bandwidth guarantee.
		 */
		dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
		return 0;
	}

	icc_set_bw(path0, 0, MBps_to_icc(6400));

	if (!IS_ERR_OR_NULL(path1))
		icc_set_bw(path1, 0, MBps_to_icc(6400));
	if (!IS_ERR_OR_NULL(path_rot))
		icc_set_bw(path_rot, 0, MBps_to_icc(6400));

	return 0;
}
0935
/*
 * Platform probe: vote interconnect bandwidth first, then hand off to the
 * msm core with mdp5_kms_init as the KMS init callback.
 */
static int mdp5_dev_probe(struct platform_device *pdev)
{
	int ret;

	DBG("");

	ret = mdp5_setup_interconnect(pdev);
	if (ret)
		return ret;

	return msm_drv_probe(&pdev->dev, mdp5_kms_init);
}
0948
/* Platform remove: detach from the msm component master.  Always 0. */
static int mdp5_dev_remove(struct platform_device *pdev)
{
	DBG("");
	component_master_del(&pdev->dev, &msm_drm_ops);
	return 0;
}
0955
0956 static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
0957 {
0958 struct platform_device *pdev = to_platform_device(dev);
0959 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
0960
0961 DBG("");
0962
0963 return mdp5_disable(mdp5_kms);
0964 }
0965
0966 static __maybe_unused int mdp5_runtime_resume(struct device *dev)
0967 {
0968 struct platform_device *pdev = to_platform_device(dev);
0969 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
0970
0971 DBG("");
0972
0973 return mdp5_enable(mdp5_kms);
0974 }
0975
/*
 * Power management ops: runtime PM gates/ungates clocks; system
 * suspend/resume prepare/complete are delegated to the msm core helpers.
 */
static const struct dev_pm_ops mdp5_pm_ops = {
	SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};
0981
static const struct of_device_id mdp5_dt_match[] = {
	{ .compatible = "qcom,mdp5", },
	/* also matched under this second compatible string */
	{ .compatible = "qcom,mdss_mdp", },
	{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);
0989
/* platform driver glue for the MDP5 display controller */
static struct platform_driver mdp5_driver = {
	.probe = mdp5_dev_probe,
	.remove = mdp5_dev_remove,
	.shutdown = msm_drv_shutdown,
	.driver = {
		.name = "msm_mdp",
		.of_match_table = mdp5_dt_match,
		.pm = &mdp5_pm_ops,
	},
};
1000
/* Register the MDP5 platform driver (called from msm core module init). */
void __init msm_mdp_register(void)
{
	DBG("");
	platform_driver_register(&mdp5_driver);
}
1006
/* Unregister the MDP5 platform driver (called from msm core module exit). */
void __exit msm_mdp_unregister(void)
{
	DBG("");
	platform_driver_unregister(&mdp5_driver);
}