// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"

#include "dpu_core_irq.h"
#include "dpu_crtc.h"
#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
#include "dpu_writeback.h"

#define CREATE_TRACE_POINTS
#include "dpu_trace.h"

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);

#ifdef CONFIG_DEBUG_FS
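/* Dump the danger or safe signal status for the MDP top block and each SSPP */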
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_kms *kms = (struct dpu_kms *)s->private;
	struct dpu_danger_safe_status status;
	int i;

	if (!kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP : 0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d : 0x%x\n", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}

static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);

static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);

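/* Report whether danger-signal handling is currently disabled (1) or enabled (0) */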
static ssize_t _dpu_plane_danger_read(struct file *file,
		char __user *buff, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	int len;
	char buf[40];

	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);

	return simple_read_from_buffer(buff, count, ppos, buf, len);
}

static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
{
	struct drm_plane *plane;

	drm_for_each_plane(plane, kms->dev) {
		if (plane->fb && plane->state) {
			dpu_plane_danger_signal_ctrl(plane, enable);
			DPU_DEBUG("plane:%d img:%dx%d ",
				plane->base.id, plane->fb->width,
				plane->fb->height);
			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
				plane->state->src_x >> 16,
				plane->state->src_y >> 16,
				plane->state->src_w >> 16,
				plane->state->src_h >> 16,
				plane->state->crtc_x, plane->state->crtc_y,
				plane->state->crtc_w, plane->state->crtc_h);
		} else {
			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
		}
	}
}

static ssize_t _dpu_plane_danger_write(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	unsigned int disable_panic;
	int ret;

	ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
	if (ret)
		return ret;

	if (disable_panic) {
		/* Disable panic signal for all active pipes */
		DPU_DEBUG("Disabling danger:\n");
		_dpu_plane_set_danger_state(kms, false);
		kms->has_danger_ctrl = false;
	} else {
		/* Enable panic signal for all active pipes */
		DPU_DEBUG("Enabling danger:\n");
		kms->has_danger_ctrl = true;
		_dpu_plane_set_danger_state(kms, true);
	}

	return count;
}

static const struct file_operations dpu_plane_danger_enable = {
	.open = simple_open,
	.read = _dpu_plane_danger_read,
	.write = _dpu_plane_danger_write,
};

static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	struct dentry *entry = debugfs_create_dir("danger", parent);

	debugfs_create_file("danger_status", 0600, entry,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, entry,
			dpu_kms, &dpu_debugfs_safe_stats_fops);
	debugfs_create_file("disable_danger", 0600, entry,
			dpu_kms, &dpu_plane_danger_enable);
}

/*
 * Companion structure for dpu_debugfs_create_regset32.
 */
struct dpu_debugfs_regset32 {
	uint32_t offset;
	uint32_t blk_len;
	struct dpu_kms *dpu_kms;
};

static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset = s->private;
	struct dpu_kms *dpu_kms = regset->dpu_kms;
	void __iomem *base;
	uint32_t i, addr;

	if (!dpu_kms->mmio)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}

static int dpu_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
}

static const struct file_operations dpu_fops_regset32 = {
	.open = dpu_debugfs_open_regset32,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent,
		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
	struct dpu_debugfs_regset32 *regset;

	if (WARN_ON(!name || !dpu_kms || !length))
		return;

	regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	/* Only full dwords are supported */
	regset->offset = round_down(offset, 4);
	regset->blk_len = length;
	regset->dpu_kms = dpu_kms;

	debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
}

static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	void *p = dpu_hw_util_get_log_mask_ptr();
	struct dentry *entry;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i;

	if (!p)
		return -EINVAL;

	/* Only create a set of debugfs for the primary node, ignore render nodes */
	if (minor->type != DRM_MINOR_PRIMARY)
		return 0;

	dev = dpu_kms->dev;
	priv = dev->dev_private;

	entry = debugfs_create_dir("debug", minor->debugfs_root);

	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

	dpu_debugfs_danger_init(dpu_kms, entry);
	dpu_debugfs_vbif_init(dpu_kms, entry);
	dpu_debugfs_core_irq_init(dpu_kms, entry);
	dpu_debugfs_sspp_init(dpu_kms, entry);

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (priv->dp[i])
			msm_dp_debugfs_init(priv->dp[i], minor);
	}

	return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
	return to_dpu_global_state(dpu_kms->global_state.state);
}

/*
 * This acquires the modeset lock set aside for global state, creates
 * a new duplicated private object state.
 */
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s,
			&dpu_kms->global_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_dpu_global_state(priv_state);
}

static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
	struct dpu_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
		struct drm_private_state *state)
{
	struct dpu_global_state *dpu_state = to_dpu_global_state(state);

	kfree(dpu_state);
}

static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
	.atomic_destroy_state = dpu_kms_global_destroy_state,
};

static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
	struct dpu_global_state *state;

	drm_modeset_lock_init(&dpu_kms->global_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
				    &state->base,
				    &dpu_kms_global_state_funcs);
	return 0;
}

static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct drm_device *dev = dpu_kms->dev;
	struct device *dpu_dev = dev->dev;
	struct device *mdss_dev = dpu_dev->parent;

	/* Interconnects are a part of MDSS device tree binding, not the
	 * MDP/DPU device. */
	path0 = of_icc_get(mdss_dev, "mdp0-mem");
	path1 = of_icc_get(mdss_dev, "mdp1-mem");

	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	dpu_kms->path[0] = path0;
	dpu_kms->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		dpu_kms->path[1] = path1;
		dpu_kms->num_paths++;
	}
	return 0;
}

static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}

static void dpu_kms_enable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	pm_runtime_get_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_disable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

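/*
 * Return the expected time of the next vsync: ask each encoder on the CRTC,
 * and fall back to the current time if none of them can tell.
 */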
static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		ktime_t vsync_time;

		if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
			return vsync_time;
	}

	return ktime_get();
}

static void dpu_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_encoder *encoder;
	int i;

	if (!kms)
		return;

	/* Call prepare_commit for all affected encoders */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc_state->encoder_mask) {
			dpu_encoder_prepare_commit(encoder);
		}
	}
}

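/* Kick off the committed update on every active CRTC in the mask */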
static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;

		trace_dpu_kms_commit(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}

static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_crtc_complete_commit(crtc);

	DPU_ATRACE_END("kms_complete_commit");
}

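/* Block until the commit previously kicked off on @crtc has reached the hardware */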
static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}

static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_kms_wait_for_commit_done(kms, crtc);
}

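/* Create one DPU encoder per DSI interface; a bonded DSI pair shares a single encoder */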
static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int i, rc = 0;

	if (!(priv->dsi[0] || priv->dsi[1]))
		return rc;

	/*
	 * We support following configurations:
	 * - Single DSI host (dsi0 or dsi1)
	 * - Two independent DSI hosts
	 * - Bonded DSI0 and DSI1 hosts
	 *
	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
	 */
	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		int other = (i + 1) % 2;

		if (!priv->dsi[i])
			continue;

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
		    !msm_dsi_is_master_dsi(priv->dsi[i]))
			continue;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dsi display\n");
			return PTR_ERR(encoder);
		}

		memset(&info, 0, sizeof(info));
		info.intf_type = encoder->encoder_type;

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			break;
		}

		info.h_tile_instance[info.num_of_h_tiles++] = i;
		info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);

		info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
			rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
			if (rc) {
				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
					other, rc);
				break;
			}

			info.h_tile_instance[info.num_of_h_tiles++] = other;
		}

		rc = dpu_encoder_setup(dev, encoder, &info);
		if (rc)
			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
	}

	return rc;
}

static int _dpu_kms_initialize_displayport(struct drm_device *dev,
					   struct msm_drm_private *priv,
					   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;
	int i;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (!priv->dp[i])
			continue;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dp display\n");
			return PTR_ERR(encoder);
		}

		memset(&info, 0, sizeof(info));
		rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
			drm_encoder_cleanup(encoder);
			return rc;
		}

		info.num_of_h_tiles = 1;
		info.h_tile_instance[0] = i;
		info.intf_type = encoder->encoder_type;
		rc = dpu_encoder_setup(dev, encoder, &info);
		if (rc) {
			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
			return rc;
		}
	}

	return 0;
}

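/* Create a virtual encoder backed by the WB_2 writeback hardware block */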
static int _dpu_kms_initialize_writeback(struct drm_device *dev,
		struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
		const u32 *wb_formats, int n_formats)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for writeback display\n");
		return PTR_ERR(encoder);
	}

	memset(&info, 0, sizeof(info));

	rc = dpu_writeback_init(dev, encoder, wb_formats,
			n_formats);
	if (rc) {
		DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
		drm_encoder_cleanup(encoder);
		return rc;
	}

	info.num_of_h_tiles = 1;
	/* use only WB idx 2 instance for DPU */
	info.h_tile_instance[0] = WB_2;
	info.intf_type = encoder->encoder_type;

	rc = dpu_encoder_setup(dev, encoder, &info);
	if (rc) {
		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
			  encoder->base.id, rc);
		return rc;
	}

	return 0;
}

/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @dpu_kms: Pointer to dpu kms structure
 * Returns: Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	int rc = 0;
	int i;

	rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
		return rc;
	}

	/* Since WB isn't a driver check the catalog before initializing */
	if (dpu_kms->catalog->wb_count) {
		for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
			if (dpu_kms->catalog->wb[i].id == WB_2) {
				rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
						dpu_kms->catalog->wb[i].format_list,
						dpu_kms->catalog->wb[i].num_formats);
				if (rc) {
					DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
					return rc;
				}
			}
		}
	}

	return rc;
}

#define MAX_PLANES 20
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	struct msm_drm_private *priv;
	const struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to get
	 * the list of HW displays
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		return ret;

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	max_crtc_count = min(catalog->mixer_count, num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			return ret;
		}

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			return ret;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
}

static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	int i;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	/* safe to call these more than once during shutdown */
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
				dpu_kms->hw_vbif[vbif_idx] = NULL;
			}
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	dpu_kms->catalog = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->hw_mdp)
		dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
	dpu_kms->hw_mdp = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}

static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);

	msm_kms_destroy(&dpu_kms->base);

	if (dpu_kms->rpm_enabled)
		pm_runtime_disable(&dpu_kms->pdev->dev);
}

static int dpu_irq_postinstall(struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	if (!dpu_kms || !dpu_kms->dev)
		return -EINVAL;

	priv = dpu_kms->dev->dev_private;
	if (!priv)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
		msm_dp_irq_postinstall(priv->dp[i]);

	return 0;
}

static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
	int i;
	struct dpu_kms *dpu_kms;
	const struct dpu_mdss_cfg *cat;
	struct dpu_hw_mdp *top;

	dpu_kms = to_dpu_kms(kms);

	cat = dpu_kms->catalog;
	top = dpu_kms->hw_mdp;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* dump CTL sub-blocks HW regs info */
	for (i = 0; i < cat->ctl_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
				dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i);

	/* dump DSPP sub-blocks HW regs info */
	for (i = 0; i < cat->dspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len,
				dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i);

	/* dump INTF sub-blocks HW regs info */
	for (i = 0; i < cat->intf_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
				dpu_kms->mmio + cat->intf[i].base, "intf_%d", i);

	/* dump PP sub-blocks HW regs info */
	for (i = 0; i < cat->pingpong_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len,
				dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i);

	/* dump SSPP sub-blocks HW regs info */
	for (i = 0; i < cat->sspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
				dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);

	/* dump LM sub-blocks HW regs info */
	for (i = 0; i < cat->mixer_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
				dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);

	/* dump WB sub-blocks HW regs info */
	for (i = 0; i < cat->wb_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
				dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);

	msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
			dpu_kms->mmio + cat->mdp[0].base, "top");

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init = dpu_kms_hw_init,
	.irq_preinstall = dpu_core_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall = dpu_core_irq_uninstall,
	.irq = dpu_core_irq,
	.enable_commit = dpu_kms_enable_commit,
	.disable_commit = dpu_kms_disable_commit,
	.vsync_time = dpu_kms_vsync_time,
	.prepare_commit = dpu_kms_prepare_commit,
	.flush_commit = dpu_kms_flush_commit,
	.wait_flush = dpu_kms_wait_flush,
	.complete_commit = dpu_kms_complete_commit,
	.enable_vblank = dpu_kms_enable_vblank,
	.disable_vblank = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format = dpu_get_msm_format,
	.destroy = dpu_kms_destroy,
	.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = dpu_kms_debugfs_init,
#endif
};

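/* Detach the MMU and drop the address space reference taken at init time */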
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	if (!dpu_kms->base.aspace)
		return;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu);
	msm_gem_address_space_put(dpu_kms->base.aspace);

	dpu_kms->base.aspace = NULL;
}

static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct msm_gem_address_space *aspace;

	aspace = msm_kms_init_aspace(dpu_kms->dev);
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	dpu_kms->base.aspace = aspace;

	return 0;
}

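/* Look up a named clock among the bulk-acquired clocks and return its current rate */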
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct clk *clk;

	clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
	if (!clk)
		return -EINVAL;

	return clk_get_rate(clk);
}

static int dpu_kms_hw_init(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	int i, rc = -EINVAL;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return rc;
	}

	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;

	rc = dpu_kms_global_obj_init(dpu_kms);
	if (rc)
		return rc;

	atomic_set(&dpu_kms->bandwidth_ref, 0);

	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
	if (IS_ERR(dpu_kms->mmio)) {
		rc = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", rc);
		dpu_kms->mmio = NULL;
		goto error;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", rc);
		dpu_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}
	dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined");
	}

	dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma");
	if (IS_ERR(dpu_kms->reg_dma)) {
		dpu_kms->reg_dma = NULL;
		DPU_DEBUG("REG_DMA is not defined");
	}

	dpu_kms_parse_data_bus_icc_path(dpu_kms);

	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
	if (rc < 0)
		goto error;

	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);

	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
		rc = PTR_ERR(dpu_kms->catalog);
		if (!dpu_kms->catalog)
			rc = -EINVAL;
		DPU_ERROR("catalog init failed: %d\n", rc);
		dpu_kms->catalog = NULL;
		goto power_error;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _dpu_kms_mmu_init(dpu_kms);
	if (rc) {
		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
	if (rc) {
		DPU_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	dpu_kms->rm_init = true;

	dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
					     dpu_kms->catalog);
	if (IS_ERR(dpu_kms->hw_mdp)) {
		rc = PTR_ERR(dpu_kms->hw_mdp);
		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
		dpu_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

		dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
			if (!dpu_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			dpu_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
			msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, "core"));
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
		rc = PTR_ERR(dpu_kms->hw_intr);
		DPU_ERROR("hw_intr init failed: %d\n", rc);
		dpu_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height
	 * is 4K
	 */
	dev->mode_config.max_width =
			dpu_kms->catalog->caps->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	dev->max_vblank_count = 0xffffffff;
	/* Disable vblank irqs aggressively for power-saving */
	dev->vblank_disable_immediate = true;

	/*
	 * _dpu_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _dpu_kms_drm_obj_init(dpu_kms);
	if (rc) {
		DPU_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;

drm_obj_init_err:
	dpu_core_perf_destroy(&dpu_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
	_dpu_kms_hw_destroy(dpu_kms);

	return rc;
}

static int dpu_kms_init(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	struct device *dev = ddev->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms;
	int irq;
	struct dev_pm_opp *opp;
	int ret = 0;
	unsigned long max_freq = ULONG_MAX;

	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
	if (!dpu_kms)
		return -ENOMEM;

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;

	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
	if (ret < 0) {
		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->num_clocks = ret;

	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	dev_pm_opp_set_rate(dev, max_freq);

	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
	if (ret) {
		DPU_ERROR("failed to init kms, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->dev = ddev;
	dpu_kms->pdev = pdev;

	pm_runtime_enable(&pdev->dev);
	dpu_kms->rpm_enabled = true;

	priv->kms = &dpu_kms->base;

	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
	if (!irq) {
		DPU_ERROR("failed to get irq\n");
		return -EINVAL;
	}
	dpu_kms->base.irq = irq;

	return 0;
}

static int dpu_dev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, dpu_kms_init);
}

static int dpu_dev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

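/* Runtime suspend: drop the OPP vote, gate the clocks and release interconnect bandwidth */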
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
	int i;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);

	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, 0);

	return 0;
}

static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
	int rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *encoder;
	struct drm_device *ddev;

	ddev = dpu_kms->dev;

	rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
	if (rc) {
		DPU_ERROR("clock enable failed rc:%d\n", rc);
		return rc;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return rc;
}

static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,msm8998-dpu", },
	{ .compatible = "qcom,qcm2290-dpu", },
	{ .compatible = "qcom,sdm845-dpu", },
	{ .compatible = "qcom,sc7180-dpu", },
	{ .compatible = "qcom,sc7280-dpu", },
	{ .compatible = "qcom,sc8180x-dpu", },
	{ .compatible = "qcom,sm8150-dpu", },
	{ .compatible = "qcom,sm8250-dpu", },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);

static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.shutdown = msm_drv_shutdown,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};

void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}