// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "msm_drv.h"
#include "msm_kms.h"

/* for the DPU_HW_VER_* revision defines used in msm_mdss_enable() */
#include "disp/dpu1/dpu_hw_catalog.h"

#define HW_REV			0x0
#define HW_INTR_STATUS		0x0010

#define UBWC_STATIC		0x144
#define UBWC_CTRL_2		0x150
#define UBWC_PREDICTION_MODE	0x154

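/* Minimum interconnect bandwidth (ib vote) to keep while MDSS is enabled, in bytes/sec */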
#define MIN_IB_BW	400000000UL

struct msm_mdss {
	struct device *dev;

	void __iomem *mmio;
	struct clk_bulk_data *clocks;
	size_t num_clocks;
	bool is_mdp5;
	struct {
		unsigned long enabled_mask;
		struct irq_domain *domain;
	} irq_controller;
	struct icc_path *path[2];
	u32 num_paths;
};

static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
					    struct msm_mdss *msm_mdss)
{
	struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
	struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");

	/* Both paths are optional: a missing "mdp0-mem" (NULL) is not an error */
	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	msm_mdss->path[0] = path0;
	msm_mdss->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		msm_mdss->path[1] = path1;
		msm_mdss->num_paths++;
	}

	return 0;
}

static void msm_mdss_put_icc_path(void *data)
{
	struct msm_mdss *msm_mdss = data;
	int i;

	for (i = 0; i < msm_mdss->num_paths; i++)
		icc_put(msm_mdss->path[i]);
}

static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
{
	int i;

	/* Vote only the peak bandwidth (avg_bw = 0) on every path */
	for (i = 0; i < msm_mdss->num_paths; i++)
		icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
}

static void msm_mdss_irq(struct irq_desc *desc)
{
	struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 interrupts;

	chained_irq_enter(chip, desc);

	interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);

	/* Dispatch every pending status bit, highest hwirq first */
	while (interrupts) {
		irq_hw_number_t hwirq = fls(interrupts) - 1;
		int rc;

		rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
					       hwirq);
		if (rc < 0) {
			dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
				hwirq, rc);
			break;
		}

		interrupts &= ~(1 << hwirq);
	}

	chained_irq_exit(chip, desc);
}

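/*
 * Mask/unmask only track the enabled hwirqs in a software bitmap; the
 * chained handler above dispatches purely on HW_INTR_STATUS, so no
 * hardware mask register is programmed here.
 */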
static void msm_mdss_irq_mask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier needed for atomic operations? */
	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier needed for atomic operations? */
	smp_mb__after_atomic();
}

static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier needed for atomic operations? */
	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier needed for atomic operations? */
	smp_mb__after_atomic();
}

static struct irq_chip msm_mdss_irq_chip = {
	.name = "msm_mdss",
	.irq_mask = msm_mdss_irq_mask,
	.irq_unmask = msm_mdss_irq_unmask,
};

static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;

static int msm_mdss_irqdomain_map(struct irq_domain *domain,
		unsigned int irq, irq_hw_number_t hwirq)
{
	struct msm_mdss *msm_mdss = domain->host_data;

	irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
	irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);

	return irq_set_chip_data(irq, msm_mdss);
}

static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
	.map = msm_mdss_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};

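/*
 * Children of MDSS (DPU, DSI, DP, ...) that list this node as their
 * interrupt parent get their interrupts mapped through
 * msm_mdss_irqdomain_map(). Roughly (illustrative sketch only;
 * "child_isr" and "priv" are hypothetical names):
 *
 *	unsigned int virq = irq_create_mapping(domain, hwirq);
 *	ret = devm_request_irq(dev, virq, child_isr, 0, "mdss-child", priv);
 */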
static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
	struct device *dev;
	struct irq_domain *domain;

	dev = msm_mdss->dev;

	/* One hwirq per bit of the 32-bit HW_INTR_STATUS register */
	domain = irq_domain_add_linear(dev->of_node, 32,
			&msm_mdss_irqdomain_ops, msm_mdss);
	if (!domain) {
		dev_err(dev, "failed to add irq_domain\n");
		return -EINVAL;
	}

	msm_mdss->irq_controller.enabled_mask = 0;
	msm_mdss->irq_controller.domain = domain;

	return 0;
}

static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
	int ret;

	/*
	 * Several components have AXI clocks that can only be turned on if
	 * the interconnect is enabled (non-zero bandwidth). Let's make sure
	 * that the interconnects are at least at a minimum amount.
	 */
	msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);

	ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
	if (ret) {
		dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
		return ret;
	}

	/*
	 * HW_REV requires MDSS_MDP clocks, which are not enabled for the
	 * mdp5 devices, so skip the version check there.
	 */
	if (msm_mdss->is_mdp5)
		return 0;

	/*
	 * UBWC config is part of the "mdss" region which is not accessible
	 * from the rest of the driver. Hardcode known configurations here.
	 */
	switch (readl_relaxed(msm_mdss->mmio + HW_REV)) {
	case DPU_HW_VER_500:
	case DPU_HW_VER_501:
		writel_relaxed(0x420, msm_mdss->mmio + UBWC_STATIC);
		break;
	case DPU_HW_VER_600:
		/* TODO: 0x102e for LP_DDR4 */
		writel_relaxed(0x103e, msm_mdss->mmio + UBWC_STATIC);
		writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
		writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
		break;
	case DPU_HW_VER_620:
		writel_relaxed(0x1e, msm_mdss->mmio + UBWC_STATIC);
		break;
	case DPU_HW_VER_720:
		writel_relaxed(0x101e, msm_mdss->mmio + UBWC_STATIC);
		break;
	}

	return ret;
}

static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
	clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
	msm_mdss_icc_request_bw(msm_mdss, 0);

	return 0;
}

static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
	struct platform_device *pdev = to_platform_device(msm_mdss->dev);
	int irq;

	pm_runtime_suspend(msm_mdss->dev);
	pm_runtime_disable(msm_mdss->dev);
	irq_domain_remove(msm_mdss->irq_controller.domain);
	msm_mdss->irq_controller.domain = NULL;
	irq = platform_get_irq(pdev, 0);
	irq_set_chained_handler_and_data(irq, NULL, NULL);
}

static int msm_mdss_reset(struct device *dev)
{
	struct reset_control *reset;

	reset = reset_control_get_optional_exclusive(dev, NULL);
	if (!reset) {
		/* Optional reset line is not specified */
		return 0;
	} else if (IS_ERR(reset)) {
		return dev_err_probe(dev, PTR_ERR(reset),
				     "failed to acquire mdss reset\n");
	}

	reset_control_assert(reset);
	/*
	 * Tests indicate that reset has to be held for some period of time,
	 * make it one frame in a typical system
	 */
	msleep(20);
	reset_control_deassert(reset);

	reset_control_put(reset);

	return 0;
}

/*
 * MDP5 MDSS uses at most 3 specified clocks.
 */
#define MDP5_MDSS_NUM_CLOCKS 3
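/* Returns the number of clocks parsed on success; msm_mdss_init() stores it as num_clocks */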
static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
	struct clk_bulk_data *bulk;
	int num_clocks = 0;
	int ret;

	if (!pdev)
		return -EINVAL;

	bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	bulk[num_clocks++].id = "iface";
	bulk[num_clocks++].id = "bus";
	bulk[num_clocks++].id = "vsync";

	ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
	if (ret)
		return ret;

	*clocks = bulk;

	return num_clocks;
}

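/*
 * Probe-time setup shared by mdp5 and DPU devices: reset the block, map
 * the MDSS register space, acquire interconnect paths and clocks, register
 * the MDSS interrupt controller with its chained handler, then enable
 * runtime PM.
 */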
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
	struct msm_mdss *msm_mdss;
	int ret;
	int irq;

	ret = msm_mdss_reset(&pdev->dev);
	if (ret)
		return ERR_PTR(ret);

	msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
	if (!msm_mdss)
		return ERR_PTR(-ENOMEM);

	msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
	if (IS_ERR(msm_mdss->mmio))
		return ERR_CAST(msm_mdss->mmio);

	dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);

	ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
	if (ret)
		return ERR_PTR(ret);
	ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	if (is_mdp5)
		ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
	else
		ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
		return ERR_PTR(ret);
	}
	msm_mdss->num_clocks = ret;
	msm_mdss->is_mdp5 = is_mdp5;

	msm_mdss->dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return ERR_PTR(irq);

	ret = _msm_mdss_irq_domain_add(msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	irq_set_chained_handler_and_data(irq, msm_mdss_irq, msm_mdss);

	pm_runtime_enable(&pdev->dev);

	return msm_mdss;
}

static int __maybe_unused mdss_runtime_suspend(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_disable(mdss);
}

static int __maybe_unused mdss_runtime_resume(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_enable(mdss);
}

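/*
 * System sleep reuses the runtime PM hooks: if the device is already
 * runtime suspended there is nothing further to do.
 */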
static int __maybe_unused mdss_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_suspend(dev);
}

static int __maybe_unused mdss_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_resume(dev);
}

static const struct dev_pm_ops mdss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
	SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};

static int mdss_probe(struct platform_device *pdev)
{
	struct msm_mdss *mdss;
	bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
	struct device *dev = &pdev->dev;
	int ret;

	mdss = msm_mdss_init(pdev, is_mdp5);
	if (IS_ERR(mdss))
		return PTR_ERR(mdss);

	platform_set_drvdata(pdev, mdss);

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node and then add
	 * the interfaces to our components list.
	 */
	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to populate children devices\n");
		msm_mdss_destroy(mdss);
		return ret;
	}

	return 0;
}

static int mdss_remove(struct platform_device *pdev)
{
	struct msm_mdss *mdss = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	msm_mdss_destroy(mdss);

	return 0;
}

static const struct of_device_id mdss_dt_match[] = {
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,msm8998-mdss" },
	{ .compatible = "qcom,qcm2290-mdss" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7280-mdss" },
	{ .compatible = "qcom,sc8180x-mdss" },
	{ .compatible = "qcom,sm8150-mdss" },
	{ .compatible = "qcom,sm8250-mdss" },
	{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);

static struct platform_driver mdss_platform_driver = {
	.probe = mdss_probe,
	.remove = mdss_remove,
	.driver = {
		.name = "msm-mdss",
		.of_match_table = mdss_dt_match,
		.pm = &mdss_pm_ops,
	},
};

void __init msm_mdss_register(void)
{
	platform_driver_register(&mdss_platform_driver);
}

void __exit msm_mdss_unregister(void)
{
	platform_driver_unregister(&mdss_platform_driver);
}