0001
0002
0003
0004
0005 #include <linux/bitops.h>
0006 #include <linux/debugfs.h>
0007 #include <linux/slab.h>
0008
0009 #include "dpu_core_irq.h"
0010 #include "dpu_kms.h"
0011 #include "dpu_hw_interrupts.h"
0012 #include "dpu_hw_util.h"
0013 #include "dpu_hw_mdss.h"
0014 #include "dpu_trace.h"
0015
0016
0017
0018
0019
0020 #define MDP_SSPP_TOP0_OFF 0x0
0021 #define MDP_INTF_0_OFF 0x6A000
0022 #define MDP_INTF_1_OFF 0x6A800
0023 #define MDP_INTF_2_OFF 0x6B000
0024 #define MDP_INTF_3_OFF 0x6B800
0025 #define MDP_INTF_4_OFF 0x6C000
0026 #define MDP_INTF_5_OFF 0x6C800
0027 #define MDP_AD4_0_OFF 0x7C000
0028 #define MDP_AD4_1_OFF 0x7D000
0029 #define MDP_AD4_INTR_EN_OFF 0x41c
0030 #define MDP_AD4_INTR_CLEAR_OFF 0x424
0031 #define MDP_AD4_INTR_STATUS_OFF 0x420
0032 #define MDP_INTF_0_OFF_REV_7xxx 0x34000
0033 #define MDP_INTF_1_OFF_REV_7xxx 0x35000
0034 #define MDP_INTF_2_OFF_REV_7xxx 0x36000
0035 #define MDP_INTF_3_OFF_REV_7xxx 0x37000
0036 #define MDP_INTF_4_OFF_REV_7xxx 0x38000
0037 #define MDP_INTF_5_OFF_REV_7xxx 0x39000
0038
0039
0040
0041
0042
0043
0044
/**
 * struct dpu_intr_reg - per-bank interrupt register offsets
 * @clr_off:	offset of the interrupt-clear register for this bank
 * @en_off:	offset of the interrupt-enable (mask) register for this bank
 * @status_off:	offset of the interrupt-status register for this bank
 *
 * All offsets are relative to the MDP block base mapped in
 * dpu_hw_intr::hw (see __intr_offset()).
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};
0050
0051
0052
0053
0054
0055
/*
 * dpu_intr_set - table of clear/enable/status register offsets for every
 * interrupt bank, indexed by the MDP_*_INTR enum (also used as the "reg_idx"
 * throughout this file).  Each bank is 32 interrupt bits wide; a global
 * irq_idx decomposes into (bank, bit) via DPU_IRQ_REG()/DPU_IRQ_MASK().
 * Entries are only touched when the corresponding bit is set in
 * dpu_hw_intr::irq_mask (taken from the catalog's mdss_irqs).
 */
static const struct dpu_intr_reg dpu_intr_set[] = {
	[MDP_SSPP_TOP0_INTR] = {
		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR_EN,
		MDP_SSPP_TOP0_OFF+INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR2_EN,
		MDP_SSPP_TOP0_OFF+INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF+INTF_INTR_EN,
		MDP_INTF_0_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF+INTF_INTR_EN,
		MDP_INTF_1_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
		MDP_INTF_2_OFF+INTF_INTR_EN,
		MDP_INTF_2_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
		MDP_INTF_3_OFF+INTF_INTR_EN,
		MDP_INTF_3_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
		MDP_INTF_4_OFF+INTF_INTR_EN,
		MDP_INTF_4_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_5_OFF+INTF_INTR_CLEAR,
		MDP_INTF_5_OFF+INTF_INTR_EN,
		MDP_INTF_5_OFF+INTF_INTR_STATUS
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	/* INTF banks moved on DPU rev 7.x hardware; separate enum slots */
	[MDP_INTF0_7xxx_INTR] = {
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF1_7xxx_INTR] = {
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF2_7xxx_INTR] = {
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF3_7xxx_INTR] = {
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF4_7xxx_INTR] = {
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF5_7xxx_INTR] = {
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
	},
};
0143
/*
 * Decompose a global irq_idx into its 32-bit register bank (DPU_IRQ_REG)
 * and the bit mask within that bank (DPU_IRQ_MASK).
 *
 * Arguments are parenthesized so the macros stay correct when called with
 * an expression (e.g. DPU_IRQ_REG(base + off) must not expand to
 * base + off / 32).
 */
#define DPU_IRQ_REG(irq_idx) ((irq_idx) / 32)
#define DPU_IRQ_MASK(irq_idx) (BIT((irq_idx) % 32))
0146
0147
0148
0149
0150
0151
0152 static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
0153 {
0154 VERB("irq_idx=%d\n", irq_idx);
0155
0156 if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
0157 DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
0158
0159 atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
0160
0161
0162
0163
0164 dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
0165 }
0166
/**
 * dpu_core_irq - top-level DPU interrupt handler
 * @kms:	generic KMS handle, container of the DPU device
 *
 * Walks every interrupt bank enabled in intr->irq_mask, acknowledges all
 * pending status bits at the hardware, then dispatches a callback for each
 * bit that was both pending and enabled.
 *
 * Note the ordering: status is read and cleared *before* being ANDed with
 * the enable mask, so even non-enabled stray bits are acked in hardware.
 *
 * Return: IRQ_HANDLED, or IRQ_NONE if the interrupt block is not set up.
 */
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
		/* skip banks this SoC does not expose */
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);

		/* read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				irq_status);

		/* dispatch only those bits that are actually enabled */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Iterate over the set bits, dispatching one callback per
		 * bit.  ffs() is 1-based, hence the "bit - 1" adjustments.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When callback finish, clear the irq_status
			 * with the matching mask. Once irq_status
			 * is all cleared, the search can be stopped.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}
0227
/**
 * dpu_hw_intr_enable_irq_locked - unmask one interrupt bit in hardware
 * @intr:	interrupt block handle
 * @irq_idx:	global interrupt index to enable
 *
 * Caller must hold intr->irq_lock (asserted below).  A stale pending bit
 * is cleared before the enable bit is set, so an old latched event cannot
 * fire the moment the interrupt is unmasked.  The shadow cache_irq_mask
 * keeps a copy of each bank's enable register so re-enables are no-ops.
 *
 * Return: 0 on success, -EINVAL on a NULL handle or out-of-range index.
 */
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations needs to be done
	 * under irq_lock and it's the caller's responsibility to ensure that's
	 * held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "DPU IRQ already set:";
	} else {
		dbgstr = "DPU IRQ enabled:";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Cleaning any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enabling interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}
0276
/**
 * dpu_hw_intr_disable_irq_locked - mask one interrupt bit in hardware
 * @intr:	interrupt block handle
 * @irq_idx:	global interrupt index to disable
 *
 * Caller must hold intr->irq_lock (asserted below).  Mirror of
 * dpu_hw_intr_enable_irq_locked(): the enable bit is cleared first, then
 * any pending status is acked so nothing stays latched after masking.
 *
 * Return: 0 on success, -EINVAL on a NULL handle or out-of-range index.
 */
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations needs to be done
	 * under irq_lock and it's the caller's responsibility to ensure that's
	 * held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "DPU IRQ is already cleared:";
	} else {
		dbgstr = "DPU IRQ mask disable:";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Cleaning any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}
0325
0326 static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
0327 {
0328 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
0329 int i;
0330
0331 if (!intr)
0332 return;
0333
0334 for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
0335 if (test_bit(i, &intr->irq_mask))
0336 DPU_REG_WRITE(&intr->hw,
0337 dpu_intr_set[i].clr_off, 0xffffffff);
0338 }
0339
0340
0341 wmb();
0342 }
0343
0344 static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
0345 {
0346 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
0347 int i;
0348
0349 if (!intr)
0350 return;
0351
0352 for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
0353 if (test_bit(i, &intr->irq_mask))
0354 DPU_REG_WRITE(&intr->hw,
0355 dpu_intr_set[i].en_off, 0x00000000);
0356 }
0357
0358
0359 wmb();
0360 }
0361
0362 u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
0363 {
0364 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
0365 int reg_idx;
0366 unsigned long irq_flags;
0367 u32 intr_status;
0368
0369 if (!intr)
0370 return 0;
0371
0372 if (irq_idx < 0) {
0373 DPU_ERROR("[%pS] invalid irq_idx=%d\n",
0374 __builtin_return_address(0), irq_idx);
0375 return 0;
0376 }
0377
0378 if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
0379 pr_err("invalid IRQ index: [%d]\n", irq_idx);
0380 return 0;
0381 }
0382
0383 spin_lock_irqsave(&intr->irq_lock, irq_flags);
0384
0385 reg_idx = DPU_IRQ_REG(irq_idx);
0386 intr_status = DPU_REG_READ(&intr->hw,
0387 dpu_intr_set[reg_idx].status_off) &
0388 DPU_IRQ_MASK(irq_idx);
0389 if (intr_status)
0390 DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
0391 intr_status);
0392
0393
0394 wmb();
0395
0396 spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
0397
0398 return intr_status;
0399 }
0400
/*
 * __intr_offset - point the register map at the MDP block.
 *
 * All offsets in dpu_intr_set are relative to the first MDP sub-block's
 * base, taken from the hardware catalog.
 */
static void __intr_offset(const struct dpu_mdss_cfg *m,
		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
	hw->blk_addr = addr + m->mdp[0].base;
}
0406
0407 struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
0408 const struct dpu_mdss_cfg *m)
0409 {
0410 struct dpu_hw_intr *intr;
0411 int nirq = MDP_INTR_MAX * 32;
0412
0413 if (!addr || !m)
0414 return ERR_PTR(-EINVAL);
0415
0416 intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
0417 if (!intr)
0418 return ERR_PTR(-ENOMEM);
0419
0420 __intr_offset(m, addr, &intr->hw);
0421
0422 intr->total_irqs = nirq;
0423
0424 intr->irq_mask = m->mdss_irqs;
0425
0426 spin_lock_init(&intr->irq_lock);
0427
0428 return intr;
0429 }
0430
/*
 * dpu_hw_intr_destroy - free a handle from dpu_hw_intr_init().
 * kfree(NULL) is a no-op, so a NULL handle is safe.
 */
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	kfree(intr);
}
0435
/**
 * dpu_core_irq_register_callback - attach a handler to one interrupt index
 * @dpu_kms:	DPU device handle
 * @irq_idx:	global interrupt index to hook
 * @irq_cb:	function invoked from dpu_core_irq() when the bit fires
 * @irq_arg:	opaque pointer passed back to @irq_cb
 *
 * Publishes the callback and enables the interrupt in hardware, all under
 * irq_lock so the handler cannot observe a half-installed entry.  Only one
 * callback per index is allowed; a second registration returns -EBUSY.
 *
 * Return: 0 on success, -EINVAL on bad arguments, -EBUSY if already taken.
 * Note: a failure to enable the hardware bit is logged but still reported
 * as success — the callback stays registered.
 */
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		void (*irq_cb)(void *arg, int irq_idx),
		void *irq_arg)
{
	unsigned long irq_flags;
	int ret;

	if (!irq_cb) {
		DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

	/* reject double registration; the slot is single-owner */
	if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		return -EBUSY;
	}

	trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
	/* arg is published before cb; both are visible once the lock drops */
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;

	ret = dpu_hw_intr_enable_irq_locked(
			dpu_kms->hw_intr,
			irq_idx);
	if (ret)
		DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
				irq_idx);
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_register_success(irq_idx);

	return 0;
}
0479
/**
 * dpu_core_irq_unregister_callback - detach the handler from one index
 * @dpu_kms:	DPU device handle
 * @irq_idx:	global interrupt index to unhook
 *
 * Disables the interrupt in hardware and clears the callback slot under
 * irq_lock, so the dispatcher can never call a torn-down handler.
 *
 * Return: 0 on success, -EINVAL on an out-of-range index.  A hardware
 * disable failure is logged but the slot is cleared regardless.
 */
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
	unsigned long irq_flags;
	int ret;

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx);

	ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
	if (ret)
		DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
				irq_idx, ret);

	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;

	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_unregister_success(irq_idx);

	return 0;
}
0509
0510 #ifdef CONFIG_DEBUG_FS
0511 static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
0512 {
0513 struct dpu_kms *dpu_kms = s->private;
0514 unsigned long irq_flags;
0515 int i, irq_count;
0516 void *cb;
0517
0518 for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
0519 spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
0520 irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
0521 cb = dpu_kms->hw_intr->irq_tbl[i].cb;
0522 spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
0523
0524 if (irq_count || cb)
0525 seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
0526 }
0527
0528 return 0;
0529 }
0530
/* Generates dpu_debugfs_core_irq_fops from dpu_debugfs_core_irq_show() */
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

/*
 * dpu_debugfs_core_irq_init - expose the per-irq stats as
 * <parent>/core_irq (root read/write permissions, 0600).
 */
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
0539 #endif
0540
0541 void dpu_core_irq_preinstall(struct msm_kms *kms)
0542 {
0543 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
0544 int i;
0545
0546 pm_runtime_get_sync(&dpu_kms->pdev->dev);
0547 dpu_clear_irqs(dpu_kms);
0548 dpu_disable_all_irqs(dpu_kms);
0549 pm_runtime_put_sync(&dpu_kms->pdev->dev);
0550
0551 for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
0552 atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
0553 }
0554
0555 void dpu_core_irq_uninstall(struct msm_kms *kms)
0556 {
0557 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
0558 int i;
0559
0560 if (!dpu_kms->hw_intr)
0561 return;
0562
0563 pm_runtime_get_sync(&dpu_kms->pdev->dev);
0564 for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
0565 if (dpu_kms->hw_intr->irq_tbl[i].cb)
0566 DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
0567
0568 dpu_clear_irqs(dpu_kms);
0569 dpu_disable_all_irqs(dpu_kms);
0570 pm_runtime_put_sync(&dpu_kms->pdev->dev);
0571 }