0001
0002
0003
0004
0005
0006 #include <linux/delay.h>
0007 #include <linux/dma-mapping.h>
0008 #include <linux/interrupt.h>
0009 #include <linux/platform_device.h>
0010 #include <linux/slab.h>
0011
0012 #include "dcss-dev.h"
0013
0014 #define DCSS_CTXLD_CONTROL_STATUS 0x0
0015 #define CTXLD_ENABLE BIT(0)
0016 #define ARB_SEL BIT(1)
0017 #define RD_ERR_EN BIT(2)
0018 #define DB_COMP_EN BIT(3)
0019 #define SB_HP_COMP_EN BIT(4)
0020 #define SB_LP_COMP_EN BIT(5)
0021 #define DB_PEND_SB_REC_EN BIT(6)
0022 #define SB_PEND_DISP_ACTIVE_EN BIT(7)
0023 #define AHB_ERR_EN BIT(8)
0024 #define RD_ERR BIT(16)
0025 #define DB_COMP BIT(17)
0026 #define SB_HP_COMP BIT(18)
0027 #define SB_LP_COMP BIT(19)
0028 #define DB_PEND_SB_REC BIT(20)
0029 #define SB_PEND_DISP_ACTIVE BIT(21)
0030 #define AHB_ERR BIT(22)
0031 #define DCSS_CTXLD_DB_BASE_ADDR 0x10
0032 #define DCSS_CTXLD_DB_COUNT 0x14
0033 #define DCSS_CTXLD_SB_BASE_ADDR 0x18
0034 #define DCSS_CTXLD_SB_COUNT 0x1C
0035 #define SB_HP_COUNT_POS 0
0036 #define SB_HP_COUNT_MASK 0xffff
0037 #define SB_LP_COUNT_POS 16
0038 #define SB_LP_COUNT_MASK 0xffff0000
0039 #define DCSS_AHB_ERR_ADDR 0x20
0040
0041 #define CTXLD_IRQ_COMPLETION (DB_COMP | SB_HP_COMP | SB_LP_COMP)
0042 #define CTXLD_IRQ_ERROR (RD_ERR | DB_PEND_SB_REC | AHB_ERR)
0043
0044
0045 #define CTXLD_DB_CTX_ENTRIES 1024
0046 #define CTXLD_SB_LP_CTX_ENTRIES 10240
0047 #define CTXLD_SB_HP_CTX_ENTRIES 20000
0048 #define CTXLD_SB_CTX_ENTRIES (CTXLD_SB_LP_CTX_ENTRIES + \
0049 CTXLD_SB_HP_CTX_ENTRIES)
0050
0051
0052 static u16 dcss_ctxld_ctx_size[3] = {
0053 CTXLD_DB_CTX_ENTRIES,
0054 CTXLD_SB_HP_CTX_ENTRIES,
0055 CTXLD_SB_LP_CTX_ENTRIES
0056 };
0057
0058
/*
 * One context-loader entry: the value @val to be written to the register
 * at offset @ofs when the hardware consumes the context.
 */
struct dcss_ctxld_item {
	u32 val;
	u32 ofs;
};
0063
0064 #define CTX_ITEM_SIZE sizeof(struct dcss_ctxld_item)
0065
/* Driver state for the DCSS context loader (CTXLD) block. */
struct dcss_ctxld {
	struct device *dev;
	void __iomem *ctxld_reg;	/* mapped CTXLD register window */
	int irq;
	bool irq_en;		/* balances enable_irq()/disable_irq() calls */

	/* Two context buffers of each type; index is the context id (0/1). */
	struct dcss_ctxld_item *db[2];
	struct dcss_ctxld_item *sb_hp[2];	/* SB HP area start */
	struct dcss_ctxld_item *sb_lp[2];	/* points into the same DMA area,
						 * CTXLD_SB_HP_CTX_ENTRIES past
						 * sb_hp (see alloc_ctx) */

	dma_addr_t db_paddr[2];
	dma_addr_t sb_paddr[2];	/* one coherent area backs sb_hp AND sb_lp */

	u16 ctx_size[2][3];	/* entries used, per context id and CTX_* type */
	u8 current_ctx;		/* context currently collecting writes */

	bool in_use;		/* HW is consuming a committed context */
	bool armed;		/* commit requested; next kick starts the load */

	spinlock_t lock;	/* protects the mutable state above */
};
0087
/*
 * CTXLD interrupt handler: detects completion of a context load we started,
 * or reports a load error. Acknowledges only the status bits handled here.
 */
static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
{
	struct dcss_ctxld *ctxld = data;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
	u32 irq_status;

	irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	/*
	 * A load we kicked off has finished: a completion bit is set and
	 * CTXLD_ENABLE has dropped again.
	 */
	if (irq_status & CTXLD_IRQ_COMPLETION &&
	    !(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
		ctxld->in_use = false;

		if (dcss && dcss->disable_callback)
			dcss->disable_callback(dcss);
	} else if (irq_status & CTXLD_IRQ_ERROR) {
		/*
		 * The failed context is the one committed last, i.e. the
		 * opposite of current_ctx, which already points at the
		 * buffer collecting new writes (see enable_locked).
		 */
		dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
			irq_status);
		dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
	}

	/* Ack the completion/error status bits we just looked at. */
	dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
		 ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	return IRQ_HANDLED;
}
0120
0121 static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
0122 struct platform_device *pdev)
0123 {
0124 int ret;
0125
0126 ctxld->irq = platform_get_irq_byname(pdev, "ctxld");
0127 if (ctxld->irq < 0)
0128 return ctxld->irq;
0129
0130 ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler,
0131 0, "dcss_ctxld", ctxld);
0132 if (ret) {
0133 dev_err(ctxld->dev, "ctxld: irq request failed.\n");
0134 return ret;
0135 }
0136
0137 ctxld->irq_en = true;
0138
0139 return 0;
0140 }
0141
/*
 * Program the CTXLD control/status register: enable read-error, SB HP
 * completion, "DB pending while SB receiving" and AHB-error interrupts.
 * NOTE(review): RD_ERR and AHB_ERR status bits are written too — presumably
 * write-1-to-clear of stale status; confirm against the hardware manual.
 */
static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
{
	dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
		    DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
		    ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
}
0148
0149 static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
0150 {
0151 struct dcss_ctxld_item *ctx;
0152 int i;
0153
0154 for (i = 0; i < 2; i++) {
0155 if (ctxld->db[i]) {
0156 dma_free_coherent(ctxld->dev,
0157 CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
0158 ctxld->db[i], ctxld->db_paddr[i]);
0159 ctxld->db[i] = NULL;
0160 ctxld->db_paddr[i] = 0;
0161 }
0162
0163 if (ctxld->sb_hp[i]) {
0164 dma_free_coherent(ctxld->dev,
0165 CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
0166 ctxld->sb_hp[i], ctxld->sb_paddr[i]);
0167 ctxld->sb_hp[i] = NULL;
0168 ctxld->sb_paddr[i] = 0;
0169 }
0170 }
0171 }
0172
0173 static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
0174 {
0175 struct dcss_ctxld_item *ctx;
0176 int i;
0177
0178 for (i = 0; i < 2; i++) {
0179 ctx = dma_alloc_coherent(ctxld->dev,
0180 CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
0181 &ctxld->db_paddr[i], GFP_KERNEL);
0182 if (!ctx)
0183 return -ENOMEM;
0184
0185 ctxld->db[i] = ctx;
0186
0187 ctx = dma_alloc_coherent(ctxld->dev,
0188 CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
0189 &ctxld->sb_paddr[i], GFP_KERNEL);
0190 if (!ctx)
0191 return -ENOMEM;
0192
0193 ctxld->sb_hp[i] = ctx;
0194 ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
0195 }
0196
0197 return 0;
0198 }
0199
0200 int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
0201 {
0202 struct dcss_ctxld *ctxld;
0203 int ret;
0204
0205 ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL);
0206 if (!ctxld)
0207 return -ENOMEM;
0208
0209 dcss->ctxld = ctxld;
0210 ctxld->dev = dcss->dev;
0211
0212 spin_lock_init(&ctxld->lock);
0213
0214 ret = dcss_ctxld_alloc_ctx(ctxld);
0215 if (ret) {
0216 dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
0217 goto err;
0218 }
0219
0220 ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K);
0221 if (!ctxld->ctxld_reg) {
0222 dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
0223 ret = -ENOMEM;
0224 goto err;
0225 }
0226
0227 ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
0228 if (ret)
0229 goto err_irq;
0230
0231 dcss_ctxld_hw_cfg(ctxld);
0232
0233 return 0;
0234
0235 err_irq:
0236 iounmap(ctxld->ctxld_reg);
0237
0238 err:
0239 dcss_ctxld_free_ctx(ctxld);
0240 kfree(ctxld);
0241
0242 return ret;
0243 }
0244
/*
 * Tear down the context loader. The IRQ is released first so the handler
 * can no longer run against the registers/state being freed below.
 */
void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
	free_irq(ctxld->irq, ctxld);

	if (ctxld->ctxld_reg)
		iounmap(ctxld->ctxld_reg);

	dcss_ctxld_free_ctx(ctxld);
	kfree(ctxld);
}
0255
/*
 * Commit the current context to the hardware: program the DB/SB base
 * addresses and entry counts, start the load, then flip current_ctx so new
 * writes collect in the other buffer. Caller must hold ctxld->lock.
 */
static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
{
	int curr_ctx = ctxld->current_ctx;
	u32 db_base, sb_base, sb_count;
	u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);

	if (!dcss)
		return 0;

	dcss_dpr_write_sysctrl(dcss->dpr);

	dcss_scaler_write_sclctrl(dcss->scaler);

	sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
	sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
	db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];

	/*
	 * The SB area is handed to the HW as one linear buffer starting at
	 * sb_paddr, so the LP entries must sit immediately after the used
	 * part of the HP entries. If they don't, move them there.
	 */
	if (sb_lp_cnt &&
	    ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
		struct dcss_ctxld_item *sb_lp_adjusted;

		sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;

		memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
		       sb_lp_cnt * CTX_ITEM_SIZE);
	}

	db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;

	dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
	dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);

	/*
	 * HP count goes in the low halfword, LP count in the high one.
	 * With no HP entries, the LP count is reported in the HP field
	 * instead — presumably because the LP entries then start right at
	 * the SB base address.
	 */
	if (sb_hp_cnt)
		sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
			   ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
	else
		sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;

	sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;

	dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
	dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);

	/* Kick off the context load. */
	dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	ctxld->in_use = true;

	/*
	 * Switch to the other context buffer for subsequent writes while
	 * the HW consumes the one just committed, and reset its counts.
	 */
	ctxld->current_ctx ^= 1;

	ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;

	return 0;
}
0318
/*
 * Arm the context loader: the accumulated context will be committed to the
 * hardware by the next dcss_ctxld_kick() call. Always returns 0.
 */
int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
	spin_lock_irq(&ctxld->lock);
	ctxld->armed = true;
	spin_unlock_irq(&ctxld->lock);

	return 0;
}
0327
0328 void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
0329 {
0330 unsigned long flags;
0331
0332 spin_lock_irqsave(&ctxld->lock, flags);
0333 if (ctxld->armed && !ctxld->in_use) {
0334 ctxld->armed = false;
0335 dcss_ctxld_enable_locked(ctxld);
0336 }
0337 spin_unlock_irqrestore(&ctxld->lock, flags);
0338 }
0339
0340 void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
0341 u32 reg_ofs)
0342 {
0343 int curr_ctx = ctxld->current_ctx;
0344 struct dcss_ctxld_item *ctx[] = {
0345 [CTX_DB] = ctxld->db[curr_ctx],
0346 [CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
0347 [CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
0348 };
0349 int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];
0350
0351 if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
0352 WARN_ON(1);
0353 return;
0354 }
0355
0356 ctx[ctx_id][item_idx].val = val;
0357 ctx[ctx_id][item_idx].ofs = reg_ofs;
0358 ctxld->ctx_size[curr_ctx][ctx_id] += 1;
0359 }
0360
/*
 * Locked wrapper around dcss_ctxld_write_irqsafe() for callers that do not
 * already hold ctxld->lock.
 */
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
		      u32 val, u32 reg_ofs)
{
	spin_lock_irq(&ctxld->lock);
	dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
	spin_unlock_irq(&ctxld->lock);
}
0368
0369 bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
0370 {
0371 return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
0372 ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
0373 ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
0374 }
0375
0376 int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
0377 {
0378 dcss_ctxld_hw_cfg(ctxld);
0379
0380 if (!ctxld->irq_en) {
0381 enable_irq(ctxld->irq);
0382 ctxld->irq_en = true;
0383 }
0384
0385 return 0;
0386 }
0387
0388 int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
0389 {
0390 int ret = 0;
0391 unsigned long timeout = jiffies + msecs_to_jiffies(500);
0392
0393 if (!dcss_ctxld_is_flushed(ctxld)) {
0394 dcss_ctxld_kick(ctxld);
0395
0396 while (!time_after(jiffies, timeout) && ctxld->in_use)
0397 msleep(20);
0398
0399 if (time_after(jiffies, timeout))
0400 return -ETIMEDOUT;
0401 }
0402
0403 spin_lock_irq(&ctxld->lock);
0404
0405 if (ctxld->irq_en) {
0406 disable_irq_nosync(ctxld->irq);
0407 ctxld->irq_en = false;
0408 }
0409
0410
0411 ctxld->current_ctx = 0;
0412 ctxld->ctx_size[0][CTX_DB] = 0;
0413 ctxld->ctx_size[0][CTX_SB_HP] = 0;
0414 ctxld->ctx_size[0][CTX_SB_LP] = 0;
0415
0416 spin_unlock_irq(&ctxld->lock);
0417
0418 return ret;
0419 }
0420
/* Lockdep assertion for callers that must hold ctxld->lock. */
void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld)
{
	lockdep_assert_held(&ctxld->lock);
}