0001
0002
0003
0004
0005
0006
0007 #include <linux/cpu.h>
0008 #include <linux/err.h>
0009 #include <linux/init.h>
0010 #include <linux/smp.h>
0011 #include <linux/spinlock.h>
0012 #include <linux/log2.h>
0013 #include <linux/io.h>
0014 #include <linux/of.h>
0015 #include <linux/of_address.h>
0016
0017 #include <asm/cacheflush.h>
0018 #include <asm/cp15.h>
0019 #include <asm/cputype.h>
0020 #include <asm/hardware/cache-l2x0.h>
0021 #include <asm/hardware/cache-aurora-l2.h>
0022 #include "cache-tauros3.h"
0023
/*
 * Per-variant L2 cache controller description.  One of these is selected
 * at init time (by part number or DT match) and copied with kmemdup() in
 * __l2c_init() so the resume path can still reach it after __init memory
 * is released.
 */
struct l2c_init_data {
	const char *type;	/* human-readable controller name */
	unsigned way_size_0;	/* way size corresponding to way_size_bits == 0 */
	unsigned num_lock;	/* number of lockdown register sets */
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	void (*configure)(void __iomem *);
	void (*unlock)(void __iomem *, unsigned);
	struct outer_cache_fns outer_cache;	/* ops template for this variant */
};
0036
#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;			/* virtual base of the controller */
static const struct l2c_init_data *l2x0_data;	/* kmemdup'd copy, see __l2c_init() */
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* bitmask of active ways */
static u32 l2x0_size;		/* total cache size in bytes */
/* Register used for cache sync; the 753970 workaround redirects it. */
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/* Set from DT to opt out of the Cortex-A9 specific L2C-310 features. */
static bool l2x0_bresp_disable;
static bool l2x0_flz_disable;
0050
0051
0052
0053
0054 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
0055 {
0056
0057 while (readl_relaxed(reg) & mask)
0058 cpu_relax();
0059 }
0060
0061
0062
0063
0064
0065 static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
0066 {
0067 if (val == readl_relaxed(base + reg))
0068 return;
0069 if (outer_cache.write_sec)
0070 outer_cache.write_sec(val, reg);
0071 else
0072 writel_relaxed(val, base + reg);
0073 }
0074
0075
0076
0077
0078
0079
/*
 * Poke the debug control register; the PL310 erratum workarounds below
 * toggle it between 0x03 and 0x00 around maintenance operations.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}
0084
/* Start a background operation on all ways and wait for it to finish. */
static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}
0090
0091 static inline void l2c_unlock(void __iomem *base, unsigned num)
0092 {
0093 unsigned i;
0094
0095 for (i = 0; i < num; i++) {
0096 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
0097 i * L2X0_LOCKDOWN_STRIDE);
0098 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
0099 i * L2X0_LOCKDOWN_STRIDE);
0100 }
0101 }
0102
/* Restore the saved auxiliary control register value. */
static void l2c_configure(void __iomem *base)
{
	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}
0107
0108
0109
0110
0111
/*
 * Enable the cache controller: configure it, clear the way lockdowns,
 * invalidate the whole cache and finally set the enable bit.  Callers
 * only invoke this while the enable bit is clear.  The invalidate and
 * sync are performed with IRQs off so no other operation can be
 * interleaved with them.
 */
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
	unsigned long flags;

	/* Prefer a platform-provided configure hook when one exists. */
	if (outer_cache.configure)
		outer_cache.configure(&l2x0_saved_regs);
	else
		l2x0_data->configure(base);

	l2x0_data->unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}
0131
static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	/* Quiesce the cache PMU before the controller goes away. */
	l2x0_pmu_suspend();

	/* Flush all dirty data, then clear the enable bit. */
	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);	/* ensure the disabling write has completed */
}
0142
0143 static void l2c_save(void __iomem *base)
0144 {
0145 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
0146 }
0147
static void l2c_resume(void)
{
	void __iomem *base = l2x0_base;

	/* Do not touch the controller if it is already enabled. */
	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_data->num_lock);

	l2x0_pmu_resume();
}
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
/*
 * Issue a cache sync.  Goes through sync_reg_offset so the PL310
 * erratum 753970 workaround (which redirects syncs to a dummy register)
 * also applies here.  Unlike the L2C-220 path, completion is not polled
 * and no lock is taken.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}
0177
0178 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
0179 unsigned long end)
0180 {
0181 while (start < end) {
0182 writel_relaxed(start, reg);
0183 start += CACHE_LINE_SIZE;
0184 }
0185 }
0186
/*
 * Invalidate the physical address range [start, end).  Partially
 * covered cache lines at either boundary are cleaned+invalidated
 * instead, so that unrelated dirty data sharing the line is not lost.
 */
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
0205
/* Clean (write back) the physical address range [start, end). */
static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);	/* align down to a line */
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
0214
/* Clean and invalidate the physical address range [start, end). */
static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);	/* align down to a line */
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
0223
/* Clean and invalidate the entire cache, by way. */
static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	/* Callers must have IRQs disabled around a full flush. */
	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}
0233
/* outer_cache sync hook for the L2C-210. */
static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}
0238
/* L2C-210 description for non-DT (platform) initialisation. */
static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
/*
 * L2C-220 cache sync: issue the sync and poll for its completion.
 * Callers hold l2x0_lock; operations on this controller are not atomic.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}
0273
/* Perform a whole-cache by-way operation plus sync, under the lock. */
static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
0283
/*
 * Issue a line-by-line operation on [start, end) while holding
 * l2x0_lock.  The lock is dropped and retaken between 4K blocks to
 * bound IRQ-off latency; the (possibly refreshed) irqsave flags are
 * returned for the caller's final unlock.  Each write is preceded by a
 * wait because L2C-220 operations are not atomic.
 */
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			/* Let pending IRQs in before the next block. */
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
0306
/*
 * Invalidate [start, end).  Partially covered boundary lines are
 * cleaned+invalidated so dirty data sharing those lines is preserved.
 */
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			/* Wait for the previous boundary op to finish. */
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
0333
/*
 * Clean [start, end); falls back to a whole-cache clean when the range
 * covers at least the entire cache size.
 */
static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	/*
	 * NOTE(review): this waits on CLEAN_INV_LINE_PA although the range
	 * op above used CLEAN_LINE_PA; the sibling inv/flush routines wait
	 * on the register they wrote - confirm this is intentional.
	 */
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
0352
/*
 * Clean+invalidate [start, end); falls back to a whole-cache flush when
 * the range covers at least the entire cache size.
 */
static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
0371
/* Clean and invalidate the entire cache, by way. */
static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}
0376
/* outer_cache sync hook for the L2C-220; needs the lock, unlike L2C-210. */
static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
0385
static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
	/*
	 * Force the non-secure lockdown-access bit into the aux control
	 * value so l2c220_unlock() is subsequently able to clear the way
	 * lockdown registers.
	 */
	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);
}
0397
/* Clear way lockdowns, but only when non-secure access to them is allowed. */
static void l2c220_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}
0403
/* L2C-220 description for non-DT (platform) initialisation. */
static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
/*
 * Erratum 588369 workaround (installed by l2c310_fixup() on affected
 * revisions): partially covered boundary lines are handled with a
 * separate clean followed by invalidate, bracketed by debug register
 * writes, instead of a single clean+invalidate.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Serialise the boundary-line handling. */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	/* The fully covered interior lines can be invalidated as usual. */
	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
0498
/*
 * Erratum 588369 workaround for flush_range: every line is cleaned then
 * invalidated as two separate operations, with the debug register set
 * around each 4K block.  The lock is dropped between blocks to bound
 * IRQ-off latency.
 */
static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			/* Let pending IRQs in before the next block. */
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}
0525
/*
 * Erratum 727915 workaround (installed by l2c310_fixup() on affected
 * revisions): wrap the by-way clean+invalidate in debug register writes.
 */
static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
0538
/*
 * Save the L2C-310 registers needed for resume.  Prefetch and power
 * control registers only exist from r2p0 and r3p0 respectively, so
 * they are read conditionally on the RTL revision.
 */
static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}
0567
/*
 * Restore the L2C-310 registers captured by l2c310_save(), again
 * gating the prefetch/power control writes on the RTL revision.
 */
static void l2c310_configure(void __iomem *base)
{
	unsigned revision;

	l2c_configure(base);

	/* restore pl310 setup */
	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
		      L310_TAG_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.data_latency, base,
		      L310_DATA_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.filter_end, base,
		      L310_ADDR_FILTER_END);
	l2c_write_sec(l2x0_saved_regs.filter_start, base,
		      L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
				 L2X0_CACHE_ID_RTL_MASK;

	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
			      L310_PREFETCH_CTRL);
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
			      L310_POWER_CTRL);
}
0594
/*
 * CPU hotplug callback, registered by l2c310_enable() when full line of
 * zeros is in use: set ACTLR bits 1-3 on the incoming CPU (presumably
 * the Cortex-A9 FLZ/prefetch hint bits - see the matching clear in
 * l2c310_dying_cpu()).
 */
static int l2c310_starting_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
	return 0;
}
0600
/* CPU hotplug teardown: clear the ACTLR bits set by l2c310_starting_cpu(). */
static int l2c310_dying_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
	return 0;
}
0606
/*
 * Enable the L2C-310, resolving the Cortex-A9 specific feature bits
 * (early BRESP, full line of zeros) before handing off to l2c_enable(),
 * then report the resulting prefetch/power configuration.
 */
static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
	u32 aux = l2x0_saved_regs.aux_ctrl;

	/* Early BRESP only exists from r2p0, and only works with an A9. */
	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9 && !l2x0_bresp_disable) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	if (cortex_a9 && !l2x0_flz_disable) {
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		/* Complain about inconsistent CPU vs L2C FLZ settings. */
		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		/* Only self-enable FLZ when we can write the register directly. */
		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	/*
	 * Force non-secure lockdown access on, so l2c310_unlock() can
	 * later clear the way lockdown registers.
	 */
	l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);

	/* Re-read the value the hardware actually accepted. */
	aux = readl_relaxed(base + L2X0_AUX_CTRL);

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	/* FLZ needs matching ACTLR bits managed across CPU hotplug. */
	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
		cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
				  "arm/l2x0:starting", l2c310_starting_cpu,
				  l2c310_dying_cpu);
}
0680
0681 static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
0682 struct outer_cache_fns *fns)
0683 {
0684 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
0685 const char *errata[8];
0686 unsigned n = 0;
0687
0688 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
0689 revision < L310_CACHE_ID_RTL_R2P0 &&
0690
0691 fns->inv_range == l2c210_inv_range) {
0692 fns->inv_range = l2c310_inv_range_erratum;
0693 fns->flush_range = l2c310_flush_range_erratum;
0694 errata[n++] = "588369";
0695 }
0696
0697 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
0698 revision >= L310_CACHE_ID_RTL_R2P0 &&
0699 revision < L310_CACHE_ID_RTL_R3P1) {
0700 fns->flush_all = l2c310_flush_all_erratum;
0701 errata[n++] = "727915";
0702 }
0703
0704 if (revision >= L310_CACHE_ID_RTL_R3P0 &&
0705 revision < L310_CACHE_ID_RTL_R3P2) {
0706 u32 val = l2x0_saved_regs.prefetch_ctrl;
0707 if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) {
0708 val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
0709 l2x0_saved_regs.prefetch_ctrl = val;
0710 errata[n++] = "752271";
0711 }
0712 }
0713
0714 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
0715 revision == L310_CACHE_ID_RTL_R3P0) {
0716 sync_reg_offset = L2X0_DUMMY_REG;
0717 errata[n++] = "753970";
0718 }
0719
0720 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
0721 errata[n++] = "769419";
0722
0723 if (n) {
0724 unsigned i;
0725
0726 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
0727 for (i = 0; i < n; i++)
0728 pr_cont(" %s", errata[i]);
0729 pr_cont(" enabled\n");
0730 }
0731 }
0732
static void l2c310_disable(void)
{
	/*
	 * If full line of zeros is enabled, clear the corresponding
	 * ACTLR bits before the L2 goes away; the matching re-enable is
	 * in l2c310_resume().
	 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));

	l2c_disable();
}
0744
static void l2c310_resume(void)
{
	l2c_resume();

	/* Re-enable the ACTLR bits cleared in l2c310_disable(). */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}
0753
/* Clear way lockdowns, but only when non-secure access to them is allowed. */
static void l2c310_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}
0759
/* L2C-310 description for non-DT (platform) initialisation. */
static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
0779
/*
 * Common initialisation for all variants: merge the platform/DT aux
 * value into the hardware one, size the cache, install the outer_cache
 * operations (after any erratum fixups), and enable the controller if
 * it is not already running.  Returns 0 or -ENOMEM.
 */
static int __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux, old_aux;

	/*
	 * Keep a private copy of the init data: the originals are
	 * __initconst, but the resume path needs l2x0_data afterwards.
	 */
	l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
	if (!l2x0_data)
		return -ENOMEM;

	/*
	 * Sanity check: a bit both forced on in aux_val and preserved by
	 * aux_mask indicates a broken platform configuration.
	 */
	if (aux_val & aux_mask)
		pr_alert("L2C: platform provided aux values permit register corruption.\n");

	/* aux = (hardware value & aux_mask) | aux_val */
	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	if (old_aux != aux)
		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
		        old_aux, aux);

	/* Determine the number of ways from part-specific aux bits. */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size when the WAY_SIZE field is 0; each
	 * increment of the field doubles the way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);

	/* Build the op table, preserving any platform-installed hooks. */
	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	fns.configure = outer_cache.configure;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (nosync) {
		pr_info("L2C: disabling outer sync\n");
		fns.sync = NULL;
	}

	/*
	 * Only enable the cache ourselves when it is not already on
	 * (e.g. left enabled by the bootloader or secure firmware).
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		l2x0_saved_regs.aux_ctrl = aux;

		data->enable(l2x0_base, data->num_lock);
	}

	outer_cache = fns;

	/* Snapshot the registers needed for a later resume. */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read to report what the hardware actually accepted. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);

	l2x0_pmu_register(l2x0_base, cache_id);

	return 0;
}
0893
/*
 * Non-DT entry point: identify the controller from its CACHE_ID
 * register, pick the matching init data and run the common init.
 */
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:	/* unknown parts are treated as an L2C-210 */
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	/* Save registers early; __l2c_init() may enable the cache. */
	if (data->save)
		data->save(l2x0_base);

	__l2c_init(data, aux_val, aux_mask, cache_id, false);
}
0924
#ifdef CONFIG_OF
/* Aurora: request write-through mode (set from DT, see below). */
static int l2_wt_override;

/*
 * Part number override taken from the device tree, used when the
 * CACHE_ID register cannot be trusted or read.
 */
static u32 cache_id_part_number_from_dt;
0931
0932
0933
0934
0935
0936
0937
0938
0939
0940
0941
0942 static int __init l2x0_cache_size_of_parse(const struct device_node *np,
0943 u32 *aux_val, u32 *aux_mask,
0944 u32 *associativity,
0945 u32 max_way_size)
0946 {
0947 u32 mask = 0, val = 0;
0948 u32 cache_size = 0, sets = 0;
0949 u32 way_size_bits = 1;
0950 u32 way_size = 0;
0951 u32 block_size = 0;
0952 u32 line_size = 0;
0953
0954 of_property_read_u32(np, "cache-size", &cache_size);
0955 of_property_read_u32(np, "cache-sets", &sets);
0956 of_property_read_u32(np, "cache-block-size", &block_size);
0957 of_property_read_u32(np, "cache-line-size", &line_size);
0958
0959 if (!cache_size || !sets)
0960 return -ENODEV;
0961
0962
0963 if (!line_size) {
0964 if (block_size) {
0965
0966 line_size = block_size;
0967 } else {
0968
0969 pr_warn("L2C OF: no cache block/line size given: "
0970 "falling back to default size %d bytes\n",
0971 CACHE_LINE_SIZE);
0972 line_size = CACHE_LINE_SIZE;
0973 }
0974 }
0975
0976 if (line_size != CACHE_LINE_SIZE)
0977 pr_warn("L2C OF: DT supplied line size %d bytes does "
0978 "not match hardware line size of %d bytes\n",
0979 line_size,
0980 CACHE_LINE_SIZE);
0981
0982
0983
0984
0985
0986
0987
0988
0989
0990 way_size = sets * line_size;
0991 *associativity = cache_size / way_size;
0992
0993 if (way_size > max_way_size) {
0994 pr_err("L2C OF: set size %dKB is too large\n", way_size);
0995 return -EINVAL;
0996 }
0997
0998 pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
0999 cache_size, cache_size >> 10);
1000 pr_info("L2C OF: override line size: %d bytes\n", line_size);
1001 pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
1002 way_size, way_size >> 10);
1003 pr_info("L2C OF: override associativity: %d\n", *associativity);
1004
1005
1006
1007
1008
1009 way_size_bits = ilog2(way_size >> 10) - 3;
1010 if (way_size_bits < 1 || way_size_bits > 6) {
1011 pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
1012 way_size);
1013 return -EINVAL;
1014 }
1015
1016 mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
1017 val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);
1018
1019 *aux_val &= ~mask;
1020 *aux_val |= val;
1021 *aux_mask &= ~mask;
1022
1023 return 0;
1024 }
1025
/*
 * Parse the L2C-210/220 DT properties into aux control val/mask pairs.
 * Bits accumulated in 'mask' are removed from *aux_mask at the end, so
 * the DT-provided values replace the hardware defaults for those bits.
 */
static void __init l2x0_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;
	int ret;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	/*
	 * NOTE(review): 'mask &= ~PARITY' is a no-op here because the
	 * parity bit is never set in 'mask' beforehand; as written,
	 * "arm,parity-disable" does not cause the bit to be cleared via
	 * *aux_mask - confirm this is intended (compare the direct
	 * *aux_val/*aux_mask handling in l2c310_of_parse()).
	 */
	if (of_property_read_bool(np, "arm,parity-enable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
		val |= L2C_AUX_CTRL_PARITY_ENABLE;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
		val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (ret)
		return;

	if (assoc > 8) {
		pr_err("l2x0 of: cache setting yield too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
1085
/* L2C-210 description for DT initialisation. */
static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};
1105
/* L2C-220 description for DT initialisation. */
static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};
1125
1126 static void __init l2c310_of_parse(const struct device_node *np,
1127 u32 *aux_val, u32 *aux_mask)
1128 {
1129 u32 data[3] = { 0, 0, 0 };
1130 u32 tag[3] = { 0, 0, 0 };
1131 u32 filter[2] = { 0, 0 };
1132 u32 assoc;
1133 u32 prefetch;
1134 u32 power;
1135 u32 val;
1136 int ret;
1137
1138 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
1139 if (tag[0] && tag[1] && tag[2])
1140 l2x0_saved_regs.tag_latency =
1141 L310_LATENCY_CTRL_RD(tag[0] - 1) |
1142 L310_LATENCY_CTRL_WR(tag[1] - 1) |
1143 L310_LATENCY_CTRL_SETUP(tag[2] - 1);
1144
1145 of_property_read_u32_array(np, "arm,data-latency",
1146 data, ARRAY_SIZE(data));
1147 if (data[0] && data[1] && data[2])
1148 l2x0_saved_regs.data_latency =
1149 L310_LATENCY_CTRL_RD(data[0] - 1) |
1150 L310_LATENCY_CTRL_WR(data[1] - 1) |
1151 L310_LATENCY_CTRL_SETUP(data[2] - 1);
1152
1153 of_property_read_u32_array(np, "arm,filter-ranges",
1154 filter, ARRAY_SIZE(filter));
1155 if (filter[1]) {
1156 l2x0_saved_regs.filter_end =
1157 ALIGN(filter[0] + filter[1], SZ_1M);
1158 l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
1159 | L310_ADDR_FILTER_EN;
1160 }
1161
1162 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
1163 if (!ret) {
1164 switch (assoc) {
1165 case 16:
1166 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1167 *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
1168 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1169 break;
1170 case 8:
1171 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1172 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1173 break;
1174 default:
1175 pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
1176 assoc);
1177 break;
1178 }
1179 }
1180
1181 if (of_property_read_bool(np, "arm,shared-override")) {
1182 *aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
1183 *aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
1184 }
1185
1186 if (of_property_read_bool(np, "arm,parity-enable")) {
1187 *aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
1188 *aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1189 } else if (of_property_read_bool(np, "arm,parity-disable")) {
1190 *aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1191 *aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1192 }
1193
1194 if (of_property_read_bool(np, "arm,early-bresp-disable"))
1195 l2x0_bresp_disable = true;
1196
1197 if (of_property_read_bool(np, "arm,full-line-zero-disable"))
1198 l2x0_flz_disable = true;
1199
1200 prefetch = l2x0_saved_regs.prefetch_ctrl;
1201
1202 ret = of_property_read_u32(np, "arm,double-linefill", &val);
1203 if (ret == 0) {
1204 if (val)
1205 prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
1206 else
1207 prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
1208 } else if (ret != -EINVAL) {
1209 pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
1210 }
1211
1212 ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
1213 if (ret == 0) {
1214 if (val)
1215 prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
1216 else
1217 prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
1218 } else if (ret != -EINVAL) {
1219 pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
1220 }
1221
1222 ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
1223 if (ret == 0) {
1224 if (!val)
1225 prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
1226 else
1227 prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
1228 } else if (ret != -EINVAL) {
1229 pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
1230 }
1231
1232 ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
1233 if (ret == 0) {
1234 if (val)
1235 prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
1236 else
1237 prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
1238 } else if (ret != -EINVAL) {
1239 pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
1240 }
1241
1242 ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
1243 if (ret == 0) {
1244 prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
1245 prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
1246 } else if (ret != -EINVAL) {
1247 pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
1248 }
1249
1250 ret = of_property_read_u32(np, "prefetch-data", &val);
1251 if (ret == 0) {
1252 if (val) {
1253 prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
1254 *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
1255 } else {
1256 prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
1257 *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
1258 }
1259 *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
1260 } else if (ret != -EINVAL) {
1261 pr_err("L2C-310 OF prefetch-data property value is missing\n");
1262 }
1263
1264 ret = of_property_read_u32(np, "prefetch-instr", &val);
1265 if (ret == 0) {
1266 if (val) {
1267 prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
1268 *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
1269 } else {
1270 prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
1271 *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
1272 }
1273 *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
1274 } else if (ret != -EINVAL) {
1275 pr_err("L2C-310 OF prefetch-instr property value is missing\n");
1276 }
1277
1278 l2x0_saved_regs.prefetch_ctrl = prefetch;
1279
1280 power = l2x0_saved_regs.pwr_ctrl |
1281 L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
1282
1283 ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
1284 if (!ret) {
1285 if (!val)
1286 power &= ~L310_DYNAMIC_CLK_GATING_EN;
1287 } else if (ret != -EINVAL) {
1288 pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
1289 }
1290 ret = of_property_read_u32(np, "arm,standby-mode", &val);
1291 if (!ret) {
1292 if (!val)
1293 power &= ~L310_STNDBY_MODE_EN;
1294 } else if (ret != -EINVAL) {
1295 pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
1296 }
1297
1298 l2x0_saved_regs.pwr_ctrl = power;
1299 }
1300
/*
 * Init data for a DT-probed L2C-310 ("arm,pl310-cache").  The range,
 * flush-all and sync operations are shared with the L2C-210 helpers;
 * enable/disable/save/configure/unlock/resume are the L2C-310 specific
 * variants.
 */
static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
/*
 * Variant of of_l2c310_data used when the DT marks the PL310 as
 * "arm,io-coherent" (see l2x0_of_init()).  It is identical except that
 * .sync is left unset — presumably explicit outer sync is unnecessary
 * (or undesirable) on an I/O-coherent interconnect; the remaining
 * maintenance operations are kept.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};
1350
1351
1352
1353
1354
1355
1356 static unsigned long aurora_range_end(unsigned long start, unsigned long end)
1357 {
1358
1359
1360
1361
1362
1363 if (end > start + AURORA_MAX_RANGE_SIZE)
1364 end = start + AURORA_MAX_RANGE_SIZE;
1365
1366
1367
1368
1369 if (end > PAGE_ALIGN(start+1))
1370 end = PAGE_ALIGN(start+1);
1371
1372 return end;
1373 }
1374
/*
 * Perform a physical-address range maintenance operation on the Aurora
 * L2.  @offset selects the operation register (invalidate, clean or
 * flush range).  The range is processed in chunks bounded by
 * aurora_range_end(), with a sync write after each chunk.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	void __iomem *base = l2x0_base;
	unsigned long range_end;
	unsigned long flags;

	/*
	 * Align start down and end up to cache line boundaries so whole
	 * lines are covered.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Process the range one hardware-sized chunk at a time.
	 */
	while (start < end) {
		range_end = aurora_range_end(start, end);

		/*
		 * The base-address write and the operation-register write
		 * form a pair (the op register takes the inclusive end
		 * address, hence the -CACHE_LINE_SIZE); hold the lock so
		 * another CPU cannot interleave its own pair.
		 */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
		writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);

		/* Drain this chunk before starting the next one. */
		writel_relaxed(0, base + AURORA_SYNC_REG);
		start = range_end;
	}
}
/* Invalidate the given physical address range in the outer cache. */
static void aurora_inv_range(unsigned long start, unsigned long end)
{
	aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
}
1407
1408 static void aurora_clean_range(unsigned long start, unsigned long end)
1409 {
1410
1411
1412
1413
1414 if (!l2_wt_override)
1415 aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
1416 }
1417
1418 static void aurora_flush_range(unsigned long start, unsigned long end)
1419 {
1420 if (l2_wt_override)
1421 aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
1422 else
1423 aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
1424 }
1425
/* Clean and invalidate all ways, then drain via the Aurora sync register. */
static void aurora_flush_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* clean & invalidate all ways under the lock */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	/* the sync write itself does not need the lock */
	writel_relaxed(0, base + AURORA_SYNC_REG);
}
1438
/* Drain outstanding L2 operations via the Aurora sync register. */
static void aurora_cache_sync(void)
{
	writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
}
1443
/*
 * Disable the Aurora L2: flush everything, drain, then clear the
 * control register (via the secure write hook if one is installed).
 * The whole sequence runs under the lock so no maintenance operation
 * can race with the disable.
 */
static void aurora_disable(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	writel_relaxed(0, base + AURORA_SYNC_REG);
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
1456
/* Save the control and aux-control registers for restore on resume. */
static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}
1462
1463
1464
1465
1466
/*
 * Enable the Aurora L2 when used as a system cache with no outer-cache
 * interface: set the AURORA_CTRL_FW bit in the CP15 c15,c2,0 control
 * register (presumably enabling broadcast/forwarding of CPU cache
 * maintenance to the L2 — confirm against the Aurora docs), then enable
 * through the common l2c_enable() path.
 */
static void __init aurora_enable_no_outer(void __iomem *base,
					  unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	/* ensure the CP15 write has taken effect before enabling */
	isb();

	l2c_enable(base, num_lock);
}
1480
/* Aurora uses its own sync register rather than the default L2X0 one. */
static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}
1486
1487 static void __init aurora_of_parse(const struct device_node *np,
1488 u32 *aux_val, u32 *aux_mask)
1489 {
1490 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
1491 u32 mask = AURORA_ACR_REPLACEMENT_MASK;
1492
1493 of_property_read_u32(np, "cache-id-part",
1494 &cache_id_part_number_from_dt);
1495
1496
1497 l2_wt_override = of_property_read_bool(np, "wt-override");
1498
1499 if (l2_wt_override) {
1500 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
1501 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
1502 }
1503
1504 if (of_property_read_bool(np, "marvell,ecc-enable")) {
1505 mask |= AURORA_ACR_ECC_EN;
1506 val |= AURORA_ACR_ECC_EN;
1507 }
1508
1509 if (of_property_read_bool(np, "arm,parity-enable")) {
1510 mask |= AURORA_ACR_PARITY_EN;
1511 val |= AURORA_ACR_PARITY_EN;
1512 } else if (of_property_read_bool(np, "arm,parity-disable")) {
1513 mask |= AURORA_ACR_PARITY_EN;
1514 }
1515
1516 *aux_val &= ~mask;
1517 *aux_val |= val;
1518 *aux_mask &= ~mask;
1519 }
1520
/*
 * Aurora used as an outer cache ("marvell,aurora-outer-cache"): full
 * range/flush/sync operations via the Aurora range registers, with the
 * generic l2c enable/configure/unlock.
 */
static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = aurora_flush_all,
		.disable = aurora_disable,
		.sync = aurora_cache_sync,
		.resume = l2c_resume,
	},
};
1541
/*
 * Aurora used as a system cache ("marvell,aurora-system-cache"): no
 * explicit outer-cache maintenance operations are installed —
 * presumably maintenance is handled via the CP15 broadcast enabled in
 * aurora_enable_no_outer(); only .resume is needed.
 */
static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.resume = l2c_resume,
	},
};
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
/*
 * Broadcom address remapping for the outer cache: physical addresses in
 * [BCM_SYS_EMI_START_ADDR, BCM_VC_EMI_SEC3_START_ADDR) get the SYS EMI
 * offset added before being handed to the L2; all other addresses get
 * the VC EMI offset (see bcm_l2_phys_addr()).
 */
#define BCM_SYS_EMI_START_ADDR 0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL

#define BCM_SYS_EMI_OFFSET 0x40000000UL
#define BCM_VC_EMI_OFFSET 0x80000000UL
1592
1593 static inline int bcm_addr_is_sys_emi(unsigned long addr)
1594 {
1595 return (addr >= BCM_SYS_EMI_START_ADDR) &&
1596 (addr < BCM_VC_EMI_SEC3_START_ADDR);
1597 }
1598
1599 static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
1600 {
1601 if (bcm_addr_is_sys_emi(addr))
1602 return addr + BCM_SYS_EMI_OFFSET;
1603 else
1604 return addr + BCM_VC_EMI_OFFSET;
1605 }
1606
1607 static void bcm_inv_range(unsigned long start, unsigned long end)
1608 {
1609 unsigned long new_start, new_end;
1610
1611 BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1612
1613 if (unlikely(end <= start))
1614 return;
1615
1616 new_start = bcm_l2_phys_addr(start);
1617 new_end = bcm_l2_phys_addr(end);
1618
1619
1620 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1621 l2c210_inv_range(new_start, new_end);
1622 return;
1623 }
1624
1625
1626
1627
1628 l2c210_inv_range(new_start,
1629 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1630 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1631 new_end);
1632 }
1633
1634 static void bcm_clean_range(unsigned long start, unsigned long end)
1635 {
1636 unsigned long new_start, new_end;
1637
1638 BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1639
1640 if (unlikely(end <= start))
1641 return;
1642
1643 new_start = bcm_l2_phys_addr(start);
1644 new_end = bcm_l2_phys_addr(end);
1645
1646
1647 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1648 l2c210_clean_range(new_start, new_end);
1649 return;
1650 }
1651
1652
1653
1654
1655 l2c210_clean_range(new_start,
1656 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1657 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1658 new_end);
1659 }
1660
1661 static void bcm_flush_range(unsigned long start, unsigned long end)
1662 {
1663 unsigned long new_start, new_end;
1664
1665 BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1666
1667 if (unlikely(end <= start))
1668 return;
1669
1670 if ((end - start) >= l2x0_size) {
1671 outer_cache.flush_all();
1672 return;
1673 }
1674
1675 new_start = bcm_l2_phys_addr(start);
1676 new_end = bcm_l2_phys_addr(end);
1677
1678
1679 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1680 l2c210_flush_range(new_start, new_end);
1681 return;
1682 }
1683
1684
1685
1686
1687 l2c210_flush_range(new_start,
1688 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1689 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1690 new_end);
1691 }
1692
1693
/*
 * Broadcom variant of the L2C-310: the range operations go through the
 * bcm_* wrappers, which remap addresses into the EMI windows before
 * calling the common l2c210 helpers.
 */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
1713
/*
 * Save Tauros3 state: the common l2c registers plus the Tauros3
 * auxiliary-2 and prefetch control registers, for restore on resume.
 */
static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}
1723
/* Restore the registers saved by tauros3_save() (used on resume). */
static void tauros3_configure(void __iomem *base)
{
	l2c_configure(base);
	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
		       base + TAUROS3_AUX2_CTRL);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       base + L310_PREFETCH_CTRL);
}
1732
/*
 * Marvell Tauros3: no DT tuning hook and no explicit outer maintenance
 * operations are installed — only save/configure for suspend/resume.
 */
static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	.configure = tauros3_configure,
	.unlock = l2c_unlock,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = l2c_resume,
	},
};
1746
#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
/* DT compatible strings mapped to their per-controller init data. */
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Legacy "bcm," prefix; same data as the "brcm," entry above. */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
1760
/*
 * Probe and initialise an L2 outer cache described in the device tree.
 *
 * Finds the first node matching l2x0_ids, maps its register window,
 * applies platform and DT-provided auxiliary-control tuning (DT parsing
 * only while the cache is still disabled) and hands over to
 * __l2c_init().
 *
 * @aux_val/@aux_mask: platform-requested aux control value and mask.
 * Returns 0 on success or a negative errno.
 */
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;
	u32 cache_level = 2;
	bool nosync = false;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* An I/O coherent PL310 uses the variant without outer sync. */
	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	/*
	 * Warn if the platform's value/mask would change the register,
	 * or complain if they are provided but have no effect.
	 */
	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
	}

	/* Sanity-check the DT description; these are non-fatal. */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	if (of_property_read_u32(np, "cache-level", &cache_level))
		pr_err("L2C: device tree omits to specify cache-level\n");

	if (cache_level != 2)
		pr_err("L2C: device tree specifies invalid cache level\n");

	nosync = of_property_read_bool(np, "arm,outer-sync-disable");

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	/* A DT-provided part number overrides what the hardware reports. */
	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
1825 #endif