// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH	1
#endif

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int op, const int full_page);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));

	return buf;
}
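
/*
 * For illustration only (values hypothetical, shapes follow the format
 * strings above), the boot log produced by this helper looks roughly like:
 *
 *	I-Cache		: 32K, 4way/set, 64B Line, VIPT
 *	D-Cache		: 64K, 2way/set, 64B Line, PIPT
 *	SLC		: 512K, 128B Line
 *	Peripherals	: 0xc0000000, IO-Coherency (per-device)
 */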

/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c) {
		ioc_exists = 1;

		/*
		 * As of today we don't support both IOC and ZONE_HIGHMEM enabled
		 * simultaneously: the IOC aperture covers only ZONE_NORMAL
		 * (low mem) and any dma transactions outside this aperture
		 * don't go through IOC at all. Same constraint for PAE40.
		 */
		if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
			ioc_enable = 0;
	} else {
		ioc_enable = 0;
	}

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}
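
/*
 * Worked example of the SLC decode above (hypothetical raw values): if
 * SLC_CFG reads back with sz = 2 and lsz = 1, then sz_k = 128 << 2 = 512 (KB)
 * and line_len = l2_line_sz = 64 (bytes); lsz == 0 would mean 128B lines.
 */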

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

/*
 * Line Operations on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
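
/*
 * Note on the encoding (an observation, not new behaviour): OP_FLUSH_N_INV
 * is literally OP_INV | OP_FLUSH (0x1 | 0x2 == 0x3), which is why the tests
 * "op & OP_INV" and "op & OP_FLUSH" in the helpers below also match the
 * combined flush-n-inv operation.
 */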

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1..v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line, since paddr sufficed as index in a Non-Aliasing VIPT cache
 * geometry. However for larger caches (way-size > page-size), i.e. aliasing
 * configs, paddr alone can't correctly index the cache, so the line ops need
 * both paddr (to match the tag, via the PTAG aux reg) and vaddr (to index).
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests;
	 * page sized flushes can be compile time optimised:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized)
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P is const for the loop, PTAG can be written once outside it
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
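
/*
 * Worked example of the floor/ceil math above (illustrative numbers):
 * with 64B lines, a request of paddr = 0x1013, sz = 0x40 becomes
 * sz = 0x40 + 0x13 = 0x53, paddr = 0x1000, and
 * num_lines = DIV_ROUND_UP(0x53, 64) = 2 -- both straddled lines are hit.
 */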

#ifndef USE_RGN_FLSH

/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT.
 * Here's how cache ops are implemented:
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to the MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher
 * bits need to be written into PTAG_HI
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d-cache: INV or FLUSH a line */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests;
	 * page sized flushes can be compile time optimised:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized)
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *  - upper 8 bits of paddr need to be written into PTAG_HI
	 *  - (and need to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (op == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#else

/*
 * optimized flush operation which takes a region as opposed to iterating
 * per line
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int s, e;

	/* Only for Inner region cmds (START/END pairs) */
	if (op == OP_INV_IC) {
		s = ARC_REG_IC_IVIR;
		e = ARC_REG_IC_ENDR;
	} else {
		s = ARC_REG_DC_STARTR;
		e = ARC_REG_DC_ENDR;
	}

	if (!full_page) {
		/* for any leading gap between @paddr and start of cache line */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

		/*
		 * account for any trailing gap to end of cache line;
		 * this is equivalent to DIV_ROUND_UP() in the line ops above
		 */
		sz += L1_CACHE_BYTES - 1;
	}

	if (is_pae40_enabled()) {
		/* TBD: check if crossing 4TB boundary */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/* ENDR needs to be set ahead of START */
	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
	write_aux_reg(s, paddr);

	/* caller waits on DC_CTRL.FS */
}
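
/*
 * Illustration of the ceil trick above (hypothetical numbers, 64B lines):
 * paddr = 0x1013, sz = 0x40 floors to paddr = 0x1000, sz = 0x53, then
 * sz += 63 gives an exclusive END of 0x1092. Reading ENDR as exclusive at
 * line granularity, that covers exactly lines 0x1000 and 0x1040 -- the same
 * two lines that DIV_ROUND_UP(0x53, 64) = 2 yields in the per-line loops.
 */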

#endif

#ifdef CONFIG_ARC_MMU_V3
#define __cache_line_loop	__cache_line_loop_v3
#else
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/*
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

#ifndef USE_RGN_FLSH
/*
 * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
 * in the non region flush regime (such as for ARCompact)
 */
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

#else

static inline void __before_dc_op(const int op)
{
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV)
		val |= DC_CTRL_INV_MODE_FLUSH;

	if (op != OP_INV_IC) {
		/*
		 * Flush / Invalidate is provided by DC_CTRL.RGN_OP 0 or 1
		 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}
	write_aux_reg(ctl, val);
}

#endif

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}

static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
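
/*
 * Usage sketch (mirrors the DMA helpers further below): kernel linear-map
 * addresses have no distinct vaddr, so the same value serves as both index
 * and tag, e.g. __dc_line_op_k(start, sz, OP_FLUSH_N_INV).
 */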

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif	/* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}
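
/*
 * Design note (an observation on the xcall above): the args are packed into
 * a struct so a single pointer fits on_each_cpu()'s callback signature; the
 * final '1' makes the call synchronous. This is needed because aux-reg based
 * cache ops only act on the local CPU, so each core must invalidate its own
 * I-cache lines.
 */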

#endif	/* CONFIG_SMP */

#else

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif	/* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + l2_line_sz - 1;
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status before polling */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}
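
/*
 * Worked example of the END computation above (hypothetical numbers):
 * paddr = 0x8000_0040, sz = 0x20, l2_line_sz = 64 gives
 * end = 0x8000_0040 + 0x20 + 0x3f = 0x8000_009f, guaranteeing that END
 * decodes to a different line than START even for sub-line regions.
 */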

noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);

	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
	unsigned int ctrl, cmd;
	unsigned long flags;
	int num_lines;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l2_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(cmd, paddr);
		paddr += l2_line_sz;
	}

	/* Make sure "busy" bit reports correct status before polling */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

#define slc_op(paddr, sz, op)	slc_op_rgn(paddr, sz, op)

noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status before polling */
	read_aux_reg(r);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}

/*
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when
 * kernel writes-to/reads-from it.
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping_file(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {
		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/*
	 * Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled above),
	 *     the range may still straddle two virtual pages, hence the loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc);
 *    when called by kprobe for a breakpoint in built-in kernel code, @vaddr
 *    will be paddr, meaning the cache op will use a paddr to index the cache
 *    (despite VIPT). This is fine since a built-in kernel page will not have
 *    any alternate virtual mappings.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (phys_addr_t)page_address(page));
}

#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * the K-mapping used by copy_page() below sees the right data.
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it
	 * is equally valid for SRC page as well.
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available),
	 * update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache
	 * page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}
EXPORT_SYMBOL(clear_user_page);

/*
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
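
/*
 * Userspace usage sketch (illustrative only; assumes the ARC-specific
 * __NR_cacheflush syscall number from <asm/unistd.h>; emit_code() is a
 * hypothetical JIT emitter):
 *
 *	emit_code(code_buf, sz);		// JIT writes instructions
 *	syscall(__NR_cacheflush, (uint32_t)code_buf, sz, 0);
 *	((void (*)(void))code_buf)();		// now safe to execute
 */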

/*
 * IO-Coherency (IOC) setup rules:
 *
 * 1. Needs to be at system level, so only once by Master core
 *    Non-Masters need not be accessing caches at that time
 *    - They are either HALT_ON_RESET and kick started much later, or
 *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
 *      doesn't perturb caches or coherency unit
 *
 * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
 *    otherwise any straggler data might behave strangely post IOC enabling
 *
 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
 *    coherency transactions
 */
noinline void __init arc_ioc_setup(void)
{
	unsigned int ioc_base, mem_sz;

	/*
	 * If IOC was already enabled (due to bootloader) it technically needs
	 * to be reconfigured with aperture base,size corresponding to Linux
	 * memory map, which will certainly be different than uboot's. But
	 * disabling and reenabling IOC when DMA might be potentially active
	 * is tricky business. To avoid random memory issues later, just panic
	 * here and ask user to upgrade bootloader to one which doesn't enable
	 * IOC
	 */
	if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
		panic("IOC already enabled, please upgrade bootloader!\n");

	if (!ioc_enable)
		return;

	/* Flush + invalidate + disable L1 dcache */
	__dc_disable();

	/* Flush + invalidate SLC */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/* currently IOC Aperture covers entire DDR */
	mem_sz = arc_get_mem_sz();

	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
		panic("IOC Aperture size must be power of 2 larger than 4KB");

	/*
	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512MB, 0x12 implies 1GB...
	 */
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);

	/* for now assume kernel base is start of IOC aperture */
	ioc_base = CONFIG_LINUX_RAM_BASE;

	if (ioc_base % mem_sz != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
	write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);

	/* Re-enable L1 dcache */
	__dc_enable();
}
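
/*
 * Worked example of the aperture-size encoding above (illustrative):
 * for mem_sz = 512MB, order_base_2(512MB >> 10) - 2 = order_base_2(512K) - 2
 * = 19 - 2 = 17 = 0x11, i.e. size is programmed as 2 ^ (0x11 + 2) KB = 512MB,
 * consistent with the decode rule in the comment above.
 */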

/*
 * Cache related boot time checks/setups only needed on master CPU:
 *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
 *    Assume SMP so all cores will have same cache config, hence a check on
 *    one core suffices for all
 *  - IOC setup / dma callbacks only need to be set up once
 */
void __init arc_cache_init_master(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* D-cache aliasing only possible on ARCompact (ARCv2 D$ is PIPT) */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/*
	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
	 * or equal to any cache line length.
	 */
	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
			 "SMP_CACHE_BYTES must be >= any cache line length");
	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
		panic("L2 Cache line [%d] > kernel Config [%d]\n",
		      l2_line_sz, SMP_CACHE_BYTES);

	/* slc_enable=0 on cmdline: purge and disable SLC */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_exists)
		arc_ioc_setup();

	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}

	/*
	 * In case of IOC (say IOC+SLC case), pointers above could still be
	 * set but end up not being relevant, as the first function in the
	 * chain is not called at all for devices using coherent DMA:
	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
	 */
}

void __ref arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!cpu)
		arc_cache_init_master();

	/*
	 * In PAE regime, TLB and cache maintenance ops take wider addresses.
	 * Even if the kernel doesn't use PAE, the upper address registers
	 * (PTAG_HI, SLC_RGN_*1) may hold stale values, e.g. from a
	 * PAE-enabled bootloader, so zero them out once lest subsequent
	 * cache ops silently pick up garbage upper address bits.
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		if (l2_line_sz) {
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}
}