/*
 * RM7000 (RM7K) secondary and tertiary cache support.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/addrspace.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>	/* for run_uncached() */

/* Secondary cache line size and tertiary cache page size. */
#define sc_lsize	32
#define tc_pagesize	(32*128)

/* Secondary cache size: 256 KiB. */
#define scache_size	(256*1024)

/* Tertiary cache line size. */
#define tc_lsize	32

extern unsigned long icache_way_size, dcache_way_size;
static unsigned long tcache_size;

#include <asm/r4kcache.h>

static int rm7k_tcache_init;

/*
 * Write back and invalidate the secondary cache over [addr, addr + size),
 * then invalidate any tertiary cache pages that cover the same range.
 */
static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	pr_debug("rm7k_sc_wback_inv[%08lx,%08lx]", addr, size);

	/* Catch bad driver code */
	BUG_ON(size == 0);

	blast_scache_range(addr, addr + size);

	if (!rm7k_tcache_init)
		return;

	a = addr & ~(tc_pagesize - 1);
	end = (addr + size - 1) & ~(tc_pagesize - 1);
	while (1) {
		invalidate_tcache_page(a);	/* Page_Invalidate_T */
		if (a == end)
			break;
		a += tc_pagesize;
	}
}
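
/*
 * Invalidate the secondary cache over [addr, addr + size) without writing
 * dirty lines back, then invalidate any tertiary cache pages covering the
 * same range.
 */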
static void rm7k_sc_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	pr_debug("rm7k_sc_inv[%08lx,%08lx]", addr, size);

	/* Catch bad driver code */
	BUG_ON(size == 0);

	blast_inv_scache_range(addr, addr + size);

	if (!rm7k_tcache_init)
		return;

	a = addr & ~(tc_pagesize - 1);
	end = (addr + size - 1) & ~(tc_pagesize - 1);
	while (1) {
		invalidate_tcache_page(a);	/* Page_Invalidate_T */
		if (a == end)
			break;
		a += tc_pagesize;
	}
}
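
/*
 * Invalidate the entire tertiary cache, one tc_pagesize page at a time.
 */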
static void blast_rm7k_tcache(void)
{
	unsigned long start = CKSEG0ADDR(0);
	unsigned long end = start + tcache_size;

	write_c0_taglo(0);

	while (start < end) {
		cache_op(Page_Invalidate_T, start);
		start += tc_pagesize;
	}
}
/*
 * Initialise all tertiary cache tags.  Executed in uncached address space
 * via run_uncached().
 */
static void __rm7k_tc_enable(void)
{
	int i;

	set_c0_config(RM7K_CONF_TE);

	write_c0_taglo(0);
	write_c0_taghi(0);

	for (i = 0; i < tcache_size; i += tc_lsize)
		cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
}
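
/*
 * Turn the tertiary cache on if it is not already enabled.  The tag
 * initialisation must run from uncached space, hence run_uncached().
 */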
static void rm7k_tc_enable(void)
{
	if (read_c0_config() & RM7K_CONF_TE)
		return;

	BUG_ON(tcache_size == 0);

	run_uncached(__rm7k_tc_enable);
}
/*
 * Initialise all secondary cache tags.  Executed in uncached address space
 * via run_uncached().
 */
static void __rm7k_sc_enable(void)
{
	int i;

	set_c0_config(RM7K_CONF_SE);

	write_c0_taglo(0);
	write_c0_taghi(0);

	for (i = 0; i < scache_size; i += sc_lsize)
		cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
}
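
/*
 * Turn the secondary cache on, and the tertiary cache as well if one has
 * been initialised.
 */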
static void rm7k_sc_enable(void)
{
	if (read_c0_config() & RM7K_CONF_SE)
		return;

	pr_info("Enabling secondary cache...\n");
	run_uncached(__rm7k_sc_enable);

	if (rm7k_tcache_init)
		rm7k_tc_enable();
}
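
/*
 * Invalidate and disable the tertiary cache with interrupts off.
 */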
static void rm7k_tc_disable(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_rm7k_tcache();
	clear_c0_config(RM7K_CONF_TE);
	local_irq_restore(flags);
}
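
/*
 * Disable the secondary cache, taking the tertiary cache down with it.
 */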
static void rm7k_sc_disable(void)
{
	clear_c0_config(RM7K_CONF_SE);

	if (rm7k_tcache_init)
		rm7k_tc_disable();
}
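
/*
 * Cache operations handed to the generic MIPS cache code through bcops
 * (see rm7k_sc_init() below).
 */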
static struct bcache_ops rm7k_sc_ops = {
	.bc_enable = rm7k_sc_enable,
	.bc_disable = rm7k_sc_disable,
	.bc_wback_inv = rm7k_sc_wback_inv,
	.bc_inv = rm7k_sc_inv
};

/*
 * Probe the tertiary cache size by looking for the tag wrap-around point,
 * in the same spirit as the probing code in c-r4k.c.
 */
static void __probe_tcache(void)
{
	unsigned long flags, addr, begin, end, pow2;

	begin = (unsigned long) &_stext;
	begin &= ~((8 * 1024 * 1024) - 1);
	end = begin + (8 * 1024 * 1024);

	local_irq_save(flags);

	set_c0_config(RM7K_CONF_TE);

	/* Fill size-multiple lines with a valid tag. */
	pow2 = (256 * 1024);
	for (addr = begin; addr <= end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p));
		pow2 <<= 1;
	}

	/* Load the first line with a zero tag, to check for it below. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	cache_op(Index_Store_Tag_T, begin);

	/* Look for the wrap-around point. */
	pow2 = (512 * 1024);
	for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_T, addr);
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}

	addr -= begin;
	tcache_size = addr;

	clear_c0_config(RM7K_CONF_TE);

	local_irq_restore(flags);
}
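
/*
 * Set up and enable the secondary cache, register the cache operations,
 * then probe for and enable a tertiary cache if the hardware reports one.
 */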
void rm7k_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();

	if ((config & RM7K_CONF_SC))
		return;

	c->scache.linesz = sc_lsize;
	c->scache.ways = 4;
	c->scache.waybit = __ffs(scache_size / c->scache.ways);
	c->scache.waysize = scache_size / c->scache.ways;
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
	       (scache_size >> 10), sc_lsize);

	if (!(config & RM7K_CONF_SE))
		rm7k_sc_enable();

	bcops = &rm7k_sc_ops;

	/*
	 * While we're at it, deal with the tertiary cache too.
	 */

	rm7k_tcache_init = 0;
	tcache_size = 0;

	if (config & RM7K_CONF_TC)
		return;

	/*
	 * There is no efficient way to ask the hardware for the size of the
	 * tcache, so probe for it.
	 */
	run_uncached(__probe_tcache);
	rm7k_tc_enable();
	rm7k_tcache_init = 1;
	c->tcache.linesz = tc_lsize;
	c->tcache.ways = 1;
	pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
}