/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening, we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs, we let the compiler do it via a type cast.
 */
#define INDEX_BASE  CKSEG0

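/*
 * _cache_op() wraps a single MIPS "cache" instruction: the insn argument
 * (kernel_cache or user_cache) selects the instruction form, op is the
 * cache opcode, and addr names the cache line to operate on.  cache_op()
 * is the kernel-address form used by the line helpers below.
 */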
#define _cache_op(insn, op, addr)                   \
    __asm__ __volatile__(                       \
    "   .set    push                    \n" \
    "   .set    noreorder               \n" \
    "   .set "MIPS_ISA_ARCH_LEVEL"          \n" \
    "   " insn("%0", "%1") "                \n" \
    "   .set    pop                 \n" \
    :                               \
    : "i" (op), "R" (*(unsigned char *)(addr)))

#define cache_op(op, addr)                      \
    _cache_op(kernel_cache, op, addr)

static inline void flush_icache_line_indexed(unsigned long addr)
{
    cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
    cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
    cache_op(Index_Writeback_Inv_SD, addr);
}
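
/*
 * Illustrative sketch: the indexed helpers take an INDEX_BASE-relative
 * address rather than a real data address.  A caller walking every line of
 * one dcache way might do something like the following (dcache_way_size and
 * line_size are hypothetical values; real code takes them from
 * current_cpu_data):
 *
 *	unsigned long off;
 *
 *	for (off = 0; off < dcache_way_size; off += line_size)
 *		flush_dcache_line_indexed(INDEX_BASE + off);
 */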

static inline void flush_icache_line(unsigned long addr)
{
    switch (boot_cpu_type()) {
    case CPU_LOONGSON2EF:
        cache_op(Hit_Invalidate_I_Loongson2, addr);
        break;

    default:
        cache_op(Hit_Invalidate_I, addr);
        break;
    }
}

static inline void flush_dcache_line(unsigned long addr)
{
    cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
    cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
    cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
    cache_op(Hit_Writeback_Inv_SD, addr);
}

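/*
 * protected_cache_op() is the "safe" variant: instead of faulting on a bad
 * address it returns -EFAULT, using an __ex_table entry to redirect a
 * faulting cache/cachee instruction to the fixup code.  With CONFIG_EVA the
 * cachee instruction is used so the op applies to the user address space.
 */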
#ifdef CONFIG_EVA

#define protected_cache_op(op, addr)                \
({                              \
    int __err = 0;                      \
    __asm__ __volatile__(                   \
    "   .set    push            \n"     \
    "   .set    noreorder       \n"     \
    "   .set    mips0           \n"     \
    "   .set    eva         \n"     \
    "1: cachee  %1, (%2)        \n"     \
    "2: .insn               \n"     \
    "   .set    pop         \n"     \
    "   .section .fixup,\"ax\"      \n"     \
    "3: li  %0, %3          \n"     \
    "   j   2b          \n"     \
    "   .previous           \n"     \
    "   .section __ex_table,\"a\"   \n"     \
    "   "STR(PTR_WD)" 1b, 3b        \n"     \
    "   .previous"                  \
    : "+r" (__err)                      \
    : "i" (op), "r" (addr), "i" (-EFAULT));         \
    __err;                          \
})
#else

#define protected_cache_op(op, addr)                \
({                              \
    int __err = 0;                      \
    __asm__ __volatile__(                   \
    "   .set    push            \n"     \
    "   .set    noreorder       \n"     \
    "   .set "MIPS_ISA_ARCH_LEVEL"  \n"     \
    "1: cache   %1, (%2)        \n"     \
    "2: .insn               \n"     \
    "   .set    pop         \n"     \
    "   .section .fixup,\"ax\"      \n"     \
    "3: li  %0, %3          \n"     \
    "   j   2b          \n"     \
    "   .previous           \n"     \
    "   .section __ex_table,\"a\"   \n"     \
    "   "STR(PTR_WD)" 1b, 3b        \n"     \
    "   .previous"                  \
    : "+r" (__err)                      \
    : "i" (op), "r" (addr), "i" (-EFAULT));         \
    __err;                          \
})
#endif

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
    switch (boot_cpu_type()) {
    case CPU_LOONGSON2EF:
        return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

    default:
        return protected_cache_op(Hit_Invalidate_I, addr);
    }
}
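
/*
 * Illustrative sketch: since the protected variants report failure instead
 * of oopsing, a caller syncing a user trampoline would typically propagate
 * the error (user_addr is a hypothetical user virtual address):
 *
 *	if (protected_flush_icache_line(user_addr))
 *		return -EFAULT;
 */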

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D, which is supported by all R4000-style
 * caches.  Only a single cache line gets needlessly invalidated here, so the
 * penalty is small.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
    return protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
    return protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
    cache_op(Page_Invalidate_T, addr);
}

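/*
 * cache_unroll() expands to "times" back-to-back cache instructions, each
 * acting on the next cache line (addr, addr + lsize, addr + 2*lsize, ...),
 * so the blast loops below touch 32 lines per iteration.
 */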
#define cache_unroll(times, insn, op, addr, lsize) do {         \
    int i = 0;                          \
    unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize)));   \
} while (0)

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)    \
static inline void extra##blast_##pfx##cache##lsize(void)       \
{                                   \
    unsigned long start = INDEX_BASE;               \
    unsigned long end = start + current_cpu_data.desc.waysize;  \
    unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
    unsigned long ws_end = current_cpu_data.desc.ways <<        \
                   current_cpu_data.desc.waybit;        \
    unsigned long ws, addr;                     \
                                    \
    for (ws = 0; ws < ws_end; ws += ws_inc)             \
        for (addr = start; addr < end; addr += lsize * 32)  \
            cache_unroll(32, kernel_cache, indexop,     \
                     addr | ws, lsize);         \
}                                   \
                                    \
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{                                   \
    unsigned long start = page;                 \
    unsigned long end = page + PAGE_SIZE;               \
                                    \
    do {                                \
        cache_unroll(32, kernel_cache, hitop, start, lsize);    \
        start += lsize * 32;                    \
    } while (start < end);                      \
}                                   \
                                    \
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{                                   \
    unsigned long indexmask = current_cpu_data.desc.waysize - 1;    \
    unsigned long start = INDEX_BASE + (page & indexmask);      \
    unsigned long end = start + PAGE_SIZE;              \
    unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
    unsigned long ws_end = current_cpu_data.desc.ways <<        \
                   current_cpu_data.desc.waybit;        \
    unsigned long ws, addr;                     \
                                    \
    for (ws = 0; ws < ws_end; ws += ws_inc)             \
        for (addr = start; addr < end; addr += lsize * 32)  \
            cache_unroll(32, kernel_cache, indexop,     \
                     addr | ws, lsize);         \
}
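
/*
 * Each instantiation below generates three helpers named after its prefix
 * and line size; e.g. the (d, dcache, ..., 32, ) line yields blast_dcache32(),
 * blast_dcache32_page() and blast_dcache32_page_indexed().
 */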

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{                                   \
    unsigned long start = page;                 \
    unsigned long end = page + PAGE_SIZE;               \
                                    \
    do {                                \
        cache_unroll(32, user_cache, hitop, start, lsize);  \
        start += lsize * 32;                    \
    } while (start < end);                      \
}
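
/*
 * These generate blast_dcache16_user_page(), blast_icache16_user_page() and
 * so on, which differ from the kernel variants above only in issuing the
 * user_cache form of the cache op, so they can be used on user mappings.
 */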

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
             16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
             32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
             64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)    \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
                            unsigned long end)  \
{                                   \
    unsigned long lsize = cpu_##desc##_line_size();         \
    unsigned long addr = start & ~(lsize - 1);          \
    unsigned long aend = (end - 1) & ~(lsize - 1);          \
                                    \
    while (1) {                         \
        prot##cache_op(hitop, addr);                \
        if (addr == aend)                   \
            break;                      \
        addr += lsize;                      \
    }                               \
}
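
/*
 * The range helpers operate line by line from start up to and including the
 * line containing end - 1.  The prot argument selects protected_cache_op(),
 * so e.g. protected_blast_dcache_range() won't oops on a bad user address
 * (the -EFAULT result is simply ignored), while plain blast_dcache_range()
 * uses cache_op() directly.
 */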

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
    protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)  \
static inline void blast_##pfx##cache##lsize##_node(long node)      \
{                                   \
    unsigned long start = CAC_BASE | nid_to_addrbase(node);     \
    unsigned long end = start + current_cpu_data.desc.waysize;  \
    unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
    unsigned long ws_end = current_cpu_data.desc.ways <<        \
                   current_cpu_data.desc.waybit;        \
    unsigned long ws, addr;                     \
                                    \
    for (ws = 0; ws < ws_end; ws += ws_inc)             \
        for (addr = start; addr < end; addr += lsize * 32)  \
            cache_unroll(32, kernel_cache, indexop,     \
                     addr | ws, lsize);         \
}
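
/*
 * The blast_scache16_node() ... blast_scache128_node() helpers below
 * index-flush the entire secondary cache of one node; the node's cached
 * base address comes from nid_to_addrbase(), which is why this currently
 * only makes sense on Loongson-3 style multi-node systems.
 */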

__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */