Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * This file is subject to the terms and conditions of the GNU General Public
0003  * License.  See the file "COPYING" in the main directory of this archive
0004  * for more details.
0005  *
0006  * Copyright (C) 2005-2007 Cavium Networks
0007  */
0008 #include <linux/export.h>
0009 #include <linux/kernel.h>
0010 #include <linux/sched.h>
0011 #include <linux/smp.h>
0012 #include <linux/mm.h>
0013 #include <linux/bitops.h>
0014 #include <linux/cpu.h>
0015 #include <linux/io.h>
0016 
0017 #include <asm/bcache.h>
0018 #include <asm/bootinfo.h>
0019 #include <asm/cacheops.h>
0020 #include <asm/cpu-features.h>
0021 #include <asm/cpu-type.h>
0022 #include <asm/page.h>
0023 #include <asm/r4kcache.h>
0024 #include <asm/traps.h>
0025 #include <asm/mmu_context.h>
0026 
0027 #include <asm/octeon/octeon.h>
0028 
/*
 * Per-core latched Dcache error words, indexed by core number.  Read and
 * cleared by co_cache_error_call_notifiers() on the non-recoverable path;
 * NOTE(review): presumably written by the low-level cache-error vector
 * (except_vec2_octeon) - the writer is not visible in this file, confirm.
 */
unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);
0031 
/*
 * octeon_flush_data_cache_page - per-page dcache flush hook (no-op).
 *
 * Octeon keeps the dcache coherent across TLB changes on its own, so it
 * behaves like a physically tagged cache from the kernel's point of view
 * and a per-page flush is never required.
 *
 * @addr: virtual address of the page (ignored)
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
    /* Intentionally empty - hardware handles dcache coherency. */
}
0042 
/*
 * Flush the icache of the executing core only.
 *
 * NOTE(review): a single "synci" with offset 0 from register $0 is used
 * here with no loop over the range of addresses, so the hardware
 * presumably treats synci as a whole-icache synchronization on Octeon
 * rather than a single-line operation - confirm against the HW manual.
 */
static inline void octeon_local_flush_icache(void)
{
    asm volatile ("synci 0($0)");
}
0047 
/*
 * Flush local I-cache for the specified range.
 *
 * @start: beginning address (ignored)
 * @end:   ending address (ignored)
 *
 * The arguments are ignored: the entire local icache is flushed via
 * octeon_local_flush_icache(), which is at least as strong as any
 * ranged flush.
 */
static void local_octeon_flush_icache_range(unsigned long start,
                        unsigned long end)
{
    octeon_local_flush_icache();
}
0056 
/**
 * octeon_flush_icache_all_cores -  Flush caches as necessary for all cores
 * affected by a vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 *
 * The local core is always flushed directly; on SMP the remaining
 * target cores are flushed by sending them an SMP_ICACHE_FLUSH IPI.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
    /* Prototype kept local; the provider lives in the Octeon SMP code. */
    extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
    int cpu;
    cpumask_t mask;
#endif

    /* Order prior memory writes before the flush and any IPIs. */
    mb();
    octeon_local_flush_icache();
#ifdef CONFIG_SMP
    /* Stay on this CPU so smp_processor_id() and the IPI set agree. */
    preempt_disable();
    cpu = smp_processor_id();

    /*
     * If we have a vma structure, we only need to worry about
     * cores it has been used on
     */
    if (vma)
        mask = *mm_cpumask(vma->vm_mm);
    else
        mask = *cpu_online_mask;
    /* The local core was flushed above - don't IPI ourselves. */
    cpumask_clear_cpu(cpu, &mask);
    for_each_cpu(cpu, &mask)
        octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

    preempt_enable();
#endif
}
0092 
0093 
/*
 * Called to flush the icache on all cores
 *
 * Thin wrapper: passing NULL makes octeon_flush_icache_all_cores()
 * target every online core rather than just those of one mm.
 */
static void octeon_flush_icache_all(void)
{
    octeon_flush_icache_all_cores(NULL);
}
0101 
0102 
/**
 * octeon_flush_cache_mm - flush all memory associated with a memory context.
 *
 * @mm:     Memory context to flush (unused)
 *
 * Deliberately empty: as in the R4K variant of this file, CPUs whose
 * dcache has no virtual aliases have nothing to write back or
 * invalidate when an address space goes away.
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
}
0115 
0116 
/*
 * Flush a range of kernel addresses out of the icache
 *
 * @start/@end are ignored: the whole icache is flushed on every core
 * (NULL vma means "all online cores"), which subsumes a ranged flush.
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
    octeon_flush_icache_all_cores(NULL);
}
0125 
0126 
/**
 * octeon_flush_cache_range - Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  beginning address for flush (ignored)
 * @end:    ending address for flush (ignored)
 *
 * Only executable mappings need work, and then only the icache: the
 * dcache needs no manual maintenance on Octeon.  The flush covers the
 * whole icache of every core the vma's mm has run on.
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
    if (vma->vm_flags & VM_EXEC)
        octeon_flush_icache_all_cores(vma);
}
0140 
0141 
/**
 * octeon_flush_cache_page - Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush (ignored)
 * @pfn:    Page frame number (ignored)
 *
 * As with octeon_flush_cache_range(): only executable mappings matter,
 * and the whole icache is flushed on the affected cores rather than
 * the single page.
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
                    unsigned long page, unsigned long pfn)
{
    if (vma->vm_flags & VM_EXEC)
        octeon_flush_icache_all_cores(vma);
}
0155 
/*
 * Flushing a kernel vmap range is never expected to be required on
 * Octeon; reaching this hook indicates a logic error, so fail loudly
 * instead of silently doing nothing.
 */
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
    BUG();
}
0160 
/*
 * Probe Octeon's caches
 *
 * Fills in the icache/dcache geometry of current_cpu_data for the
 * detected Octeon model and prints a summary on the boot CPU.
 */
static void probe_octeon(void)
{
    unsigned long icache_size;
    unsigned long dcache_size;
    unsigned int config1;
    struct cpuinfo_mips *c = &current_cpu_data;
    int cputype = current_cpu_type();

    config1 = read_c0_config1();
    switch (cputype) {
    case CPU_CAVIUM_OCTEON:
    case CPU_CAVIUM_OCTEON_PLUS:
        /*
         * Icache geometry decoded from CP0 Config1:
         * IL (bits 21:19) line size, IS (24:22) sets per way,
         * IA (18:16) associativity - 1.
         */
        c->icache.linesz = 2 << ((config1 >> 19) & 7);
        c->icache.sets = 64 << ((config1 >> 22) & 7);
        c->icache.ways = 1 + ((config1 >> 16) & 7);
        c->icache.flags |= MIPS_CACHE_VTAG;
        icache_size =
            c->icache.sets * c->icache.ways * c->icache.linesz;
        c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
        /* Dcache geometry is fixed for these parts. */
        c->dcache.linesz = 128;
        if (cputype == CPU_CAVIUM_OCTEON_PLUS)
            c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
        else
            c->dcache.sets = 1; /* CN3XXX has one Dcache set */
        c->dcache.ways = 64;
        dcache_size =
            c->dcache.sets * c->dcache.ways * c->dcache.linesz;
        c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
        c->options |= MIPS_CPU_PREFETCH;
        break;

    case CPU_CAVIUM_OCTEON2:
        /* Only the icache line size comes from Config1 here;
         * sets/ways are hard-coded for this core. */
        c->icache.linesz = 2 << ((config1 >> 19) & 7);
        c->icache.sets = 8;
        c->icache.ways = 37;
        c->icache.flags |= MIPS_CACHE_VTAG;
        icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

        c->dcache.linesz = 128;
        c->dcache.ways = 32;
        c->dcache.sets = 8;
        dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
        c->options |= MIPS_CPU_PREFETCH;
        break;

    case CPU_CAVIUM_OCTEON3:
        /* Fully hard-coded geometry; Config1 is not consulted. */
        c->icache.linesz = 128;
        c->icache.sets = 16;
        c->icache.ways = 39;
        c->icache.flags |= MIPS_CACHE_VTAG;
        icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

        c->dcache.linesz = 128;
        c->dcache.ways = 32;
        c->dcache.sets = 8;
        dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
        c->options |= MIPS_CPU_PREFETCH;
        break;

    default:
        panic("Unsupported Cavium Networks CPU type");
        break;  /* Not reached */
    }

    /* compute a couple of other cache variables */
    c->icache.waysize = icache_size / c->icache.ways;
    c->dcache.waysize = dcache_size / c->dcache.ways;

    /* Recompute sets from totals so all fields stay consistent. */
    c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
    c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

    /* Only the boot CPU reports the cache geometry. */
    if (smp_processor_id() == 0) {
        pr_info("Primary instruction cache %ldkB, %s, %d way, "
            "%d sets, linesize %d bytes.\n",
            icache_size >> 10,
            cpu_has_vtag_icache ?
                "virtually tagged" : "physically tagged",
            c->icache.ways, c->icache.sets, c->icache.linesz);

        pr_info("Primary data cache %ldkB, %d-way, %d sets, "
            "linesize %d bytes.\n",
            dcache_size >> 10, c->dcache.ways,
            c->dcache.sets, c->dcache.linesz);
    }
}
0250 
/*
 * Install the Octeon-specific cache error handler (except_vec2_octeon,
 * 0x80 bytes of code, defined in assembly elsewhere) at exception
 * vector offset 0x100.
 */
static void  octeon_cache_error_setup(void)
{
    extern char except_vec2_octeon;
    set_handler(0x100, &except_vec2_octeon, 0x80);
}
0256 
/*
 * Setup the Octeon cache flush routines
 *
 * Probes the cache geometry, then points the generic MIPS cache-flush
 * hooks at the Octeon implementations.  Most dcache-related hooks are
 * effectively icache-only or no-ops, since the Octeon dcache needs no
 * manual flushing on TLB changes.
 */
void octeon_cache_init(void)
{
    probe_octeon();

    /* No dcache aliasing constraints beyond page granularity. */
    shm_align_mask = PAGE_SIZE - 1;

    flush_cache_all         = octeon_flush_icache_all;
    __flush_cache_all       = octeon_flush_icache_all;
    flush_cache_mm          = octeon_flush_cache_mm;
    flush_cache_page        = octeon_flush_cache_page;
    flush_cache_range       = octeon_flush_cache_range;
    flush_icache_all        = octeon_flush_icache_all;
    flush_data_cache_page       = octeon_flush_data_cache_page;
    flush_icache_range      = octeon_flush_icache_range;
    local_flush_icache_range    = local_octeon_flush_icache_range;
    __flush_icache_user_range   = octeon_flush_icache_range;
    __local_flush_icache_user_range = local_octeon_flush_icache_range;

    __flush_kernel_vmap_range   = octeon_flush_kernel_vmap_range;

    /* Generate the optimized clear_page/copy_page routines. */
    build_clear_page();
    build_copy_page();

    /* Hook the cache-error vector installation (see above). */
    board_cache_error_setup = octeon_cache_error_setup;
}
0286 
/*
 * Handle a cache error exception
 *
 * Interested drivers can register on this chain to be told about
 * cache-error exceptions before the default console dump runs.
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

/* Register @nb on the cache-error notifier chain. Returns 0 on success. */
int register_co_cache_error_notifier(struct notifier_block *nb)
{
    return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);
0297 
/* Remove @nb from the cache-error notifier chain. Returns 0 on success. */
int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
    return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
0303 
/*
 * Run the cache-error notifier chain and, if no notifier reported
 * NOTIFY_OK, dump the raw error state to the console.
 *
 * @val: 0 for a recoverable error - the Dcache status is read live
 *       from CP0; non-zero for a non-recoverable error - the status
 *       was latched into cache_err_dcache[] beforehand.
 */
static void co_cache_error_call_notifiers(unsigned long val)
{
    int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
    if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
        u64 dcache_err;
        unsigned long coreid = cvmx_get_core_num();
        u64 icache_err = read_octeon_c0_icacheerr();

        if (val) {
            /* Consume and clear the latched Dcache error word. */
            dcache_err = cache_err_dcache[coreid];
            cache_err_dcache[coreid] = 0;
        } else {
            dcache_err = read_octeon_c0_dcacheerr();
        }

        pr_err("Core%lu: Cache error exception:\n", coreid);
        pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
        /* Bit 0 set in the error word indicates a valid error. */
        if (icache_err & 1) {
            pr_err("CacheErr (Icache) == %llx\n",
                   (unsigned long long)icache_err);
            /* Acknowledge the Icache error so it can re-trigger. */
            write_octeon_c0_icacheerr(0);
        }
        if (dcache_err & 1) {
            pr_err("CacheErr (Dcache) == %llx\n",
                   (unsigned long long)dcache_err);
        }
    }
}
0332 
/*
 * Called when the exception is recoverable
 *
 * Entered from the low-level exception path; execution can continue
 * after notifying listeners.
 */

asmlinkage void cache_parity_error_octeon_recoverable(void)
{
    co_cache_error_call_notifiers(0);   /* 0: read Dcache state live */
}
0341 
/*
 * Called when the exception is not recoverable
 *
 * Notifies listeners (with the Dcache state already latched in
 * cache_err_dcache[]), then panics - there is no safe way to resume.
 */

asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
    co_cache_error_call_notifiers(1);   /* 1: use latched Dcache state */
    panic("Can't handle cache error: nested exception");
}