// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 */
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>

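/* L2 cache enable bit in the CP15 control register */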
#define CR_L2   (1 << 26)

#define CACHE_LINE_SIZE     32
#define CACHE_LINE_SHIFT    5
#define CACHE_WAY_PER_SET   8

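/*
 * The way size, and hence the number of sets per way, is decoded from
 * the L2 Cache Type register.
 */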
#define CACHE_WAY_SIZE(l2ctype) (8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype) (CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)

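/*
 * Read the L2 Cache Type register; a non-zero size/associativity field
 * (bits [7:3]) means an L2 cache is fitted.
 */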
static inline int xsc3_l2_present(void)
{
    unsigned long l2ctype;

    __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

    return !!(l2ctype & 0xf8);
}

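/* Clean (write back) one L2 cache line by modified virtual address (MVA). */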
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
    __asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}

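/* Invalidate one L2 cache line by modified virtual address (MVA). */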
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
    __asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}

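/*
 * Invalidate the whole L2 cache by issuing a set/way operation for every
 * set/way combination: the way index lives in bits [31:29] and the set
 * index starts at bit CACHE_LINE_SHIFT of the register operand.
 */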
static inline void xsc3_l2_inv_all(void)
{
    unsigned long l2ctype, set_way;
    int set, way;

    __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

    for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
        for (way = 0; way < CACHE_WAY_PER_SET; way++) {
            set_way = (way << 29) | (set << CACHE_LINE_SHIFT);
            __asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
        }
    }

    dsb();
}

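/*
 * Tear down the temporary highmem mapping set up by l2_map_va(), if any.
 * A 'va' of -1 means no mapping is currently in place.
 */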
static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
    if (va != -1)
        kunmap_atomic((void *)va);
#endif
}

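/*
 * Return a virtual address for the physical address 'pa' so that the
 * MVA-based cache operations above can be used.  With CONFIG_HIGHMEM the
 * page is mapped via kmap_atomic_pfn(); 'prev_va' identifies the mapping
 * made by the previous call, which is reused while we stay within the
 * same page and torn down when we move on to a new one.
 */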
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
    unsigned long va = prev_va & PAGE_MASK;
    unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
    if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
        /*
         * Switching to a new page.  Because cache ops are
         * using virtual addresses only, we must put a mapping
         * in place for it.
         */
        l2_unmap_va(prev_va);
        va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
    }
    return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
    return __phys_to_virt(pa);
#endif
}

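/*
 * Invalidate the L2 cache lines covering [start, end).  Partial lines at
 * either end of the range are cleaned before being invalidated so that
 * unrelated data sharing those lines is not discarded.
 */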
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
    unsigned long vaddr;

    if (start == 0 && end == -1ul) {
        xsc3_l2_inv_all();
        return;
    }

    vaddr = -1;  /* to force the first mapping */

    /*
     * Clean and invalidate partial first cache line.
     */
    if (start & (CACHE_LINE_SIZE - 1)) {
        vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
        xsc3_l2_clean_mva(vaddr);
        xsc3_l2_inv_mva(vaddr);
        start = (start | (CACHE_LINE_SIZE - 1)) + 1;
    }

    /*
     * Invalidate all full cache lines between 'start' and 'end'.
     */
    while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
        vaddr = l2_map_va(start, vaddr);
        xsc3_l2_inv_mva(vaddr);
        start += CACHE_LINE_SIZE;
    }

    /*
     * Clean and invalidate partial last cache line.
     */
    if (start < end) {
        vaddr = l2_map_va(start, vaddr);
        xsc3_l2_clean_mva(vaddr);
        xsc3_l2_inv_mva(vaddr);
    }

    l2_unmap_va(vaddr);

    dsb();
}

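/*
 * Clean (write back) the L2 cache lines covering [start, end).
 */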
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
    unsigned long vaddr;

    vaddr = -1;  /* to force the first mapping */

    start &= ~(CACHE_LINE_SIZE - 1);
    while (start < end) {
        vaddr = l2_map_va(start, vaddr);
        xsc3_l2_clean_mva(vaddr);
        start += CACHE_LINE_SIZE;
    }

    l2_unmap_va(vaddr);

    dsb();
}

/*
 * Clean and invalidate the whole L2 cache using set/way operations,
 * which is cheaper than walking a full address range line by line.
 */
static inline void xsc3_l2_flush_all(void)
{
    unsigned long l2ctype, set_way;
    int set, way;

    __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

    for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
        for (way = 0; way < CACHE_WAY_PER_SET; way++) {
            set_way = (way << 29) | (set << CACHE_LINE_SHIFT);
            __asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
        }
    }

    dsb();
}

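/*
 * Clean and invalidate the L2 cache lines covering [start, end).  A full
 * address-space range (0 to ~0) is turned into a flush of the whole cache.
 */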
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
    unsigned long vaddr;

    if (start == 0 && end == -1ul) {
        xsc3_l2_flush_all();
        return;
    }

    vaddr = -1;  /* to force the first mapping */

    start &= ~(CACHE_LINE_SIZE - 1);
    while (start < end) {
        vaddr = l2_map_va(start, vaddr);
        xsc3_l2_clean_mva(vaddr);
        xsc3_l2_inv_mva(vaddr);
        start += CACHE_LINE_SIZE;
    }

    l2_unmap_va(vaddr);

    dsb();
}

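/*
 * Hook the XScale3 L2 maintenance routines up as the system's outer cache
 * operations, provided the CPU is an XScale3, an L2 cache is fitted and
 * the L2 enable bit is set in the control register.
 */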
static int __init xsc3_l2_init(void)
{
    if (!cpu_is_xsc3() || !xsc3_l2_present())
        return 0;

    if (get_cr() & CR_L2) {
        pr_info("XScale3 L2 cache enabled.\n");
        xsc3_l2_inv_all();

        outer_cache.inv_range = xsc3_l2_inv_range;
        outer_cache.clean_range = xsc3_l2_clean_range;
        outer_cache.flush_range = xsc3_l2_flush_range;
    }

    return 0;
}
core_initcall(xsc3_l2_init);