Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 
0003 #include <linux/highmem.h>
0004 #include <linux/kprobes.h>
0005 
0006 /**
0007  * flush_coherent_icache() - if a CPU has a coherent icache, flush it
0008  * Return true if the cache was flushed, false otherwise
0009  */
0010 static inline bool flush_coherent_icache(void)
0011 {
0012     /*
0013      * For a snooping icache, we still need a dummy icbi to purge all the
0014      * prefetched instructions from the ifetch buffers. We also need a sync
0015      * before the icbi to order the actual stores to memory that might
0016      * have modified instructions with the icbi.
0017      */
0018     if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
0019         mb(); /* sync */
0020         icbi((void *)PAGE_OFFSET);
0021         mb(); /* sync */
0022         isync();
0023         return true;
0024     }
0025 
0026     return false;
0027 }
0028 
0029 /**
0030  * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
0031  * @start: the start address
0032  * @stop: the stop address (exclusive)
0033  */
/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 *
 * One icbi is issued per L1 icache line covering [start, stop).
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long shift = l1_icache_shift();
    unsigned long bytes = l1_icache_bytes();
    char *p = (char *)(start & ~(bytes - 1));
    /* Line count rounded up so a partial trailing line is covered too. */
    unsigned long nlines = (stop - (unsigned long)p + (bytes - 1)) >> shift;

    while (nlines--) {
        icbi(p);
        p += bytes;
    }

    mb(); /* sync */
    isync();
}
0048 
0049 /**
0050  * flush_icache_range: Write any modified data cache blocks out to memory
0051  * and invalidate the corresponding blocks in the instruction cache
0052  *
0053  * Generic code will call this after writing memory, before executing from it.
0054  *
0055  * @start: the start address
0056  * @stop: the stop address (exclusive)
0057  */
0058 void flush_icache_range(unsigned long start, unsigned long stop)
0059 {
0060     if (flush_coherent_icache())
0061         return;
0062 
0063     clean_dcache_range(start, stop);
0064 
0065     if (IS_ENABLED(CONFIG_44x)) {
0066         /*
0067          * Flash invalidate on 44x because we are passed kmapped
0068          * addresses and this doesn't work for userspace pages due to
0069          * the virtually tagged icache.
0070          */
0071         iccci((void *)start);
0072         mb(); /* sync */
0073         isync();
0074     } else
0075         invalidate_icache_range(start, stop);
0076 }
0077 EXPORT_SYMBOL(flush_icache_range);
0078 
#ifdef CONFIG_HIGHMEM
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 *
 * Used when no kernel virtual mapping of the page is available: data
 * translation is disabled (MSR_DR cleared) so the dcbst/icbi loops below
 * operate on the physical address directly.
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
    unsigned long bytes = l1_dcache_bytes();
    unsigned long nb = PAGE_SIZE / bytes;   /* cache lines per page */
    unsigned long addr = physaddr & PAGE_MASK;
    unsigned long msr, msr0;
    unsigned long loop1 = addr, loop2 = addr;

    msr0 = mfmsr();
    msr = msr0 & ~MSR_DR;   /* MSR value with data relocation off */
    /*
     * This must remain as ASM to prevent potential memory accesses
     * while the data MMU is disabled
     */
    asm volatile(
        "   mtctr %2;\n"            /* ctr = line count */
        "   mtmsr %3;\n"            /* turn data translation off */
        "   isync;\n"
        "0: dcbst   0, %0;\n"       /* write dcache lines back to memory */
        "   addi    %0, %0, %4;\n"
        "   bdnz    0b;\n"
        "   sync;\n"
        "   mtctr %2;\n"
        "1: icbi    0, %1;\n"       /* invalidate matching icache lines */
        "   addi    %1, %1, %4;\n"
        "   bdnz    1b;\n"
        "   sync;\n"
        "   mtmsr %5;\n"            /* restore the original MSR */
        "   isync;\n"
        : "+&r" (loop1), "+&r" (loop2)
        : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
        : "ctr", "memory");
}
/* Must not be kprobed: a trap while data translation is off would be fatal. */
NOKPROBE_SYMBOL(flush_dcache_icache_phys)
#else
/*
 * NOTE(review): stub for !CONFIG_HIGHMEM builds; the caller's highmem
 * branch is presumably never taken there — confirm against
 * flush_dcache_icache_page().
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
}
#endif
0123 
0124 /**
0125  * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
0126  * Note: this is necessary because the instruction cache does *not*
0127  * snoop from the data cache.
0128  *
0129  * @p: the address of the page to flush
0130  */
0131 static void __flush_dcache_icache(void *p)
0132 {
0133     unsigned long addr = (unsigned long)p & PAGE_MASK;
0134 
0135     clean_dcache_range(addr, addr + PAGE_SIZE);
0136 
0137     /*
0138      * We don't flush the icache on 44x. Those have a virtual icache and we
0139      * don't have access to the virtual address here (it's not the page
0140      * vaddr but where it's mapped in user space). The flushing of the
0141      * icache on these is handled elsewhere, when a change in the address
0142      * space occurs, before returning to user space.
0143      */
0144 
0145     if (mmu_has_feature(MMU_FTR_TYPE_44x))
0146         return;
0147 
0148     invalidate_icache_range(addr, addr + PAGE_SIZE);
0149 }
0150 
0151 static void flush_dcache_icache_hugepage(struct page *page)
0152 {
0153     int i;
0154     int nr = compound_nr(page);
0155 
0156     if (!PageHighMem(page)) {
0157         for (i = 0; i < nr; i++)
0158             __flush_dcache_icache(lowmem_page_address(page + i));
0159     } else {
0160         for (i = 0; i < nr; i++) {
0161             void *start = kmap_local_page(page + i);
0162 
0163             __flush_dcache_icache(start);
0164             kunmap_local(start);
0165         }
0166     }
0167 }
0168 
0169 void flush_dcache_icache_page(struct page *page)
0170 {
0171     if (flush_coherent_icache())
0172         return;
0173 
0174     if (PageCompound(page))
0175         return flush_dcache_icache_hugepage(page);
0176 
0177     if (!PageHighMem(page)) {
0178         __flush_dcache_icache(lowmem_page_address(page));
0179     } else if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
0180         void *start = kmap_local_page(page);
0181 
0182         __flush_dcache_icache(start);
0183         kunmap_local(start);
0184     } else {
0185         flush_dcache_icache_phys(page_to_phys(page));
0186     }
0187 }
0188 EXPORT_SYMBOL(flush_dcache_icache_page);
0189 
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
    clear_page(page);

    /*
     * Strictly this flush shouldn't be necessary, but some glibc
     * versions depend on it: ld.so assumes zero-filled pages are
     * icache clean.
     * - Anton
     */
    flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
0202 
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
            struct page *pg)
{
    copy_page(vto, vfrom);

    /*
     * In principle only executable mappings would need the flush below,
     * i.e. we could skip it with:
     *
     *   if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
     *       return;
     *
     * but that optimisation is unsafe for two reasons. Firstly a bug in
     * some versions of binutils meant PLT sections were not marked
     * executable. Secondly the first word in the GOT section is blrl,
     * used to establish the GOT address, and until recently the GOT was
     * not marked executable either.
     * - Anton
     */
    flush_dcache_page(pg);
}
0225 
0226 void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
0227                  unsigned long addr, int len)
0228 {
0229     void *maddr;
0230 
0231     maddr = kmap_local_page(page) + (addr & ~PAGE_MASK);
0232     flush_icache_range((unsigned long)maddr, (unsigned long)maddr + len);
0233     kunmap_local(maddr);
0234 }