// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v4wb.c
 *
 *  Copyright (C) 1995-1999 Russell King
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * We invalidate the destination cache lines just before we write the data
 * into the corresponding address.  Since the Dcache is read-allocate, the
 * writes miss the cache and are forwarded to the write buffer, where they
 * are merged as appropriate; invalidating first ensures that no stale
 * destination line can later be evicted on top of the new data.  This
 * removes the Dcache aliasing issue.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write
 * your own copy_user_highpage that does the right thing.
 */
static void v4wb_copy_user_page(void *kto, const void *kfrom)
{
    int tmp;

    asm volatile ("\
    .syntax unified\n\
    ldmia   %1!, {r3, r4, ip, lr}       @ 4\n\
1:  mcr p15, 0, %0, c7, c6, 1       @ 1   invalidate D line\n\
    stmia   %0!, {r3, r4, ip, lr}       @ 4\n\
    ldmia   %1!, {r3, r4, ip, lr}       @ 4+1\n\
    stmia   %0!, {r3, r4, ip, lr}       @ 4\n\
    ldmia   %1!, {r3, r4, ip, lr}       @ 4\n\
    mcr p15, 0, %0, c7, c6, 1       @ 1   invalidate D line\n\
    stmia   %0!, {r3, r4, ip, lr}       @ 4\n\
    ldmia   %1!, {r3, r4, ip, lr}       @ 4\n\
    subs    %2, %2, #1          @ 1\n\
    stmia   %0!, {r3, r4, ip, lr}       @ 4\n\
    ldmiane %1!, {r3, r4, ip, lr}       @ 4\n\
    bne 1b              @ 1\n\
    mcr p15, 0, %1, c7, c10, 4      @ 1   drain WB"
    : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
    : "2" (PAGE_SIZE / 64)
    : "r3", "r4", "ip", "lr");
}

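/*
 * For illustration only (not built): a plain-C sketch of the copy the
 * assembly above performs.  Each loop iteration moves 64 bytes in
 * 16-byte ldm/stm bursts.  What C cannot express are the per-line
 * "invalidate D line" MCRs (two per iteration, so cache lines are 32
 * bytes here) and the final write-buffer drain, so this sketch is not
 * a safe substitute on an aliasing VIVT cache.
 */
#if 0
static void v4wb_copy_user_page_c(void *kto, const void *kfrom)
{
    unsigned long *to = kto;
    const unsigned long *from = kfrom;
    int count = PAGE_SIZE / 64;     /* 64 bytes per loop, as above */

    while (count--) {
        int i;

        /* the asm would invalidate the two D lines at 'to' here */
        for (i = 0; i < 16; i++)
            *to++ = *from++;
    }
    /* the asm drains the write buffer here */
}
#endif
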
void v4wb_copy_user_highpage(struct page *to, struct page *from,
    unsigned long vaddr, struct vm_area_struct *vma)
{
    void *kto, *kfrom;

    kto = kmap_atomic(to);
    kfrom = kmap_atomic(from);
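    /*
     * Write back any dirty user-space alias of the source page so
     * that reads through the kernel mapping (kfrom) see current
     * data on this VIVT cache.
     */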
    flush_cache_page(vma, vaddr, page_to_pfn(from));
    v4wb_copy_user_page(kto, kfrom);
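    /* atomic kmaps nest; release them in reverse order of mapping */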
    kunmap_atomic(kfrom);
    kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
    void *ptr, *kaddr = kmap_atomic(page);
    asm volatile("\
    mov r1, %2              @ 1\n\
    mov r2, #0              @ 1\n\
    mov r3, #0              @ 1\n\
    mov ip, #0              @ 1\n\
    mov lr, #0              @ 1\n\
1:  mcr p15, 0, %0, c7, c6, 1       @ 1   invalidate D line\n\
    stmia   %0!, {r2, r3, ip, lr}       @ 4\n\
    stmia   %0!, {r2, r3, ip, lr}       @ 4\n\
    mcr p15, 0, %0, c7, c6, 1       @ 1   invalidate D line\n\
    stmia   %0!, {r2, r3, ip, lr}       @ 4\n\
    stmia   %0!, {r2, r3, ip, lr}       @ 4\n\
    subs    r1, r1, #1          @ 1\n\
    bne 1b              @ 1\n\
    mcr p15, 0, r1, c7, c10, 4      @ 1   drain WB"
    : "=r" (ptr)
    : "0" (kaddr), "I" (PAGE_SIZE / 64)
    : "r1", "r2", "r3", "ip", "lr");
    kunmap_atomic(kaddr);
}

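/*
 * Illustrative note: minus the cache maintenance, the loop above is
 * just memset(kaddr, 0, PAGE_SIZE), clearing 64 bytes per iteration
 * in 16-byte bursts.  By the final MCR, r1 has counted down to zero,
 * so it doubles as the should-be-zero operand of the "drain write
 * buffer" operation.
 */
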
struct cpu_user_fns v4wb_user_fns __initdata = {
    .cpu_clear_user_highpage = v4wb_clear_user_highpage,
    .cpu_copy_user_highpage = v4wb_copy_user_highpage,
};
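
/*
 * For context, a hedged sketch of how this struct is consumed (the
 * exact plumbing lives outside this file, in the ARM port's page.h
 * and boot code): when the kernel is built with several possible
 * user-page implementations, boot code copies the selected CPU's
 * cpu_user_fns into a global, and the generic helpers dispatch
 * through it, roughly:
 *
 *	extern struct cpu_user_fns cpu_user;
 *
 *	#define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage
 *	#define __cpu_copy_user_highpage  cpu_user.cpu_copy_user_highpage
 */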