Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  linux/arch/arm/mm/copypage-v4wt.S
0004  *
0005  *  Copyright (C) 1995-1999 Russell King
0006  *
0007  *  This is for CPUs with a writethrough cache and 'flush ID cache' is
0008  *  the only supported cache operation.
0009  */
0010 #include <linux/init.h>
0011 #include <linux/highmem.h>
0012 
0013 /*
0014  * ARMv4 optimised copy_user_highpage
0015  *
0016  * Since we have writethrough caches, we don't have to worry about
0017  * dirty data in the cache.  However, we do have to ensure that
0018  * subsequent reads are up to date.
0019  */
/*
 * Copy one page (PAGE_SIZE bytes) from kfrom to kto.
 *
 * Each ldmia/stmia pair moves 16 bytes (r3, r4, ip, lr); the loop body
 * performs four such pairs, i.e. 64 bytes per iteration, which is why the
 * loop count is seeded with PAGE_SIZE / 64.  The loop is software-pipelined:
 * the first ldmia runs before the loop and the conditional "ldmiane" at the
 * bottom pre-loads the next 16 bytes only when "subs" left the count
 * non-zero, so the final iteration does not over-read past the source page.
 * The trailing "mcr p15, 0, %2, c7, c7, 0" flushes the ID cache (the only
 * cache operation this CPU class supports, per the file header) so
 * subsequent reads observe the new data.
 *
 * Constraints: %0/%1 are read-write ("+&r") because the asm post-increments
 * both pointers; %2 is tied to the loop count input and doubles as the
 * (don't-care) register operand of the mcr; r3, r4, ip, lr are clobbered.
 */
static void v4wt_copy_user_page(void *kto, const void *kfrom)
{
    int tmp;

    asm volatile ("\
    .syntax unified\n\
    ldmia   %1!, {r3, r4, ip, lr}       @ 4\n\
1:  stmia   %0!, {r3, r4, ip, lr}       @ 4\n\
    ldmia   %1!, {r3, r4, ip, lr}       @ 4+1\n\
    stmia   %0!, {r3, r4, ip, lr}       @ 4\n\
    ldmia   %1!, {r3, r4, ip, lr}       @ 4\n\
    stmia   %0!, {r3, r4, ip, lr}       @ 4\n\
    ldmia   %1!, {r3, r4, ip, lr}       @ 4\n\
    subs    %2, %2, #1          @ 1\n\
    stmia   %0!, {r3, r4, ip, lr}       @ 4\n\
    ldmiane %1!, {r3, r4, ip, lr}       @ 4\n\
    bne 1b              @ 1\n\
    mcr p15, 0, %2, c7, c7, 0       @ flush ID cache"
    : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
    : "2" (PAGE_SIZE / 64)
    : "r3", "r4", "ip", "lr");
}
0042 
/*
 * Copy a user highpage: map both pages into low memory, delegate the
 * actual 64-bytes-per-iteration copy to v4wt_copy_user_page(), then
 * drop the mappings.  vaddr and vma are unused by this write-through
 * variant (no cache aliasing work to do here).
 */
void v4wt_copy_user_highpage(struct page *to, struct page *from,
    unsigned long vaddr, struct vm_area_struct *vma)
{
    void *dst = kmap_atomic(to);
    void *src = kmap_atomic(from);

    v4wt_copy_user_page(dst, src);

    /* Unmap in the reverse order of mapping. */
    kunmap_atomic(src);
    kunmap_atomic(dst);
}
0054 
0055 /*
0056  * ARMv4 optimised clear_user_page
0057  *
0058  * Same story as above.
0059  */
/*
 * Zero one user highpage.
 *
 * r2, r3, ip and lr are preloaded with zero and stored in four 16-byte
 * stmia bursts per loop iteration (64 bytes), so the loop count is
 * PAGE_SIZE / 64, passed via constraint "I" as an assembler immediate.
 * The final mcr flushes the ID cache, as in v4wt_copy_user_page() above.
 *
 * "ptr" exists only so the asm has a writable register tied to kaddr
 * ("0" matching constraint) that it can post-increment with stmia; the
 * C code never reads it back.
 */
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
{
    void *ptr, *kaddr = kmap_atomic(page);
    asm volatile("\
    mov r1, %2              @ 1\n\
    mov r2, #0              @ 1\n\
    mov r3, #0              @ 1\n\
    mov ip, #0              @ 1\n\
    mov lr, #0              @ 1\n\
1:  stmia   %0!, {r2, r3, ip, lr}       @ 4\n\
    stmia   %0!, {r2, r3, ip, lr}       @ 4\n\
    stmia   %0!, {r2, r3, ip, lr}       @ 4\n\
    stmia   %0!, {r2, r3, ip, lr}       @ 4\n\
    subs    r1, r1, #1          @ 1\n\
    bne 1b              @ 1\n\
    mcr p15, 0, r2, c7, c7, 0       @ flush ID cache"
    : "=r" (ptr)
    : "0" (kaddr), "I" (PAGE_SIZE / 64)
    : "r1", "r2", "r3", "ip", "lr");
    kunmap_atomic(kaddr);
}
0081 
0082 struct cpu_user_fns v4wt_user_fns __initdata = {
0083     .cpu_clear_user_highpage = v4wt_clear_user_highpage,
0084     .cpu_copy_user_highpage = v4wt_copy_user_highpage,
0085 };