/*
 * ARMv4 write-through cache page copy/clear routines.
 *
 * This is for CPUs with a write-through cache, where 'flush ID cache'
 * is the only supported cache operation.
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * With a write-through cache there is no dirty data to worry about;
 * we only need to flush the ID cache afterwards so that subsequent
 * reads see the copied data.
 */
static void v4wt_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

	/* Copy 64 bytes (four 16-byte ldm/stm pairs) per loop iteration. */
	asm volatile ("\
	.syntax unified\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
1:	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmiane	%1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, %2, c7, c7, 0		@ flush ID cache"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r3", "r4", "ip", "lr");
}

void v4wt_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	v4wt_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}
/*
 * ARMv4 optimised clear_user_highpage
 *
 * Same story as the copy above: write-through cache, so only the
 * ID cache flush is needed after zeroing the page.
 */
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);

	/* Zero 64 bytes (four 16-byte stmia) per loop iteration. */
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns v4wt_user_fns __initdata = {
	.cpu_clear_user_highpage = v4wt_clear_user_highpage,
	.cpu_copy_user_highpage	= v4wt_copy_user_highpage,
};
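
/*
 * Sketch of how this table is consumed (not part of this file; the
 * exact macro names vary by kernel version): on a kernel built with
 * more than one user-page implementation, the ARM core code keeps a
 * global 'struct cpu_user_fns cpu_user' that is filled in from the
 * selected processor's table at boot, and the generic page helpers
 * dispatch through it, roughly:
 *
 *	extern struct cpu_user_fns cpu_user;
 *
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		cpu_user.cpu_copy_user_highpage(to, from, vaddr, vma)
 *	#define clear_user_highpage(page, vaddr) \
 *		cpu_user.cpu_clear_user_highpage(page, vaddr)
 */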