// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-xscale.c
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way that
 * accesses to this page will not touch the main data cache, but will
 * be cached in the mini data cache.  This prevents us thrashing the
 * main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into
 * the corresponding address.  Since the Dcache is read-allocate, this
 * removes the Dcache aliasing issue.  The writes will be forwarded to the
 * write buffer, and merged as appropriate.
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile ("\
	.arch xscale					\n\
	pld	[%0, #0]				\n\
	pld	[%0, #32]				\n\
	pld	[%1, #0]				\n\
	pld	[%1, #32]				\n\
1:	pld	[%0, #64]				\n\
	pld	[%0, #96]				\n\
	pld	[%1, #64]				\n\
	pld	[%1, #96]				\n\
2:	ldrd	r2, r3, [%0], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	mov	ip, %1					\n\
	strd	r2, r3, [%1], #8			\n\
	ldrd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	strd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, r3, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, r5, [%0], #8			\n\
	mov	ip, %1					\n\
	strd	r2, r3, [%1], #8			\n\
	ldrd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	strd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	%2, %2, #1				\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b					\n\
	beq	2b				"
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5", "ip");
}
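
/*
 * A note on the loop bounds above, derived from the code itself: %2 starts
 * at PAGE_SIZE / 64 - 1, and each pass through the "2:" sequence copies
 * 64 bytes, i.e. two 32-byte cache lines, each cleaned and invalidated as
 * soon as it has been filled.  "bgt 1b" keeps prefetching 64-96 bytes
 * ahead while more than one block remains; the final block is copied via
 * "beq 2b", which skips the prefetches that would otherwise read past the
 * end of the page.
 */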

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}
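
/*
 * Note: COPYPAGE_MINICACHE is a single kernel virtual window, so
 * minicache_lock serialises its use.  set_top_pte() maps the source page
 * there with the mini-data-cache memory type (minicache_pgprot), so the
 * reads in mc_copy_user_page() allocate into the mini dcache instead of
 * thrashing the main dcache.  The PG_dcache_clean test ensures any dirty
 * lines belonging to the source page's regular kernel mapping are written
 * back before the page is read through this alias.
 */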

/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile("\
	.arch xscale					\n\
	mov	r1, %2					\n\
	mov	r2, #0					\n\
	mov	r3, #0					\n\
1:	mov	ip, %0					\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1				\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}
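
/*
 * Loop bound note: the "I" constraint passes PAGE_SIZE / 32 as an
 * immediate, so the loop above runs once per 32-byte cache line.  Each
 * iteration zeroes one line with four strd stores, then cleans and
 * invalidates it so the zeroed data reaches memory without lingering in
 * the dcache.
 */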

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};
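
/*
 * For reference, a sketch of how this ops table gets used (based on the
 * usual arch/arm MULTI_USER dispatch in <asm/page.h>; the exact wiring
 * may differ between kernel versions):
 *
 *	extern struct cpu_user_fns cpu_user;
 *	#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
 *	#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
 *
 * At boot, the processor's proc_info entry points cpu_user at
 * xscale_mc_user_fns, after which clear_user_highpage() and
 * copy_user_highpage() resolve to the functions defined in this file.
 */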