// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-xsc3.c
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */
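/*
 * To that end, both loops below issue "mcr p15, 0, <Rd>, c7, c6, 1" (a
 * D-cache line invalidate by MVA) on each destination line just before
 * overwriting it in full, so whatever was cached there is discarded
 * rather than written back.
 */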

/*
 * XSC3 optimised copy_user_highpage
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

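	/*
	 * %0 = kto, %1 = kfrom, %2 = tmp, the number of 64-byte blocks
	 * still to copy.  Each pass moves 64 bytes (two 32-byte cache
	 * lines) via ldrd/strd pairs, invalidating each destination line
	 * just before it is rewritten.  The plds prefetch the source a
	 * couple of lines ahead; tmp starts at PAGE_SIZE / 64 - 1 so the
	 * last block takes the "beq 2b" path, which skips the prefetches
	 * and so never touches past the end of the source page.
	 */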
	asm volatile ("\
.arch xscale					\n\
	pld	[%1, #0]			\n\
	pld	[%1, #32]			\n\
1:	pld	[%1, #64]			\n\
	pld	[%1, #96]			\n\
						\n\
2:	ldrd	r2, r3, [%1], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8		\n\
	ldrd	r2, r3, [%1], #8		\n\
	strd	r4, r5, [%0], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%0], #8		\n\
	ldrd	r2, r3, [%1], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8		\n\
	ldrd	r2, r3, [%1], #8		\n\
	subs	%2, %2, #1			\n\
	strd	r4, r5, [%0], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%0], #8		\n\
	bgt	1b				\n\
	beq	2b				"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5");
}

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
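	/*
	 * The XScale cache is VIVT: flush the user-space alias of the
	 * source page so the reads through the kernel mapping below see
	 * current data.
	 */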
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	xsc3_mc_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * XSC3 optimised clear_user_highpage
 */
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
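	/*
	 * %0 walks the page a cache line at a time, %2 is PAGE_SIZE / 32,
	 * the line count, as an immediate ("I") operand.  r2/r3 hold the
	 * zero pattern; each pass invalidates one 32-byte line and then
	 * zeroes it with four strd stores.
	 */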
	asm volatile ("\
.arch xscale					\n\
	mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	subs	r1, r1, #1			\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3");
	kunmap_atomic(kaddr);
}

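/*
 * Boot-time processor setup points cpu_user at this structure on XSC3
 * parts; the generic clear_user_highpage()/copy_user_highpage() paths
 * then dispatch through it (see arch/arm/include/asm/page.h).  It can
 * live in __initdata because it is copied, not referenced, at boot.
 */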
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
};