Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 /*
0003  *  linux/arch/arm/mm/proc-arm922.S: MMU functions for ARM922
0004  *
0005  *  Copyright (C) 1999,2000 ARM Limited
0006  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
0007  *  Copyright (C) 2001 Altera Corporation
0008  *  hacked for non-paged-MM by Hyok S. Choi, 2003.
0009  *
0010  * These are the low level assembler for performing cache and TLB
0011  * functions on the arm922.
0012  *
0013  *  CONFIG_CPU_ARM922_CPU_IDLE -> nohlt
0014  */
0015 #include <linux/linkage.h>
0016 #include <linux/init.h>
0017 #include <linux/pgtable.h>
0018 #include <asm/assembler.h>
0019 #include <asm/hwcap.h>
0020 #include <asm/pgtable-hwdef.h>
0021 #include <asm/page.h>
0022 #include <asm/ptrace.h>
0023 #include "proc-macros.S"
0024 
0025 /*
0026  * The size of one data cache line, in bytes.
0027  */
0028 #define CACHE_DLINESIZE 32
0029 
0030 /*
0031  * The number of data cache segments (the ARM922 has 4, half that
 * of the ARM920 — the index-op loops below iterate 3..0).
0032  */
0033 #define CACHE_DSEGMENTS 4
0034 
0035 /*
0036  * The number of lines in a cache segment.
0037  */
0038 #define CACHE_DENTRIES  64
0039 
0040 /*
0041  * This is the size at which it becomes more efficient to
0042  * clean the whole cache, rather than using the individual
0043  * cache line maintenance instructions.  (I think this should
0044  * be 32768).
0045  */
0046 #define CACHE_DLIMIT    8192
0047 
0048 
0049     .text
0050 /*
0051  * cpu_arm922_proc_init()
 *
 * No ARM922-specific processor initialisation is required.
0052  */
0053 ENTRY(cpu_arm922_proc_init)
0054     ret lr
0055 
0056 /*
0057  * cpu_arm922_proc_fin()
0058  */
0059 ENTRY(cpu_arm922_proc_fin)
     @ Turn off caching before the processor goes down: clear the
     @ I (ICache), W (write buffer), C (DCache) and A (alignment fault)
     @ enable bits in the CP15 control register.
0060     mrc p15, 0, r0, c1, c0, 0       @ ctrl register
0061     bic r0, r0, #0x1000         @ ...i............ (ICache enable)
0062     bic r0, r0, #0x000e         @ ............wca. (WB/DCache/align)
0063     mcr p15, 0, r0, c1, c0, 0       @ disable caches
0064     ret lr
0065 
0066 /*
0067  * cpu_arm922_reset(loc)
0068  *
0069  * Perform a soft reset of the system.  Put the CPU into the
0070  * same state as it would be if it had been reset, and branch
0071  * to what would be the reset vector.
0072  *
0073  * loc: location to jump to for soft reset
0074  */
0075     .align  5
0076     .pushsection    .idmap.text, "ax"
     @ Placed in .idmap.text: this code must keep executing while the MMU
     @ is being disabled, so it runs from an identity-mapped region.
     @ r0 = location to branch to for the soft reset.
0077 ENTRY(cpu_arm922_reset)
0078     mov ip, #0
0079     mcr p15, 0, ip, c7, c7, 0       @ invalidate I,D caches
0080     mcr p15, 0, ip, c7, c10, 4      @ drain WB
0081 #ifdef CONFIG_MMU
0082     mcr p15, 0, ip, c8, c7, 0       @ invalidate I & D TLBs
0083 #endif
0084     mrc p15, 0, ip, c1, c0, 0       @ ctrl register
0085     bic ip, ip, #0x000f         @ ............wcam (WB/DCache/align/MMU off)
0086     bic ip, ip, #0x1100         @ ...i...s........ (ICache/system bit off)
0087     mcr p15, 0, ip, c1, c0, 0       @ ctrl register
0088     ret r0                          @ jump to the reset vector location
0089 ENDPROC(cpu_arm922_reset)
0090     .popsection
0091 
0092 /*
0093  * cpu_arm922_do_idle()
0094  */
0095     .align  5
0096 ENTRY(cpu_arm922_do_idle)
0097     mcr p15, 0, r0, c7, c0, 4       @ Wait for interrupt (CP15 WFI; r0 value ignored)
0098     ret lr
0099 
0100 
0101 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
0102 
0103 /*
0104  *  flush_icache_all()
0105  *
0106  *  Unconditionally clean and invalidate the entire icache.
0107  */
0108 ENTRY(arm922_flush_icache_all)
     @ Invalidate the entire ICache in one CP15 operation.  (Despite the
     @ header comment, no cleaning is needed: the ICache is read-only.)
0109     mov r0, #0
0110     mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
0111     ret lr
0112 ENDPROC(arm922_flush_icache_all)
0113 
0114 /*
0115  *  flush_user_cache_all()
0116  *
0117  *  Clean and invalidate all cache entries in a particular
0118  *  address space.
0119  */
0120 ENTRY(arm922_flush_user_cache_all)
0121     /* FALLTHROUGH */
0122 
0123 /*
0124  *  flush_kern_cache_all()
0125  *
0126  *  Clean and invalidate the entire cache.
0127  */
0128 ENTRY(arm922_flush_kern_cache_all)
0129     mov r2, #VM_EXEC
0130     mov ip, #0
     @ Walk every set/way using index-format CP15 ops: r1 holds the
     @ segment field (bits 5+), r3 adds the entry field (bits 26+).
0131 __flush_whole_cache:
0132     mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 4 segments
0133 1:  orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
0134 2:  mcr p15, 0, r3, c7, c14, 2      @ clean+invalidate D index
0135     subs    r3, r3, #1 << 26
0136     bcs 2b              @ entries 63 to 0
0137     subs    r1, r1, #1 << 5
0138     bcs 1b              @ segments 3 to 0
0139     tst r2, #VM_EXEC
0140     mcrne   p15, 0, ip, c7, c5, 0       @ invalidate I cache
0141     mcrne   p15, 0, ip, c7, c10, 4      @ drain WB
0142     ret lr
0143 
0144 /*
0145  *  flush_user_cache_range(start, end, flags)
0146  *
0147  *  Clean and invalidate a range of cache entries in the
0148  *  specified address range.
0149  *
0150  *  - start - start address (inclusive)
0151  *  - end   - end address (exclusive)
0152  *  - flags - vm_flags describing address space
0153  */
0154 ENTRY(arm922_flush_user_cache_range)
     @ r0 = start, r1 = end (exclusive), r2 = vm_flags.
     @ For large ranges a whole-cache flush is cheaper than per-line ops.
0155     mov ip, #0
0156     sub r3, r1, r0          @ calculate total size
0157     cmp r3, #CACHE_DLIMIT
0158     bhs __flush_whole_cache
0159 
0160 1:  mcr p15, 0, r0, c7, c14, 1      @ clean+invalidate D entry
0161     tst r2, #VM_EXEC
0162     mcrne   p15, 0, r0, c7, c5, 1       @ invalidate I entry (executable mapping only)
0163     add r0, r0, #CACHE_DLINESIZE
0164     cmp r0, r1
0165     blo 1b
0166     tst r2, #VM_EXEC
0167     mcrne   p15, 0, ip, c7, c10, 4      @ drain WB
0168     ret lr
0169 
0170 /*
0171  *  coherent_kern_range(start, end)
0172  *
0173  *  Ensure coherency between the Icache and the Dcache in the
0174  *  region described by start, end.  If you have non-snooping
0175  *  Harvard caches, you need to implement this function.
0176  *
0177  *  - start - virtual start address
0178  *  - end   - virtual end address
0179  */
0180 ENTRY(arm922_coherent_kern_range)
0181     /* FALLTHROUGH */
0182 
0183 /*
0184  *  coherent_user_range(start, end)
0185  *
0186  *  Ensure coherency between the Icache and the Dcache in the
0187  *  region described by start, end.  If you have non-snooping
0188  *  Harvard caches, you need to implement this function.
0189  *
0190  *  - start - virtual start address
0191  *  - end   - virtual end address
0192  */
0193 ENTRY(arm922_coherent_user_range)
     @ Push each D-cache line to memory, then drop the stale I-cache line,
     @ so newly written instructions become visible to the ICache.
0194     bic r0, r0, #CACHE_DLINESIZE - 1
0195 1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0196     mcr p15, 0, r0, c7, c5, 1       @ invalidate I entry
0197     add r0, r0, #CACHE_DLINESIZE
0198     cmp r0, r1
0199     blo 1b
0200     mcr p15, 0, r0, c7, c10, 4      @ drain WB
0201     mov r0, #0                      @ return 0 (success)
0202     ret lr
0203 
0204 /*
0205  *  flush_kern_dcache_area(void *addr, size_t size)
0206  *
0207  *  Ensure no D cache aliasing occurs, either with itself or
0208  *  the I cache
0209  *
0210  *  - addr  - kernel address
0211  *  - size  - region size
0212  */
0213 ENTRY(arm922_flush_kern_dcache_area)
     @ r0 = kernel address, r1 = size; convert size to an end pointer.
0214     add r1, r0, r1
0215 1:  mcr p15, 0, r0, c7, c14, 1      @ clean+invalidate D entry
0216     add r0, r0, #CACHE_DLINESIZE
0217     cmp r0, r1
0218     blo 1b
0219     mov r0, #0
0220     mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
0221     mcr p15, 0, r0, c7, c10, 4      @ drain WB
0222     ret lr
0223 
0224 /*
0225  *  dma_inv_range(start, end)
0226  *
0227  *  Invalidate (discard) the specified virtual address range.
0228  *  May not write back any entries.  If 'start' or 'end'
0229  *  are not cache line aligned, those lines must be written
0230  *  back.
0231  *
0232  *  - start - virtual start address
0233  *  - end   - virtual end address
0234  *
0235  * (same as v4wb)
0236  */
0237 arm922_dma_inv_range:
     @ Partial lines at either boundary may hold unrelated dirty data
     @ sharing the cache line, so clean those lines before invalidating.
0238     tst r0, #CACHE_DLINESIZE - 1
0239     bic r0, r0, #CACHE_DLINESIZE - 1
0240     mcrne   p15, 0, r0, c7, c10, 1      @ clean D entry (unaligned start)
0241     tst r1, #CACHE_DLINESIZE - 1
0242     mcrne   p15, 0, r1, c7, c10, 1      @ clean D entry (unaligned end)
0243 1:  mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
0244     add r0, r0, #CACHE_DLINESIZE
0245     cmp r0, r1
0246     blo 1b
0247     mcr p15, 0, r0, c7, c10, 4      @ drain WB
0248     ret lr
0249 
0250 /*
0251  *  dma_clean_range(start, end)
0252  *
0253  *  Clean the specified virtual address range.
0254  *
0255  *  - start - virtual start address
0256  *  - end   - virtual end address
0257  *
0258  * (same as v4wb)
0259  */
0260 arm922_dma_clean_range:
     @ Write back (without invalidating) every line in [r0, r1).
0261     bic r0, r0, #CACHE_DLINESIZE - 1
0262 1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0263     add r0, r0, #CACHE_DLINESIZE
0264     cmp r0, r1
0265     blo 1b
0266     mcr p15, 0, r0, c7, c10, 4      @ drain WB
0267     ret lr
0268 
0269 /*
0270  *  dma_flush_range(start, end)
0271  *
0272  *  Clean and invalidate the specified virtual address range.
0273  *
0274  *  - start - virtual start address
0275  *  - end   - virtual end address
0276  */
0277 ENTRY(arm922_dma_flush_range)
     @ Clean and invalidate every line in [r0, r1).
0278     bic r0, r0, #CACHE_DLINESIZE - 1
0279 1:  mcr p15, 0, r0, c7, c14, 1      @ clean+invalidate D entry
0280     add r0, r0, #CACHE_DLINESIZE
0281     cmp r0, r1
0282     blo 1b
0283     mcr p15, 0, r0, c7, c10, 4      @ drain WB
0284     ret lr
0285 
0286 /*
0287  *  dma_map_area(start, size, dir)
0288  *  - start - kernel virtual start address
0289  *  - size  - size of region
0290  *  - dir   - DMA direction
0291  */
0292 ENTRY(arm922_dma_map_area)
     @ r0 = start, r1 = size, r2 = DMA direction.  Convert size to an end
     @ pointer, then dispatch on direction: equal -> DMA_TO_DEVICE (clean),
     @ higher -> invalidate, lower -> clean+invalidate.
0293     add r1, r1, r0
0294     cmp r2, #DMA_TO_DEVICE
0295     beq arm922_dma_clean_range
0296     bcs arm922_dma_inv_range
0297     b   arm922_dma_flush_range
0298 ENDPROC(arm922_dma_map_area)
0299 
0300 /*
0301  *  dma_unmap_area(start, size, dir)
0302  *  - start - kernel virtual start address
0303  *  - size  - size of region
0304  *  - dir   - DMA direction
0305  */
0306 ENTRY(arm922_dma_unmap_area)
     @ No-op: all required maintenance was done in arm922_dma_map_area.
0307     ret lr
0308 ENDPROC(arm922_dma_unmap_area)
0309 
     @ Flush-to-Level-of-Unification (louis) has no separate meaning on
     @ this uniprocessor core; alias it to the full cache flush.
0310     .globl  arm922_flush_kern_cache_louis
0311     .equ    arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all
0312 
0313     @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
0314     define_cache_functions arm922
0315 #endif
0316 
0317 
0318 ENTRY(cpu_arm922_dcache_clean_area)
     @ r0 = start address, r1 = size in bytes.  A write-through DCache
     @ never holds dirty data, so the loop is compiled out in that config.
0319 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
0320 1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0321     add r0, r0, #CACHE_DLINESIZE
0322     subs    r1, r1, #CACHE_DLINESIZE
0323     bhi 1b
0324 #endif
0325     ret lr
0326 
0327 /* =============================== PageTable ============================== */
0328 
0329 /*
0330  * cpu_arm922_switch_mm(pgd)
0331  *
0332  * Set the translation base pointer to be as described by pgd.
0333  *
0334  * pgd: new page tables
0335  */
0336     .align  5
0337 ENTRY(cpu_arm922_switch_mm)
     @ r0 = physical address of the new page table.  The VIVT caches must
     @ be flushed before the translation base changes.
0338 #ifdef CONFIG_MMU
0339     mov ip, #0
0340 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
0341     mcr p15, 0, ip, c7, c6, 0       @ invalidate D cache
0342 #else
0343 @ && 'Clean & Invalidate whole DCache'
0344 @ && Re-written to use Index Ops.
0345 @ && Uses registers r1, r3 and ip
0346 
0347     mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 4 segments
0348 1:  orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
0349 2:  mcr p15, 0, r3, c7, c14, 2      @ clean & invalidate D index
0350     subs    r3, r3, #1 << 26
0351     bcs 2b              @ entries 63 to 0
0352     subs    r1, r1, #1 << 5
0353     bcs 1b              @ segments 3 to 0
0354 #endif
0355     mcr p15, 0, ip, c7, c5, 0       @ invalidate I cache
0356     mcr p15, 0, ip, c7, c10, 4      @ drain WB
0357     mcr p15, 0, r0, c2, c0, 0       @ load page table pointer
0358     mcr p15, 0, ip, c8, c7, 0       @ invalidate I & D TLBs
0359 #endif
0360     ret lr
0361 
0362 /*
0363  * cpu_arm922_set_pte_ext(ptep, pte, ext)
0364  *
0365  * Set a PTE and flush it out
0366  */
0367     .align  5
0368 ENTRY(cpu_arm922_set_pte_ext)
     @ r0 = ptep, r1 = pte, r2 = ext.  armv3_set_pte_ext (proc-macros.S)
     @ stores the hardware PTE; the line is then cleaned so the table
     @ walker sees it in memory.
0369 #ifdef CONFIG_MMU
0370     armv3_set_pte_ext
0371     mov r0, r0                      @ nop; presumably a pipeline delay — TODO confirm
0372     mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0373     mcr p15, 0, r0, c7, c10, 4      @ drain WB
0374 #endif /* CONFIG_MMU */
0375     ret lr
0376 
0377     .type   __arm922_setup, #function
     @ Early boot hook (see initfn in __arm922_proc_info): invalidates
     @ caches/TLBs and computes the desired CP15 control register value,
     @ returned in r0 for the caller to write.
0378 __arm922_setup:
0379     mov r0, #0
0380     mcr p15, 0, r0, c7, c7      @ invalidate I,D caches on v4
0381     mcr p15, 0, r0, c7, c10, 4      @ drain write buffer on v4
0382 #ifdef CONFIG_MMU
0383     mcr p15, 0, r0, c8, c7      @ invalidate I,D TLBs on v4
0384 #endif
0385     adr r5, arm922_crval
0386     ldmia   r5, {r5, r6}        @ r5 = bits to clear, r6 = bits to set
0387     mrc p15, 0, r0, c1, c0      @ get control register v4
0388     bic r0, r0, r5
0389     orr r0, r0, r6
0390     ret lr
0391     .size   __arm922_setup, . - __arm922_setup
0392 
0393     /*
0394      *  R
0395      * .RVI ZFRS BLDP WCAM
0396      * ..11 0001 ..11 0101
0397      * 
0398      */
0399     .type   arm922_crval, #object
0400 arm922_crval:
0401     crval   clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
0402 
0403     __INITDATA
0404     @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
     @ using the v4t early data abort and legacy prefetch abort handlers
0405     define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort
0406 
0407     .section ".rodata"
0408 
     @ Name strings referenced from __arm922_proc_info below.
0409     string  cpu_arch_name, "armv4t"
0410     string  cpu_elf_name, "v4"
0411     string  cpu_arm922_name, "ARM922T"
0412 
0413     .align
0414 
0415     .section ".proc.info.init", "a"
0416 
     @ struct proc_info_list entry matched against the CPU id at boot.
0417     .type   __arm922_proc_info,#object
0418 __arm922_proc_info:
0419     .long   0x41009220          @ CPU id value (ARM922T)
0420     .long   0xff00fff0          @ CPU id mask
     @ Section-mapping MMU flags for normal memory (cacheable, bufferable):
0421     .long   PMD_TYPE_SECT | \
0422         PMD_SECT_BUFFERABLE | \
0423         PMD_SECT_CACHEABLE | \
0424         PMD_BIT4 | \
0425         PMD_SECT_AP_WRITE | \
0426         PMD_SECT_AP_READ
     @ Section-mapping MMU flags for I/O (uncached, unbuffered):
0427     .long   PMD_TYPE_SECT | \
0428         PMD_BIT4 | \
0429         PMD_SECT_AP_WRITE | \
0430         PMD_SECT_AP_READ
0431     initfn  __arm922_setup, __arm922_proc_info
0432     .long   cpu_arch_name
0433     .long   cpu_elf_name
0434     .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
0435     .long   cpu_arm922_name
0436     .long   arm922_processor_functions
0437     .long   v4wbi_tlb_fns
0438     .long   v4wb_user_fns
0439 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
0440     .long   arm922_cache_fns
0441 #else
0442     .long   v4wt_cache_fns
0443 #endif
0444     .size   __arm922_proc_info, . - __arm922_proc_info