/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/arm925.S: MMU functions for ARM925
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2002 RidgeRun, Inc.
 *  Copyright (C) 2002-2003 MontaVista Software, Inc.
 *
 *  Update for Linux-2.6 and cache flush improvements
 *  Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
 *
 *  hacked for non-paged-MM by Hyok S. Choi, 2004.
 *
 * This is the low-level assembler code for performing cache and TLB
 * functions on the arm925.
 *
 *  CONFIG_CPU_ARM925_CPU_IDLE -> nohlt
 *
 * Some additional notes based on deciphering the TI TRM on OMAP-5910:
 *
 * NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
 *    entry mode" must be 0 to flush the entries in both segments
 *    at once. This is the default value. See TRM 2-20 and 2-24 for
 *    more information.
 *
 * NOTE2: Default is the "D-cache clean and flush entry mode". It looks
 *    like the "Transparent mode" must be on for partial cache flushes
 *    to work in this mode. This mode only works with 16-bit external
 *    memory. See TRM 2-24 for more information.
 *
 * NOTE3: Write-back cache flushing seems to be flaky with devices using
 *        direct memory access, such as USB OHCI. The workaround is to use
 *        write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
 *        the default for OMAP-1510).
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE 16

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS 2

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES  256

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT    8192
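
/*
 * Note: with the values above, 2 segments x 256 lines x 16 bytes/line
 * gives 8192 bytes, so CACHE_DLIMIT corresponds to the whole D-cache
 * described by these constants; ranges larger than this are cheaper to
 * handle with a whole-cache clean than with per-line maintenance.
 */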

    .text
/*
 * cpu_arm925_proc_init()
 */
ENTRY(cpu_arm925_proc_init)
    ret lr

/*
 * cpu_arm925_proc_fin()
 */
ENTRY(cpu_arm925_proc_fin)
    mrc p15, 0, r0, c1, c0, 0       @ ctrl register
    bic r0, r0, #0x1000         @ ...i............
    bic r0, r0, #0x000e         @ ............wca.
    mcr p15, 0, r0, c1, c0, 0       @ disable caches
    ret lr

/*
 * cpu_arm925_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
    .align  5
    .pushsection    .idmap.text, "ax"
ENTRY(cpu_arm925_reset)
    /* Send software reset to MPU and DSP */
    mov ip, #0xff000000
    orr ip, ip, #0x00fe0000
    orr ip, ip, #0x0000ce00
    mov r4, #1
    strh    r4, [ip, #0x10]
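    /*
     * ip above works out to 0xff000000 | 0x00fe0000 | 0x0000ce00 =
     * 0xfffece00, so the halfword store hits 0xfffece10, which appears
     * to be the OMAP ARM_RSTCT1 reset control register; writing 1
     * asserts the software reset mentioned above.
     */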
ENDPROC(cpu_arm925_reset)
    .popsection

    mov ip, #0
    mcr p15, 0, ip, c7, c7, 0       @ invalidate I,D caches
    mcr p15, 0, ip, c7, c10, 4      @ drain WB
#ifdef CONFIG_MMU
    mcr p15, 0, ip, c8, c7, 0       @ invalidate I & D TLBs
#endif
    mrc p15, 0, ip, c1, c0, 0       @ ctrl register
    bic ip, ip, #0x000f         @ ............wcam
    bic ip, ip, #0x1100         @ ...i...s........
    mcr p15, 0, ip, c1, c0, 0       @ ctrl register
    ret r0

/*
 * cpu_arm925_do_idle()
 *
 * Called with IRQs disabled
 */
    .align  10
ENTRY(cpu_arm925_do_idle)
    mov r0, #0
    mrc p15, 0, r1, c1, c0, 0       @ Read control register
    mcr p15, 0, r0, c7, c10, 4      @ Drain write buffer
    bic r2, r1, #1 << 12
    mcr p15, 0, r2, c1, c0, 0       @ Disable I cache
    mcr p15, 0, r0, c7, c0, 4       @ Wait for interrupt
    mcr p15, 0, r1, c1, c0, 0       @ Restore ICache enable
    ret lr

/*
 *  flush_icache_all()
 *
 *  Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm925_flush_icache_all)
    mov r0, #0
    mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
    ret lr
ENDPROC(arm925_flush_icache_all)

/*
 *  flush_user_cache_all()
 *
 *  Clean and invalidate all cache entries in a particular
 *  address space.
 */
ENTRY(arm925_flush_user_cache_all)
    /* FALLTHROUGH */

/*
 *  flush_kern_cache_all()
 *
 *  Clean and invalidate the entire cache.
 */
ENTRY(arm925_flush_kern_cache_all)
    mov r2, #VM_EXEC
    mov ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
    mcr p15, 0, ip, c7, c6, 0       @ invalidate D cache
#else
    /* Flush entries in both segments at once, see NOTE1 above */
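    /*
     * The loop below keeps the entry index in bits [11:4] of r3 and
     * walks it from 255 down to 0; with the "entry mode" bit clear
     * (NOTE1), each clean+invalidate-by-index operation covers that
     * entry in both segments, so one pass flushes the whole D-cache.
     */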
    mov r3, #(CACHE_DENTRIES - 1) << 4  @ 256 entries in segment
2:  mcr p15, 0, r3, c7, c14, 2      @ clean+invalidate D index
    subs    r3, r3, #1 << 4
    bcs 2b              @ entries 255 to 0
#endif
    tst r2, #VM_EXEC
    mcrne   p15, 0, ip, c7, c5, 0       @ invalidate I cache
    mcrne   p15, 0, ip, c7, c10, 4      @ drain WB
    ret lr

/*
 *  flush_user_cache_range(start, end, flags)
 *
 *  Clean and invalidate a range of cache entries in the
 *  specified address range.
 *
 *  - start - start address (inclusive)
 *  - end   - end address (exclusive)
 *  - flags - vm_flags describing address space
 */
ENTRY(arm925_flush_user_cache_range)
    mov ip, #0
    sub r3, r1, r0          @ calculate total size
    cmp r3, #CACHE_DLIMIT
    bgt __flush_whole_cache
1:  tst r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
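    /*
     * In the write-through configuration there are never dirty lines,
     * so the range only needs to be invalidated; the write-back case
     * below must clean and invalidate instead.
     */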
    mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
    mcrne   p15, 0, r0, c7, c5, 1       @ invalidate I entry
    add r0, r0, #CACHE_DLINESIZE
    mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
    mcrne   p15, 0, r0, c7, c5, 1       @ invalidate I entry
    add r0, r0, #CACHE_DLINESIZE
#else
    mcr p15, 0, r0, c7, c14, 1      @ clean and invalidate D entry
    mcrne   p15, 0, r0, c7, c5, 1       @ invalidate I entry
    add r0, r0, #CACHE_DLINESIZE
    mcr p15, 0, r0, c7, c14, 1      @ clean and invalidate D entry
    mcrne   p15, 0, r0, c7, c5, 1       @ invalidate I entry
    add r0, r0, #CACHE_DLINESIZE
#endif
    cmp r0, r1
    blo 1b
    tst r2, #VM_EXEC
    mcrne   p15, 0, ip, c7, c10, 4      @ drain WB
    ret lr

/*
 *  coherent_kern_range(start, end)
 *
 *  Ensure coherency between the Icache and the Dcache in the
 *  region described by start, end.  If you have non-snooping
 *  Harvard caches, you need to implement this function.
 *
 *  - start - virtual start address
 *  - end   - virtual end address
 */
ENTRY(arm925_coherent_kern_range)
    /* FALLTHROUGH */

/*
 *  coherent_user_range(start, end)
 *
 *  Ensure coherency between the Icache and the Dcache in the
 *  region described by start, end.  If you have non-snooping
 *  Harvard caches, you need to implement this function.
 *
 *  - start - virtual start address
 *  - end   - virtual end address
 */
ENTRY(arm925_coherent_user_range)
    bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
    mcr p15, 0, r0, c7, c5, 1       @ invalidate I entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    mcr p15, 0, r0, c7, c10, 4      @ drain WB
    mov r0, #0
    ret lr

/*
 *  flush_kern_dcache_area(void *addr, size_t size)
 *
 *  Ensure no D cache aliasing occurs, either with itself or
 *  the I cache
 *
 *  - addr  - kernel address
 *  - size  - region size
 */
ENTRY(arm925_flush_kern_dcache_area)
    add r1, r0, r1
1:  mcr p15, 0, r0, c7, c14, 1      @ clean+invalidate D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    mov r0, #0
    mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
    mcr p15, 0, r0, c7, c10, 4      @ drain WB
    ret lr

/*
 *  dma_inv_range(start, end)
 *
 *  Invalidate (discard) the specified virtual address range.
 *  May not write back any entries.  If 'start' or 'end'
 *  are not cache line aligned, those lines must be written
 *  back.
 *
 *  - start - virtual start address
 *  - end   - virtual end address
 *
 * (same as v4wb)
 */
arm925_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
    tst r0, #CACHE_DLINESIZE - 1
    mcrne   p15, 0, r0, c7, c10, 1      @ clean D entry
    tst r1, #CACHE_DLINESIZE - 1
    mcrne   p15, 0, r1, c7, c10, 1      @ clean D entry
#endif
    bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    mcr p15, 0, r0, c7, c10, 4      @ drain WB
    ret lr

/*
 *  dma_clean_range(start, end)
 *
 *  Clean the specified virtual address range.
 *
 *  - start - virtual start address
 *  - end   - virtual end address
 *
 * (same as v4wb)
 */
arm925_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
    bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
#endif
    mcr p15, 0, r0, c7, c10, 4      @ drain WB
    ret lr

/*
 *  dma_flush_range(start, end)
 *
 *  Clean and invalidate the specified virtual address range.
 *
 *  - start - virtual start address
 *  - end   - virtual end address
 */
ENTRY(arm925_dma_flush_range)
    bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
    mcr p15, 0, r0, c7, c14, 1      @ clean+invalidate D entry
#else
    mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
#endif
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
    mcr p15, 0, r0, c7, c10, 4      @ drain WB
    ret lr

/*
 *  dma_map_area(start, size, dir)
 *  - start - kernel virtual start address
 *  - size  - size of region
 *  - dir   - DMA direction
 */
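/*
 * The enum dma_data_direction values are assumed to be the usual ones
 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2), so
 * below beq picks clean for TO_DEVICE, bcs (a higher value) picks
 * invalidate for FROM_DEVICE, and BIDIRECTIONAL falls through to the
 * full flush.
 */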
ENTRY(arm925_dma_map_area)
    add r1, r1, r0
    cmp r2, #DMA_TO_DEVICE
    beq arm925_dma_clean_range
    bcs arm925_dma_inv_range
    b   arm925_dma_flush_range
ENDPROC(arm925_dma_map_area)

/*
 *  dma_unmap_area(start, size, dir)
 *  - start - kernel virtual start address
 *  - size  - size of region
 *  - dir   - DMA direction
 */
ENTRY(arm925_dma_unmap_area)
    ret lr
ENDPROC(arm925_dma_unmap_area)

    .globl  arm925_flush_kern_cache_louis
    .equ    arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all

    @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
    define_cache_functions arm925

ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
    add r0, r0, #CACHE_DLINESIZE
    subs    r1, r1, #CACHE_DLINESIZE
    bhi 1b
#endif
    mcr p15, 0, r0, c7, c10, 4      @ drain WB
    ret lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm925_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
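/*
 * The ARM925 caches are virtually indexed and tagged, so entries left
 * over from the old address space are presumably not valid under the
 * new page tables; that is why the D-cache is flushed and the I-cache
 * invalidated below before the new translation table base is loaded.
 */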
    .align  5
ENTRY(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
    mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
    mcr p15, 0, ip, c7, c6, 0       @ invalidate D cache
#else
    /* Flush entries in both segments at once, see NOTE1 above */
    mov r3, #(CACHE_DENTRIES - 1) << 4  @ 256 entries in segment
2:  mcr p15, 0, r3, c7, c14, 2      @ clean & invalidate D index
    subs    r3, r3, #1 << 4
    bcs 2b              @ entries 255 to 0
#endif
    mcr p15, 0, ip, c7, c5, 0       @ invalidate I cache
    mcr p15, 0, ip, c7, c10, 4      @ drain WB
    mcr p15, 0, r0, c2, c0, 0       @ load page table pointer
    mcr p15, 0, ip, c8, c7, 0       @ invalidate I & D TLBs
#endif
    ret lr

/*
 * cpu_arm925_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
    .align  5
ENTRY(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
    armv3_set_pte_ext
    mov r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
    mcr p15, 0, r0, c7, c10, 1      @ clean D entry
#endif
    mcr p15, 0, r0, c7, c10, 4      @ drain WB
#endif /* CONFIG_MMU */
    ret lr

    .type   __arm925_setup, #function
__arm925_setup:
    mov r0, #0

    /* Transparent on, D-cache clean & flush mode. See NOTE2 above */
    orr r0, r0, #1 << 1         @ transparent mode on
    mcr p15, 0, r0, c15, c1, 0      @ write TI config register

    mov r0, #0
    mcr p15, 0, r0, c7, c7      @ invalidate I,D caches on v4
    mcr p15, 0, r0, c7, c10, 4      @ drain write buffer on v4
#ifdef CONFIG_MMU
    mcr p15, 0, r0, c8, c7      @ invalidate I,D TLBs on v4
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
    mov r0, #4              @ disable write-back on caches explicitly
    mcr p15, 7, r0, c15, c0, 0
#endif

    adr r5, arm925_crval
    ldmia   r5, {r5, r6}
    mrc p15, 0, r0, c1, c0      @ get control register v4
    bic r0, r0, r5
    orr r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
    orr r0, r0, #0x4000         @ .1.. .... .... ....
#endif
    ret lr
    .size   __arm925_setup, . - __arm925_setup

    /*
     *  R
     * .RVI ZFRS BLDP WCAM
     * .011 0001 ..11 1101
     *
     */
    .type   arm925_crval, #object
arm925_crval:
    crval   clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130
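    /*
     * If crval behaves as in proc-macros.S, it emits the 'clear' mask
     * followed by 'mmuset' (or 'ucset' on noMMU builds); __arm925_setup
     * above clears the 'clear' bits in the current control register,
     * ORs in the set bits, and returns the result in r0 for the generic
     * startup code to write back to CP15 c1.
     */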

    __INITDATA
    @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
    define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort

    .section ".rodata"

    string  cpu_arch_name, "armv4t"
    string  cpu_elf_name, "v4"
    string  cpu_arm925_name, "ARM925T"

    .align

    .section ".proc.info.init", "a"

.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
    .type   __\name\()_proc_info,#object
__\name\()_proc_info:
    .long   \cpu_val
    .long   \cpu_mask
    .long   PMD_TYPE_SECT | \
        PMD_SECT_CACHEABLE | \
        PMD_BIT4 | \
        PMD_SECT_AP_WRITE | \
        PMD_SECT_AP_READ
    .long   PMD_TYPE_SECT | \
        PMD_BIT4 | \
        PMD_SECT_AP_WRITE | \
        PMD_SECT_AP_READ
    initfn  __arm925_setup, __\name\()_proc_info
    .long   cpu_arch_name
    .long   cpu_elf_name
    .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
    .long   cpu_arm925_name
    .long   arm925_processor_functions
    .long   v4wbi_tlb_fns
    .long   v4wb_user_fns
    .long   arm925_cache_fns
    .size   __\name\()_proc_info, . - __\name\()_proc_info
.endm

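    /*
     * The cpu_val/cpu_mask pairs below are matched against the CP15
     * main ID register at boot to pick this proc_info entry; the second
     * entry covers the arm915 variant with the same ARM925 functions.
     */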
    arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name
    arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name