Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
0004  *
0005  *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
0006  */
0007 #include <linux/linkage.h>
0008 #include <linux/init.h>
0009 #include <linux/pgtable.h>
0010 #include <asm/assembler.h>
0011 #include <asm/hwcap.h>
0012 #include <asm/pgtable-hwdef.h>
0013 #include <asm/ptrace.h>
0014 #include "proc-macros.S"
0015 
0016 /* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
0017 #define CACHE_DLINESIZE 16
0018 #define CACHE_DSEGMENTS 4
0019 #define CACHE_DENTRIES  64
0020 
0021     .text
0022 /*
0023  * cpu_arm940_proc_init()
0024  * cpu_arm940_switch_mm()
0025  *
0026  * These are not required.
0027  */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
    ret lr              @ no-op: no MMU on ARM940T, nothing to set up or switch
0031 
0032 /*
0033  * cpu_arm940_proc_fin()
0034  */
ENTRY(cpu_arm940_proc_fin)
    mrc p15, 0, r0, c1, c0, 0       @ read cp15 control register
    bic r0, r0, #0x00001000     @ clear I-cache enable (bit 12)
    bic r0, r0, #0x00000004     @ clear D-cache enable (bit 2)
    mcr p15, 0, r0, c1, c0, 0       @ write back: both caches disabled
    ret lr
0041 
0042 /*
0043  * cpu_arm940_reset(loc)
0044  * Params  : r0 = address to jump to
0045  * Notes   : This sets up everything for a reset
0046  */
    .pushsection    .idmap.text, "ax"
    @ Placed in .idmap.text: runs with caches/MPU being torn down, so it
    @ must be reachable at its identity-mapped address.
ENTRY(cpu_arm940_reset)
    mov ip, #0
    mcr p15, 0, ip, c7, c5, 0       @ flush I cache
    mcr p15, 0, ip, c7, c6, 0       @ flush D cache
    mcr p15, 0, ip, c7, c10, 4      @ drain WB
    mrc p15, 0, ip, c1, c0, 0       @ ctrl register
    bic ip, ip, #0x00000005     @ clear D-cache (bit 2) and MPU enable (bit 0)
    bic ip, ip, #0x00001000     @ clear I-cache enable (bit 12)
    mcr p15, 0, ip, c1, c0, 0       @ write back: caches/MPU off
    ret r0              @ jump to the reset address supplied in r0
ENDPROC(cpu_arm940_reset)
    .popsection
0060 
0061 /*
0062  * cpu_arm940_do_idle()
0063  */
    .align  5               @ cache-line align the idle entry point
ENTRY(cpu_arm940_do_idle)
    mcr p15, 0, r0, c7, c0, 4       @ wait for interrupt (core sleeps until IRQ/FIQ)
    ret lr
0068 
0069 /*
0070  *  flush_icache_all()
0071  *
0072  *  Unconditionally clean and invalidate the entire icache.
0073  */
ENTRY(arm940_flush_icache_all)
    mov r0, #0              @ SBZ operand for the cp15 op
    mcr p15, 0, r0, c7, c5, 0       @ invalidate entire I cache
    ret lr
ENDPROC(arm940_flush_icache_all)
0079 
0080 /*
0081  *  flush_user_cache_all()
0082  */
ENTRY(arm940_flush_user_cache_all)
    /* FALLTHROUGH */

/*
 *  flush_kern_cache_all()
 *
 *  Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
    mov r2, #VM_EXEC            @ force the VM_EXEC path below (I-cache + WB)
    /* FALLTHROUGH */

/*
 *  flush_user_cache_range(start, end, flags)
 *
 *  There is no efficient way to flush a range of cache entries
 *  in the specified address range. Thus, flushes all.
 *
 *  - start - start address (inclusive)
 *  - end   - end address (exclusive)
 *  - flags - vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
    mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
    mcr p15, 0, ip, c7, c6, 0       @ write-through: no dirty data, just invalidate D cache
#else
    @ Walk every D-cache line by set/way index:
    @ segment in bits [5:4], line index in bits [31:26].
    mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:  orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:  mcr p15, 0, r3, c7, c14, 2      @ clean/flush D index
    subs    r3, r3, #1 << 26
    bcs 2b              @ entries 63 to 0
    subs    r1, r1, #1 << 4
    bcs 1b              @ segments 3 to 0
#endif
    tst r2, #VM_EXEC            @ executable mapping?
    mcrne   p15, 0, ip, c7, c5, 0       @ invalidate I cache
    mcrne   p15, 0, ip, c7, c10, 4      @ drain WB
    ret lr
0122 
0123 /*
0124  *  coherent_kern_range(start, end)
0125  *
0126  *  Ensure coherency between the Icache and the Dcache in the
0127  *  region described by start, end.  If you have non-snooping
0128  *  Harvard caches, you need to implement this function.
0129  *
0130  *  - start - virtual start address
0131  *  - end   - virtual end address
0132  */
ENTRY(arm940_coherent_kern_range)
    /* FALLTHROUGH */

/*
 *  coherent_user_range(start, end)
 *
 *  Ensure coherency between the Icache and the Dcache in the
 *  region described by start, end.  If you have non-snooping
 *  Harvard caches, you need to implement this function.
 *
 *  - start - virtual start address
 *  - end   - virtual end address
 */
ENTRY(arm940_coherent_user_range)
    /* FALLTHROUGH */

/*
 *  flush_kern_dcache_area(void *addr, size_t size)
 *
 *  Ensure no D cache aliasing occurs, either with itself or
 *  the I cache
 *
 *  - addr  - kernel address
 *  - size  - region size
 */
ENTRY(arm940_flush_kern_dcache_area)
    mov r0, #0              @ SBZ operand; also left in r0 as the return
                            @ value for the coherent_* entries above
    mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:  orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:  mcr p15, 0, r3, c7, c14, 2      @ clean/flush D index
    subs    r3, r3, #1 << 26
    bcs 2b              @ entries 63 to 0
    subs    r1, r1, #1 << 4
    bcs 1b              @ segments 3 to 0 (CACHE_DSEGMENTS is 4, not 8)
    mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
    mcr p15, 0, r0, c7, c10, 4      @ drain WB
    ret lr
0170 
0171 /*
0172  *  dma_inv_range(start, end)
0173  *
0174  *  There is no efficient way to invalidate a specifid virtual
0175  *  address range. Thus, invalidates all.
0176  *
0177  *  - start - virtual start address
0178  *  - end   - virtual end address
0179  */
arm940_dma_inv_range:
    mov ip, #0
    @ Invalidate (discard) every D-cache line; the start/end arguments
    @ are ignored because per-line VA ops are not practical here.
    mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:  orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:  mcr p15, 0, r3, c7, c6, 2       @ flush D entry
    subs    r3, r3, #1 << 26
    bcs 2b              @ entries 63 to 0
    subs    r1, r1, #1 << 4
    bcs 1b              @ segments 3 to 0 (CACHE_DSEGMENTS is 4, not 8)
    mcr p15, 0, ip, c7, c10, 4      @ drain WB
    ret lr
0191 
0192 /*
0193  *  dma_clean_range(start, end)
0194  *
0195  *  There is no efficient way to clean a specifid virtual
0196  *  address range. Thus, cleans all.
0197  *
0198  *  - start - virtual start address
0199  *  - end   - virtual end address
0200  */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
    mov ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
    @ Write-back cache: clean (write out) every D-cache line.
    @ With a write-through cache there is never dirty data, so only
    @ the write-buffer drain below is needed.
    mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:  orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:  mcr p15, 0, r3, c7, c10, 2      @ clean D entry
    subs    r3, r3, #1 << 26
    bcs 2b              @ entries 63 to 0
    subs    r1, r1, #1 << 4
    bcs 1b              @ segments 3 to 0 (CACHE_DSEGMENTS is 4, not 8)
#endif
    mcr p15, 0, ip, c7, c10, 4      @ drain WB
    ret lr
0215 
0216 /*
0217  *  dma_flush_range(start, end)
0218  *
0219  *  There is no efficient way to clean and invalidate a specifid
0220  *  virtual address range.
0221  *
0222  *  - start - virtual start address
0223  *  - end   - virtual end address
0224  */
ENTRY(arm940_dma_flush_range)
    mov ip, #0
    mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:  orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
    mcr p15, 0, r3, c7, c14, 2      @ clean/flush D entry (write-back cache)
#else
    mcr p15, 0, r3, c7, c6, 2       @ invalidate D entry (never dirty if write-through)
#endif
    subs    r3, r3, #1 << 26
    bcs 2b              @ entries 63 to 0
    subs    r1, r1, #1 << 4
    bcs 1b              @ segments 3 to 0 (CACHE_DSEGMENTS is 4, not 8)
    mcr p15, 0, ip, c7, c10, 4      @ drain WB
    ret lr
0241 
0242 /*
0243  *  dma_map_area(start, size, dir)
0244  *  - start - kernel virtual start address
0245  *  - size  - size of region
0246  *  - dir   - DMA direction
0247  */
ENTRY(arm940_dma_map_area)
    add r1, r1, r0          @ r1 = end address (start + size)
    cmp r2, #DMA_TO_DEVICE
    beq arm940_dma_clean_range      @ dir == DMA_TO_DEVICE: clean only
    bcs arm940_dma_inv_range        @ dir >  DMA_TO_DEVICE (FROM_DEVICE): invalidate
    b   arm940_dma_flush_range      @ dir <  DMA_TO_DEVICE (BIDIRECTIONAL): clean+invalidate
ENDPROC(arm940_dma_map_area)
0255 
0256 /*
0257  *  dma_unmap_area(start, size, dir)
0258  *  - start - kernel virtual start address
0259  *  - size  - size of region
0260  *  - dir   - DMA direction
0261  */
ENTRY(arm940_dma_unmap_area)
    ret lr              @ nothing to do: all maintenance happens at map time
ENDPROC(arm940_dma_unmap_area)
0265 
    @ The "Level of Unification" flush is simply aliased to the full
    @ cache flush on this CPU.
    .globl  arm940_flush_kern_cache_louis
    .equ    arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all

    @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
    define_cache_functions arm940
0271 
0272     .type   __arm940_setup, #function
0273 __arm940_setup:
0274     mov r0, #0
0275     mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
0276     mcr p15, 0, r0, c7, c6, 0       @ invalidate D cache
0277     mcr p15, 0, r0, c7, c10, 4      @ drain WB
0278 
0279     mcr p15, 0, r0, c6, c3, 0       @ disable data area 3~7
0280     mcr p15, 0, r0, c6, c4, 0
0281     mcr p15, 0, r0, c6, c5, 0
0282     mcr p15, 0, r0, c6, c6, 0
0283     mcr p15, 0, r0, c6, c7, 0
0284 
0285     mcr p15, 0, r0, c6, c3, 1       @ disable instruction area 3~7
0286     mcr p15, 0, r0, c6, c4, 1
0287     mcr p15, 0, r0, c6, c5, 1
0288     mcr p15, 0, r0, c6, c6, 1
0289     mcr p15, 0, r0, c6, c7, 1
0290 
0291     mov r0, #0x0000003F         @ base = 0, size = 4GB
0292     mcr p15, 0, r0, c6, c0, 0       @ set area 0, default
0293     mcr p15, 0, r0, c6, c0, 1
0294 
0295     ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
0296     ldr r7, =CONFIG_DRAM_SIZE >> 12 @ size of RAM (must be >= 4KB)
0297     pr_val  r3, r0, r7, #1
0298     mcr p15, 0, r3, c6, c1, 0       @ set area 1, RAM
0299     mcr p15, 0, r3, c6, c1, 1
0300 
0301     ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
0302     ldr r7, =CONFIG_FLASH_SIZE      @ size of FLASH (must be >= 4KB)
0303     pr_val  r3, r0, r6, #1
0304     mcr p15, 0, r3, c6, c2, 0       @ set area 2, ROM/FLASH
0305     mcr p15, 0, r3, c6, c2, 1
0306 
0307     mov r0, #0x06
0308     mcr p15, 0, r0, c2, c0, 0       @ Region 1&2 cacheable
0309     mcr p15, 0, r0, c2, c0, 1
0310 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
0311     mov r0, #0x00           @ disable whole write buffer
0312 #else
0313     mov r0, #0x02           @ Region 1 write bufferred
0314 #endif
0315     mcr p15, 0, r0, c3, c0, 0
0316 
0317     mov r0, #0x10000
0318     sub r0, r0, #1          @ r0 = 0xffff
0319     mcr p15, 0, r0, c5, c0, 0       @ all read/write access
0320     mcr p15, 0, r0, c5, c0, 1
0321 
0322     mrc p15, 0, r0, c1, c0      @ get control register
0323     orr r0, r0, #0x00001000     @ I-cache
0324     orr r0, r0, #0x00000005     @ MPU/D-cache
0325 
0326     ret lr
0327 
0328     .size   __arm940_setup, . - __arm940_setup
0329 
    __INITDATA

    @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
    @ nommu=1: abort handlers are the MMU-less variants
    define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
0334 
    .section ".rodata"

    @ Name strings referenced by the proc.info entry below.
    string  cpu_arch_name, "armv4t"
    string  cpu_elf_name, "v4"
    string  cpu_arm940_name, "ARM940T"

    .align
0342 
    .section ".proc.info.init", "a"

    @ struct proc_info_list entry - field layout per <asm/procinfo.h>
    .type   __arm940_proc_info,#object
__arm940_proc_info:
    .long   0x41009400          @ cpu_val: ARM940T main ID match value
    .long   0xff00fff0          @ cpu_mask applied before the match
    .long   0               @ mmu flags: none (no MMU)
    initfn  __arm940_setup, __arm940_proc_info
    .long   cpu_arch_name
    .long   cpu_elf_name
    .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
    .long   cpu_arm940_name
    .long   arm940_processor_functions
    .long   0               @ no TLB functions (no MMU)
    .long   0               @ no user-page functions
    .long   arm940_cache_fns
    .size   __arm940_proc_info, . - __arm940_proc_info
0360