Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  *  linux/arch/arm/mm/proc-xscale.S
0004  *
0005  *  Author: Nicolas Pitre
0006  *  Created:    November 2000
0007  *  Copyright:  (C) 2000, 2001 MontaVista Software Inc.
0008  *
0009  * MMU functions for the Intel XScale CPUs
0010  *
0011  * 2001 Aug 21:
0012  *  some contributions by Brett Gaines <brett.w.gaines@intel.com>
0013  *  Copyright 2001 by Intel Corp.
0014  *
0015  * 2001 Sep 08:
0016  *  Completely revisited, many important fixes
0017  *  Nicolas Pitre <nico@fluxnic.net>
0018  */
0019 
0020 #include <linux/linkage.h>
0021 #include <linux/init.h>
0022 #include <linux/pgtable.h>
0023 #include <asm/assembler.h>
0024 #include <asm/hwcap.h>
0025 #include <asm/pgtable-hwdef.h>
0026 #include <asm/page.h>
0027 #include <asm/ptrace.h>
0028 #include "proc-macros.S"
0029 
0030 /*
0031  * This is the maximum size of an area which will be flushed.  If the area
0032  * is larger than this, then we flush the whole cache
0033  */
0034 #define MAX_AREA_SIZE   32768
0035 
0036 /*
0037  * the cache line size of the I and D cache
0038  */
0039 #define CACHELINESIZE   32
0040 
0041 /*
0042  * the size of the data cache
0043  */
0044 #define CACHESIZE   32768
0045 
0046 /*
0047  * Virtual address used to allocate the cache when flushed
0048  *
0049  * This must be an address range which is _never_ used.  It should
0050  * apparently have a mapping in the corresponding page table for
0051  * compatibility with future CPUs that _could_ require it.  For instance we
0052  * don't care.
0053  *
0054  * This must be aligned on a 2*CACHESIZE boundary.  The code selects one of
0055  * the 2 areas in alternance each time the clean_d_cache macro is used.
0056  * Without this the XScale core exhibits cache eviction problems and no one
0057  * knows why.
0058  *
0059  * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
0060  */
0061 #define CLEAN_ADDR  0xfffe0000
0062 
0063 /*
0064  * This macro is used to wait for a CP15 write and is needed
0065  * when we have to ensure that the last operation to the co-pro
0066  * was completed before continuing with operation.
0067  */
0068     .macro  cpwait, rd
     @ \rd is a scratch register and is clobbered.
0069     mrc p15, 0, \rd, c2, c0, 0      @ arbitrary read of cp15
0070     mov \rd, \rd            @ wait for completion
0071     sub     pc, pc, #4          @ flush instruction pipeline
0072     .endm
0073 
0074     .macro  cpwait_ret, lr, rd
     @ Combined cpwait + return: \rd LSR #32 is always 0, so this is
     @ "sub pc, \lr, #0", but the register-shifted operand stalls the
     @ pipeline until the preceding mrc has completed. \rd is clobbered.
0075     mrc p15, 0, \rd, c2, c0, 0      @ arbitrary read of cp15
0076     sub pc, \lr, \rd, LSR #32       @ wait for completion and
0077                         @ flush instruction pipeline
0078     .endm
0079 
0080 /*
0081  * This macro cleans the entire dcache using line allocate.
0082  * The main loop has been unrolled to reduce loop overhead.
0083  * rd and rs are two scratch registers.
0084  */
0085     .macro  clean_d_cache, rd, rs
     @ Toggle between the two CACHESIZE-sized windows at CLEAN_ADDR
     @ (see the comment above CLEAN_ADDR) so successive invocations
     @ use alternating address ranges.
0086     ldr \rs, =clean_addr
0087     ldr \rd, [\rs]
0088     eor \rd, \rd, #CACHESIZE        @ flip to the other window
0089     str \rd, [\rs]
0090     add \rs, \rd, #CACHESIZE        @ \rs = end of window
0091 1:  mcr p15, 0, \rd, c7, c2, 5      @ allocate D cache line
0092     add \rd, \rd, #CACHELINESIZE
0093     mcr p15, 0, \rd, c7, c2, 5      @ allocate D cache line
0094     add \rd, \rd, #CACHELINESIZE
0095     mcr p15, 0, \rd, c7, c2, 5      @ allocate D cache line
0096     add \rd, \rd, #CACHELINESIZE
0097     mcr p15, 0, \rd, c7, c2, 5      @ allocate D cache line
0098     add \rd, \rd, #CACHELINESIZE
0099     teq \rd, \rs
0100     bne 1b
0101     .endm
0102 
0103     .data
0104     .align  2
     @ Current line-allocate base address; toggled between the two
     @ CLEAN_ADDR windows by the clean_d_cache macro above.
0105 clean_addr: .word   CLEAN_ADDR
0106 
0107     .text
0108 
0109 /*
0110  * cpu_xscale_proc_init()
0111  *
0112  * Nothing too exciting at the moment
0113  */
0114 ENTRY(cpu_xscale_proc_init)
0115     @ enable write buffer coalescing. Some bootloader disable it
     @ Bit 0 of the auxiliary control register is the coalescing
     @ *disable* bit, so clearing it enables coalescing.
0116     mrc p15, 0, r1, c1, c0, 1
0117     bic r1, r1, #1
0118     mcr p15, 0, r1, c1, c0, 1
0119     ret lr
0120 
0121 /*
0122  * cpu_xscale_proc_fin()
0123  */
0124 ENTRY(cpu_xscale_proc_fin)
     @ Shut down caching before the processor is halted/rebooted:
     @ clear the I (I-cache), Z (branch target buffer), C (D-cache)
     @ and A (alignment) related enable bits in the control register.
0125     mrc p15, 0, r0, c1, c0, 0       @ ctrl register
0126     bic r0, r0, #0x1800         @ ...IZ...........
0127     bic r0, r0, #0x0006         @ .............CA.
0128     mcr p15, 0, r0, c1, c0, 0       @ disable caches
0129     ret lr
0130 
0131 /*
0132  * cpu_xscale_reset(loc)
0133  *
0134  * Perform a soft reset of the system.  Put the CPU into the
0135  * same state as it would be if it had been reset, and branch
0136  * to what would be the reset vector.
0137  *
0138  * loc: location to jump to for soft reset
0139  *
0140  * Beware PXA270 erratum E7.
0141  */
0142     .align  5
0143     .pushsection    .idmap.text, "ax"
0144 ENTRY(cpu_xscale_reset)
     @ r0 = address to jump to after the reset sequence.
     @ The .align 5 above guarantees the critical MMU-off sequence
     @ below sits in a single cache line; do not reorder these
     @ instructions.
0145     mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
0146     msr cpsr_c, r1          @ reset CPSR
0147     mcr p15, 0, r1, c10, c4, 1      @ unlock I-TLB
0148     mcr p15, 0, r1, c8, c5, 0       @ invalidate I-TLB
0149     mrc p15, 0, r1, c1, c0, 0       @ ctrl register
0150     bic r1, r1, #0x0086         @ ........B....CA.
0151     bic r1, r1, #0x3900         @ ..VIZ..S........
0152     sub pc, pc, #4          @ flush pipeline
0153     @ *** cache line aligned ***
0154     mcr p15, 0, r1, c1, c0, 0       @ ctrl register
0155     bic r1, r1, #0x0001         @ ...............M
0156     mcr p15, 0, ip, c7, c7, 0       @ invalidate I,D caches & BTB
0157     mcr p15, 0, r1, c1, c0, 0       @ ctrl register
0158     @ CAUTION: MMU turned off from this point. We count on the pipeline
0159     @ already containing those two last instructions to survive.
0160     mcr p15, 0, ip, c8, c7, 0       @ invalidate I & D TLBs
0161     ret r0
0162 ENDPROC(cpu_xscale_reset)
0163     .popsection
0164 
0165 /*
0166  * cpu_xscale_do_idle()
0167  *
0168  * Cause the processor to idle
0169  *
0170  * For now we do nothing but go to idle mode for every case
0171  *
0172  * XScale supports clock switching, but using idle mode support
0173  * allows external hardware to react to system state changes.
0174  */
0175     .align  5
0176 
0177 ENTRY(cpu_xscale_do_idle)
     @ CP14 c7 is the XScale power-mode register; writing 1 enters
     @ IDLE mode until the next interrupt.
0178     mov r0, #1
0179     mcr p14, 0, r0, c7, c0, 0       @ Go to IDLE
0180     ret lr
0181 
0182 /* ================================= CACHE ================================ */
0183 
0184 /*
0185  *  flush_icache_all()
0186  *
0187  *  Unconditionally clean and invalidate the entire icache.
0188  */
0189 ENTRY(xscale_flush_icache_all)
     @ The I-cache cannot hold dirty data, so invalidation alone is
     @ sufficient here.
0190     mov r0, #0
0191     mcr p15, 0, r0, c7, c5, 0       @ invalidate I cache
0192     ret lr
0193 ENDPROC(xscale_flush_icache_all)
0194 
0195 /*
0196  *  flush_user_cache_all()
0197  *
0198  *  Invalidate all cache entries in a particular address
0199  *  space.
0200  */
0201 ENTRY(xscale_flush_user_cache_all)
0202     /* FALLTHROUGH */
0203 
0204 /*
0205  *  flush_kern_cache_all()
0206  *
0207  *  Clean and invalidate the entire cache.
0208  */
0209 ENTRY(xscale_flush_kern_cache_all)
0210     mov r2, #VM_EXEC
0211     mov ip, #0
0212 __flush_whole_cache:
     @ Entered directly from xscale_flush_user_cache_range when the
     @ range exceeds MAX_AREA_SIZE. r2 holds the vm_flags; the I-cache
     @ and BTB are only touched when VM_EXEC is set.
0213     clean_d_cache r0, r1
0214     tst r2, #VM_EXEC
0215     mcrne   p15, 0, ip, c7, c5, 0       @ Invalidate I cache & BTB
0216     mcrne   p15, 0, ip, c7, c10, 4      @ Drain Write (& Fill) Buffer
0217     ret lr
0218 
0219 /*
0220  *  flush_user_cache_range(start, end, vm_flags)
0221  *
0222  *  Invalidate a range of cache entries in the specified
0223  *  address space.
0224  *
0225  *  - start - start address (may not be aligned)
0226  *  - end   - end address (exclusive, may not be aligned)
0227  *  - vma   - vma_area_struct describing address space
0228  */
0229     .align  5
0230 ENTRY(xscale_flush_user_cache_range)
0231     mov ip, #0
0232     sub r3, r1, r0          @ calculate total size
0233     cmp r3, #MAX_AREA_SIZE
0234     bhs __flush_whole_cache
0235 
0236 1:  tst r2, #VM_EXEC
0237     mcrne   p15, 0, r0, c7, c5, 1       @ Invalidate I cache line
0238     mcr p15, 0, r0, c7, c10, 1      @ Clean D cache line
0239     mcr p15, 0, r0, c7, c6, 1       @ Invalidate D cache line
0240     add r0, r0, #CACHELINESIZE
0241     cmp r0, r1
0242     blo 1b
0243     tst r2, #VM_EXEC
0244     mcrne   p15, 0, ip, c7, c5, 6       @ Invalidate BTB
0245     mcrne   p15, 0, ip, c7, c10, 4      @ Drain Write (& Fill) Buffer
0246     ret lr
0247 
0248 /*
0249  *  coherent_kern_range(start, end)
0250  *
0251  *  Ensure coherency between the Icache and the Dcache in the
0252  *  region described by start.  If you have non-snooping
0253  *  Harvard caches, you need to implement this function.
0254  *
0255  *  - start  - virtual start address
0256  *  - end    - virtual end address
0257  *
0258  *  Note: single I-cache line invalidation isn't used here since
0259  *  it also trashes the mini I-cache used by JTAG debuggers.
0260  */
0261 ENTRY(xscale_coherent_kern_range)
     @ Clean D lines over [r0, r1), then invalidate the whole I-cache
     @ and BTB (not per-line: see the mini I-cache note above).
0262     bic r0, r0, #CACHELINESIZE - 1
0263 1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0264     add r0, r0, #CACHELINESIZE
0265     cmp r0, r1
0266     blo 1b
0267     mov r0, #0
0268     mcr p15, 0, r0, c7, c5, 0       @ Invalidate I cache & BTB
0269     mcr p15, 0, r0, c7, c10, 4      @ Drain Write (& Fill) Buffer
0270     ret lr
0271 
0272 /*
0273  *  coherent_user_range(start, end)
0274  *
0275  *  Ensure coherency between the Icache and the Dcache in the
0276  *  region described by start.  If you have non-snooping
0277  *  Harvard caches, you need to implement this function.
0278  *
0279  *  - start  - virtual start address
0280  *  - end    - virtual end address
0281  */
0282 ENTRY(xscale_coherent_user_range)
     @ Unlike the kern variant, this invalidates the I-cache per line
     @ (user ranges don't hit the JTAG mini I-cache concern) and then
     @ invalidates the BTB.
0283     bic r0, r0, #CACHELINESIZE - 1
0284 1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0285     mcr p15, 0, r0, c7, c5, 1       @ Invalidate I cache entry
0286     add r0, r0, #CACHELINESIZE
0287     cmp r0, r1
0288     blo 1b
0289     mov r0, #0
0290     mcr p15, 0, r0, c7, c5, 6       @ Invalidate BTB
0291     mcr p15, 0, r0, c7, c10, 4      @ Drain Write (& Fill) Buffer
0292     ret lr
0293 
0294 /*
0295  *  flush_kern_dcache_area(void *addr, size_t size)
0296  *
0297  *  Ensure no D cache aliasing occurs, either with itself or
0298  *  the I cache
0299  *
0300  *  - addr  - kernel address
0301  *  - size  - region size
0302  */
0303 ENTRY(xscale_flush_kern_dcache_area)
     @ r0 = addr, r1 = size; convert size to an exclusive end address.
0304     add r1, r0, r1
0305 1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0306     mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
0307     add r0, r0, #CACHELINESIZE
0308     cmp r0, r1
0309     blo 1b
0310     mov r0, #0
0311     mcr p15, 0, r0, c7, c5, 0       @ Invalidate I cache & BTB
0312     mcr p15, 0, r0, c7, c10, 4      @ Drain Write (& Fill) Buffer
0313     ret lr
0314 
0315 /*
0316  *  dma_inv_range(start, end)
0317  *
0318  *  Invalidate (discard) the specified virtual address range.
0319  *  May not write back any entries.  If 'start' or 'end'
0320  *  are not cache line aligned, those lines must be written
0321  *  back.
0322  *
0323  *  - start  - virtual start address
0324  *  - end    - virtual end address
0325  */
0326 xscale_dma_inv_range:
     @ Partial lines at either end may hold live data belonging to
     @ neighbouring objects, so they must be cleaned (written back)
     @ before the invalidate loop discards them.
0327     tst r0, #CACHELINESIZE - 1
0328     bic r0, r0, #CACHELINESIZE - 1
0329     mcrne   p15, 0, r0, c7, c10, 1      @ clean D entry
0330     tst r1, #CACHELINESIZE - 1
0331     mcrne   p15, 0, r1, c7, c10, 1      @ clean D entry
0332 1:  mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
0333     add r0, r0, #CACHELINESIZE
0334     cmp r0, r1
0335     blo 1b
0336     mcr p15, 0, r0, c7, c10, 4      @ Drain Write (& Fill) Buffer
0337     ret lr
0338 
0339 /*
0340  *  dma_clean_range(start, end)
0341  *
0342  *  Clean the specified virtual address range.
0343  *
0344  *  - start  - virtual start address
0345  *  - end    - virtual end address
0346  */
0347 xscale_dma_clean_range:
     @ Write back (without invalidating) every D line in [r0, r1).
0348     bic r0, r0, #CACHELINESIZE - 1
0349 1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0350     add r0, r0, #CACHELINESIZE
0351     cmp r0, r1
0352     blo 1b
0353     mcr p15, 0, r0, c7, c10, 4      @ Drain Write (& Fill) Buffer
0354     ret lr
0355 
0356 /*
0357  *  dma_flush_range(start, end)
0358  *
0359  *  Clean and invalidate the specified virtual address range.
0360  *
0361  *  - start  - virtual start address
0362  *  - end    - virtual end address
0363  */
0364 ENTRY(xscale_dma_flush_range)
     @ Clean + invalidate every D line in [r0, r1).
0365     bic r0, r0, #CACHELINESIZE - 1
0366 1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0367     mcr p15, 0, r0, c7, c6, 1       @ invalidate D entry
0368     add r0, r0, #CACHELINESIZE
0369     cmp r0, r1
0370     blo 1b
0371     mcr p15, 0, r0, c7, c10, 4      @ Drain Write (& Fill) Buffer
0372     ret lr
0373 
0374 /*
0375  *  dma_map_area(start, size, dir)
0376  *  - start - kernel virtual start address
0377  *  - size  - size of region
0378  *  - dir   - DMA direction
0379  */
0380 ENTRY(xscale_dma_map_area)
     @ Dispatch on the DMA direction in r2:
     @   DMA_TO_DEVICE   -> clean only
     @   r2 > DMA_TO_DEVICE (DMA_FROM_DEVICE) -> invalidate
     @   otherwise (DMA_BIDIRECTIONAL)        -> clean + invalidate
0381     add r1, r1, r0          @ r1 = end address
0382     cmp r2, #DMA_TO_DEVICE
0383     beq xscale_dma_clean_range
0384     bcs xscale_dma_inv_range
0385     b   xscale_dma_flush_range
0386 ENDPROC(xscale_dma_map_area)
0387 
0388 /*
0389  *  dma_map_area(start, size, dir)
0390  *  - start - kernel virtual start address
0391  *  - size  - size of region
0392  *  - dir   - DMA direction
0393  */
0394 ENTRY(xscale_80200_A0_A1_dma_map_area)
0395     add r1, r1, r0
0396     teq r2, #DMA_TO_DEVICE
0397     beq xscale_dma_clean_range
0398     b   xscale_dma_flush_range
0399 ENDPROC(xscale_80200_A0_A1_dma_map_area)
0400 
0401 /*
0402  *  dma_unmap_area(start, size, dir)
0403  *  - start - kernel virtual start address
0404  *  - size  - size of region
0405  *  - dir   - DMA direction
0406  */
0407 ENTRY(xscale_dma_unmap_area)
     @ Nothing to do at unmap time: all maintenance happens in
     @ dma_map_area above.
0408     ret lr
0409 ENDPROC(xscale_dma_unmap_area)
0410 
     @ XScale has a single cache level, so flushing to the Level Of
     @ Unification Inner Shareable is the same as flushing everything.
0411     .globl  xscale_flush_kern_cache_louis
0412     .equ    xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
0413 
0414     @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
0415     define_cache_functions xscale
0416 
0417 /*
0418  * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
0419  * clear the dirty bits, which means that if we invalidate a dirty line,
0420  * the dirty data can still be written back to external memory later on.
0421  *
0422  * The recommended workaround is to always do a clean D-cache line before
0423  * doing an invalidate D-cache line, so on the affected processors,
0424  * dma_inv_range() is implemented as dma_flush_range().
0425  *
0426  * See erratum #25 of "Intel 80200 Processor Specification Update",
0427  * revision January 22, 2003, available at:
0428  *     http://www.intel.com/design/iio/specupdt/273415.htm
0429  */
0430 .macro a0_alias basename
     @ Export xscale_80200_A0_A1_<basename> as an alias for the
     @ generic xscale_<basename> implementation.
0431     .globl xscale_80200_A0_A1_\basename
0432     .type xscale_80200_A0_A1_\basename , %function
0433     .equ xscale_80200_A0_A1_\basename , xscale_\basename
0434 .endm
0435 
0436 /*
0437  * Most of the cache functions are unchanged for these processor revisions.
0438  * Export suitable alias symbols for the unchanged functions:
0439  */
     @ Note: dma_map_area is deliberately absent here; the A0/A1
     @ variant provides its own implementation above.
0440     a0_alias flush_icache_all
0441     a0_alias flush_user_cache_all
0442     a0_alias flush_kern_cache_all
0443     a0_alias flush_kern_cache_louis
0444     a0_alias flush_user_cache_range
0445     a0_alias coherent_kern_range
0446     a0_alias coherent_user_range
0447     a0_alias flush_kern_dcache_area
0448     a0_alias dma_flush_range
0449     a0_alias dma_unmap_area
0450 
0451     @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
0452     define_cache_functions xscale_80200_A0_A1
0453 
0454 ENTRY(cpu_xscale_dcache_clean_area)
     @ r0 = start address, r1 = size in bytes; clean (write back)
     @ every D line covering the area.
0455 1:  mcr p15, 0, r0, c7, c10, 1      @ clean D entry
0456     add r0, r0, #CACHELINESIZE
0457     subs    r1, r1, #CACHELINESIZE
0458     bhi 1b
0459     ret lr
0460 
0461 /* =============================== PageTable ============================== */
0462 
0463 /*
0464  * cpu_xscale_switch_mm(pgd)
0465  *
0466  * Set the translation base pointer to be as described by pgd.
0467  *
0468  * pgd: new page tables
0469  */
0470     .align  5
0471 ENTRY(cpu_xscale_switch_mm)
     @ r0 = physical address of the new pgd. The whole D-cache must be
     @ cleaned first since XScale caches are virtually indexed.
0472     clean_d_cache r1, r2
0473     mcr p15, 0, ip, c7, c5, 0       @ Invalidate I cache & BTB
0474     mcr p15, 0, ip, c7, c10, 4      @ Drain Write (& Fill) Buffer
0475     mcr p15, 0, r0, c2, c0, 0       @ load page table pointer
0476     mcr p15, 0, ip, c8, c7, 0       @ invalidate I & D TLBs
0477     cpwait_ret lr, ip
0478 
0479 /*
0480  * cpu_xscale_set_pte_ext(ptep, pte, ext)
0481  *
0482  * Set a PTE and flush it out
0483  *
0484  * Errata 40: must set memory to write-through for user read-only pages.
0485  */
     @ Lookup table mapping Linux L_PTE_MT_* memory-type values (used
     @ as a word index) to XScale hardware PTE cacheability bits.
0486 cpu_xscale_mt_table:
0487     .long   0x00                        @ L_PTE_MT_UNCACHED
0488     .long   PTE_BUFFERABLE                  @ L_PTE_MT_BUFFERABLE
0489     .long   PTE_CACHEABLE                   @ L_PTE_MT_WRITETHROUGH
0490     .long   PTE_CACHEABLE | PTE_BUFFERABLE          @ L_PTE_MT_WRITEBACK
0491     .long   PTE_EXT_TEX(1) | PTE_BUFFERABLE         @ L_PTE_MT_DEV_SHARED
0492     .long   0x00                        @ unused
0493     .long   PTE_EXT_TEX(1) | PTE_CACHEABLE          @ L_PTE_MT_MINICACHE
0494     .long   PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
0495     .long   0x00                        @ unused
0496     .long   PTE_BUFFERABLE                  @ L_PTE_MT_DEV_WC
0497     .long   0x00                        @ unused
0498     .long   PTE_CACHEABLE | PTE_BUFFERABLE          @ L_PTE_MT_DEV_CACHED
0499     .long   0x00                        @ L_PTE_MT_DEV_NONSHARED
0500     .long   0x00                        @ unused
0501     .long   0x00                        @ unused
0502     .long   0x00                        @ unused
0504     .align  5
0505 ENTRY(cpu_xscale_set_pte_ext)
     @ r0 = ptep, r1 = pte value, r2 (via the prologue macro) = hw pte.
0506     xscale_set_pte_ext_prologue
0507 
0508     @
0509     @ Erratum 40: must set memory to write-through for user read-only pages
0510     @
0511     and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
0512     teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY
0513 
0514     moveq   r1, #L_PTE_MT_WRITETHROUGH
0515     and r1, r1, #L_PTE_MT_MASK
0516     adr ip, cpu_xscale_mt_table     @ index the memory-type table
0517     ldr ip, [ip, r1]
0518     bic r2, r2, #0x0c           @ clear C and B bits
0519     orr r2, r2, ip          @ merge hw cacheability bits
0520 
0521     xscale_set_pte_ext_epilogue
0522     ret lr
0523 
0524     .ltorg
0525     .align
0526 
     @ Six 32-bit words of CP register state are saved/restored
     @ across suspend (see the stmia below).
0527 .globl  cpu_xscale_suspend_size
0528 .equ    cpu_xscale_suspend_size, 4 * 6
0529 #ifdef CONFIG_ARM_CPU_SUSPEND
0530 ENTRY(cpu_xscale_do_suspend)
     @ r0 = buffer (cpu_xscale_suspend_size bytes) to store state in.
0531     stmfd   sp!, {r4 - r9, lr}
0532     mrc p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
0533     mrc p15, 0, r5, c15, c1, 0  @ CP access reg
0534     mrc p15, 0, r6, c13, c0, 0  @ PID
0535     mrc p15, 0, r7, c3, c0, 0   @ domain ID
0536     mrc p15, 0, r8, c1, c0, 1   @ auxiliary control reg
0537     mrc p15, 0, r9, c1, c0, 0   @ control reg
0538     bic r4, r4, #2      @ clear frequency change bit
0539     stmia   r0, {r4 - r9}       @ store cp regs
0540     ldmfd   sp!, {r4 - r9, pc}
0541 ENDPROC(cpu_xscale_do_suspend)
0542 
0543 ENTRY(cpu_xscale_do_resume)
     @ r0 = buffer written by cpu_xscale_do_suspend,
     @ r1 = translation table base physical address.
0544     ldmia   r0, {r4 - r9}       @ load cp regs
0545     mov ip, #0
0546     mcr p15, 0, ip, c8, c7, 0   @ invalidate I & D TLBs
0547     mcr p15, 0, ip, c7, c7, 0   @ invalidate I & D caches, BTB
0548     mcr p14, 0, r4, c6, c0, 0   @ clock configuration, turbo mode.
0549     mcr p15, 0, r5, c15, c1, 0  @ CP access reg
0550     mcr p15, 0, r6, c13, c0, 0  @ PID
0551     mcr p15, 0, r7, c3, c0, 0   @ domain ID
0552     mcr p15, 0, r1, c2, c0, 0   @ translation table base addr
0553     mcr p15, 0, r8, c1, c0, 1   @ auxiliary control reg
0554     mov r0, r9          @ control register
0555     b   cpu_resume_mmu      @ cpu_resume_mmu enables the MMU
0556 ENDPROC(cpu_xscale_do_resume)
0557 #endif
0558 
0559     .type   __xscale_setup, #function
0560 __xscale_setup:
     @ Early CPU init, called from the proc_info initfn with the MMU
     @ off. Returns in r0 the value to write to the control register
     @ (current value masked by xscale_crval clear, ORed with mmuset).
0561     mcr p15, 0, ip, c7, c7, 0       @ invalidate I, D caches & BTB
0562     mcr p15, 0, ip, c7, c10, 4      @ Drain Write (& Fill) Buffer
0563     mcr p15, 0, ip, c8, c7, 0       @ invalidate I, D TLBs
0564     mov r0, #1 << 6         @ cp6 for IOP3xx and Bulverde
0565     orr r0, r0, #1 << 13        @ Its undefined whether this
0566     mcr p15, 0, r0, c15, c1, 0      @ affects USR or SVC modes
0567 
0568     adr r5, xscale_crval
0569     ldmia   r5, {r5, r6}
0570     mrc p15, 0, r0, c1, c0, 0       @ get control register
0571     bic r0, r0, r5
0572     orr r0, r0, r6
0573     ret lr
0574     .size   __xscale_setup, . - __xscale_setup
0575 
0576     /*
0577      *  R
0578      * .RVI ZFRS BLDP WCAM
0579      * ..11 1.01 .... .101
0580      * 
0581      */
0582     .type   xscale_crval, #object
0583 xscale_crval:
     @ Control register clear/set masks consumed by __xscale_setup
     @ (see the bit layout diagram above).
0584     crval   clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900
0585 
0586     __INITDATA
0587 
0588     @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
0589     define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1
0590 
0591     .section ".rodata"
0592 
0593     string  cpu_arch_name, "armv5te"
0594     string  cpu_elf_name, "v5"
0595 
0596     string  cpu_80200_A0_A1_name, "XScale-80200 A0/A1"
0597     string  cpu_80200_name, "XScale-80200"
0598     string  cpu_80219_name, "XScale-80219"
0599     string  cpu_8032x_name, "XScale-IOP8032x Family"
0600     string  cpu_8033x_name, "XScale-IOP8033x Family"
0601     string  cpu_pxa250_name, "XScale-PXA250"
0602     string  cpu_pxa210_name, "XScale-PXA210"
0603     string  cpu_ixp42x_name, "XScale-IXP42x Family"
0604     string  cpu_ixp43x_name, "XScale-IXP43x Family"
0605     string  cpu_ixp46x_name, "XScale-IXP46x Family"
0606     string  cpu_ixp2400_name, "XScale-IXP2400"
0607     string  cpu_ixp2800_name, "XScale-IXP2800"
0608     string  cpu_pxa255_name, "XScale-PXA255"
0609     string  cpu_pxa270_name, "XScale-PXA270"
0610 
0611     .align
0612 
0613     .section ".proc.info.init", "a"
0614 
0615 .macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
     @ Emit one struct proc_info_list entry (see <asm/procinfo.h>).
     @ \cpu_val/\cpu_mask match the CPU ID register; \cache optionally
     @ overrides the default xscale_cache_fns.
0616     .type   __\name\()_proc_info,#object
0617 __\name\()_proc_info:
0618     .long   \cpu_val
0619     .long   \cpu_mask
0620     .long   PMD_TYPE_SECT | \
0621         PMD_SECT_BUFFERABLE | \
0622         PMD_SECT_CACHEABLE | \
0623         PMD_SECT_AP_WRITE | \
0624         PMD_SECT_AP_READ
0625     .long   PMD_TYPE_SECT | \
0626         PMD_SECT_AP_WRITE | \
0627         PMD_SECT_AP_READ
0628     initfn  __xscale_setup, __\name\()_proc_info
0629     .long   cpu_arch_name
0630     .long   cpu_elf_name
0631     .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
0632     .long   \cpu_name
0633     .long   xscale_processor_functions
0634     .long   v4wbi_tlb_fns
0635     .long   xscale_mc_user_fns
0636     .ifb \cache
0637         .long   xscale_cache_fns
0638     .else
0639         .long   \cache
0640     .endif
0641     .size   __\name\()_proc_info, . - __\name\()_proc_info
0642 .endm
0643 
     @ One proc_info entry per supported XScale variant. The A0/A1
     @ entry must come first: its mask is more specific and it needs
     @ the erratum-#25-safe cache functions.
0644     xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \
0645         cache=xscale_80200_A0_A1_cache_fns
0646     xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name
0647     xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name
0648     xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name
0649     xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name
0650     xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name
0651     xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name
0652     xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name
0653     xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name
0654     xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name
0655     xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name
0656     xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name
0657     xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name
0658     xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name