/*
 * linux/arch/unicore32/kernel/head.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <generated/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/hwdef-copro.h>
#include <asm/pgtable-hwdef.h>

#if (PHYS_OFFSET & 0x003fffff)
#error "PHYS_OFFSET must be at a 4MiB boundary!"
#endif

#define KERNEL_RAM_VADDR    (PAGE_OFFSET + KERNEL_IMAGE_START)
#define KERNEL_RAM_PADDR    (PHYS_OFFSET + KERNEL_IMAGE_START)

#define KERNEL_PGD_PADDR    (KERNEL_RAM_PADDR - 0x1000)
#define KERNEL_PGD_VADDR    (KERNEL_RAM_VADDR - 0x1000)

#define KERNEL_START        KERNEL_RAM_VADDR
#define KERNEL_END      _end
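
/*
 * Worked example (illustrative values only; the real ones come from
 * <asm/memory.h>): with PHYS_OFFSET = 0x40000000, PAGE_OFFSET = 0xc0000000
 * and KERNEL_IMAGE_START = 0x00408000,
 *   KERNEL_RAM_VADDR = 0xc0408000, KERNEL_RAM_PADDR = 0x40408000,
 *   KERNEL_PGD_VADDR = 0xc0407000, KERNEL_PGD_PADDR = 0x40407000,
 * i.e. the page directory occupies the single 4KiB page just below the
 * kernel image.
 */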

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 4K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x1000.
 */
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

    .globl  swapper_pg_dir
    .equ    swapper_pg_dir, KERNEL_RAM_VADDR - 0x1000
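
/*
 * The pgd is a single-level, 4KiB table: 1024 four-byte entries, each
 * mapping one 4MiB section, which together cover the full 4GiB address
 * space (hence the ">> #22" section arithmetic used throughout below).
 */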

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 */
    __HEAD
ENTRY(stext)
    @ set asr
    mov r0, #PRIV_MODE          @ ensure priv mode
    or  r0, #PSR_R_BIT | PSR_I_BIT  @ disable irqs
    mov.a   asr, r0

    @ processor identification
    movc    r0, p0.c0, #0           @ cpuid
    movl    r1, 0xff00ffff          @ mask
    movl    r2, 0x4d000863          @ value
    and r0, r1, r0
    cxor.a  r0, r2
    bne __error_p           @ invalid processor id
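
    @ The mask keeps bits 31-24 and 15-0 of the CPUID and discards
    @ bits 23-16 (presumably a revision field); the masked value must
    @ equal 0x4d000863, the ID of the supported UniCore processor.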

    /*
     * Clear the 4K level 1 swapper page table
     */
    movl    r0, #KERNEL_PGD_PADDR       @ page table address
    mov r1, #0
    add r2, r0, #0x1000
101:    stw.w   r1, [r0]+, #4
    stw.w   r1, [r0]+, #4
    stw.w   r1, [r0]+, #4
    stw.w   r1, [r0]+, #4
    cxor.a  r0, r2
    bne 101b
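
    @ Each pass of the loop above stores four words (16 bytes), so the
    @ 4KiB table is cleared in 0x1000 / 16 = 256 iterations.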

    movl    r4, #KERNEL_PGD_PADDR       @ page table address
    mov r7, #PMD_TYPE_SECT | PMD_PRESENT    @ page size: section
    or  r7, r7, #PMD_SECT_CACHEABLE     @ cacheable
    or  r7, r7, #PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC

    /*
     * Create an identity mapping for the first 4MB of the kernel to
     * cater for the MMU enable.  This identity mapping
     * will be removed by paging_init().  We use our current program
     * counter to determine the corresponding section base address.
     */
    mov r6, pc
    mov r6, r6 >> #22           @ start of kernel section
    or  r1, r7, r6 << #22       @ flags + kernel base
    stw r1, [r4+], r6 << #2     @ identity mapping
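
    /*
     * Illustrative example: executing at physical address 0x40408000
     * gives r6 = 0x40408000 >> 22 = 0x101, so the entry is written at
     * byte offset 0x101 << 2 = 0x404 and maps virtual section
     * 0x40400000 to physical section 0x40400000.
     */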

    /*
     * Now set up the page tables for our kernel direct
     * mapped region.
     */
    add r0, r4,  #(KERNEL_START & 0xff000000) >> 20
    stw.w   r1, [r0+], #(KERNEL_START & 0x00c00000) >> 20
    movl    r6, #(KERNEL_END - 1)
    add r0, r0, #4
    add r6, r4, r6 >> #20
102:    csub.a  r0, r6
    add r1, r1, #1 << 22
    bua 103f
    stw.w   r1, [r0]+, #4
    b   102b
103:
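    /*
     * The pgd byte offset of a virtual address va is (va >> 22) * 4,
     * i.e. va >> 20.  The add/store pair above splits KERNEL_START's
     * offset (KERNEL_START & 0xffc00000) >> 20 into a 0xff000000 part
     * and a 0x00c00000 part, presumably because the combined constant
     * cannot be encoded as a single immediate.  The loop then writes one
     * section entry per 4MiB (r1 += 1 << 22) until the entry covering
     * KERNEL_END - 1 has been written.
     */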
    /*
     * Then map the first 4MB of RAM in case it contains our boot params.
     */
    add r0, r4, #PAGE_OFFSET >> 20
    or  r6, r7, #(PHYS_OFFSET & 0xffc00000)
    stw r6, [r0]
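
    @ PAGE_OFFSET >> 20 is the pgd byte offset of the first kernel
    @ virtual section; e.g. (illustrative) PAGE_OFFSET = 0xc0000000
    @ selects entry 768 at byte offset 0xc00, mapping the first 4MiB
    @ of RAM at PAGE_OFFSET.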

    ldw r15, __switch_data      @ address to jump to after
                        @ the MMU has been enabled

    /*
     * Initialise TLB, Caches, and MMU state ready to switch the MMU
     * on.
     */
    mov r0, #0
    movc    p0.c5, r0, #28          @ cache invalidate all
    nop8
    movc    p0.c6, r0, #6           @ TLB invalidate all
    nop8
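
    @ The nop8 padding after each coprocessor operation presumably
    @ gives the invalidate time to take effect before any dependent
    @ instruction fetches.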

    /*
     * ..V. .... ..TB IDAM
     * ..1. .... ..01 1111
     */
    movl    r0, #0x201f         @ control register setting
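
    /*
     * 0x201f = 0010 0000 0001 1111: per the legend above, V = 1, T = 0,
     * and B, I, D, A, M all set.  Judging from the CR_* handling below,
     * these enable write buffering (cleared for a write-through D-cache),
     * the I-cache, the D-cache, the alignment trap and the MMU; the
     * meanings of V and T are not spelled out here.
     */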

    /*
     * Set up common bits before finally enabling the MMU.  Essentially
     * this is just loading the page table pointer and domain access
     * registers.
     */
#ifndef CONFIG_ALIGNMENT_TRAP
    andn    r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
    andn    r0, r0, #CR_D
#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
    andn    r0, r0, #CR_B
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
    andn    r0, r0, #CR_I
#endif

    movc    p0.c2, r4, #0           @ set pgd
    b   __turn_mmu_on
ENDPROC(stext)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 *
 *  r0  = cp#0 control register
 *  r15 = *virtual* address to jump to upon completion
 */
    .align  5
__turn_mmu_on:
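    @ .align 5 places this routine on a 32-byte boundary, presumably so
    @ the whole sequence fits in a single I-cache line while the mapping
    @ changes underneath us.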
    mov r0, r0
    movc    p0.c1, r0, #0           @ write control reg
    nop                 @ fetch inst by phys addr
    mov pc, r15
    nop8                    @ fetch inst by phys addr
ENDPROC(__turn_mmu_on)

/*
 * Set up the initial page tables.  We only set up the barest
 * amount required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r9  = cpuid
 * r10 = procinfo
 *
 * Returns:
 *  r0, r3, r6, r7 corrupted
 *  r4 = physical page table address
 */
    .ltorg

    .align  2
    .type   __switch_data, %object
__switch_data:
    .long   __mmap_switched
    .long   __bss_start         @ r6
    .long   _end                @ r7
    .long   cr_alignment            @ r8
    .long   init_thread_union + THREAD_START_SP @ sp
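
/*
 * Layout note: the first word of __switch_data is the virtual address
 * loaded into r15 by stext before the MMU is enabled; the remaining
 * words are picked up by __mmap_switched below (r6-r8 via ldm.w, then sp).
 */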

/*
 * The following fragment of code is executed with the MMU on,
 * and uses absolute addresses; this is not position independent.
 *
 *  r0  = cp#0 control register
 */
__mmap_switched:
    adr r3, __switch_data + 4

    ldm.w   (r6, r7, r8), [r3]+     @ r6 = __bss_start, r7 = _end,
                        @ r8 = cr_alignment
    ldw sp, [r3]            @ initial stack pointer

    mov fp, #0              @ Clear BSS (and zero fp)
203:    csub.a  r6, r7
    bea 204f
    stw.w   fp, [r6]+,#4
    b   203b
204:
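    @ Derive r1 (control register value without the A bit) from r0 and
    @ store both words at cr_alignment; presumably cr_alignment followed
    @ by cr_no_alignment, mirroring the ARM scheme this code derives from.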
    andn    r1, r0, #CR_A           @ Clear 'A' bit
    stm (r0, r1), [r8]+         @ Save control register values
    b   start_kernel
ENDPROC(__mmap_switched)

/*
 * Exception handling.  Something went wrong and we can't proceed.  We
 * ought to tell the user, but since we don't have any guarantee that
 * we're even running on the right architecture, we do virtually nothing.
 *
 * If CONFIG_DEBUG_LL is set, we try to print out something about the error
 * and hope for the best (useful if the bootloader fails to pass a proper
 * machine ID, for example).
 */
__error_p:
#ifdef CONFIG_DEBUG_LL
    adr r0, str_p1
    b.l printascii
    mov r0, r9
    b.l printhex8
    adr r0, str_p2
    b.l printascii
901:    nop8
    b   901b
str_p1: .asciz  "\nError: unrecognized processor variant (0x"
str_p2: .asciz  ").\n"
    .align
#endif
ENDPROC(__error_p)