/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 *
 *  arch/sh/kernel/head.S
 *
 *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *  Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Head.S contains the SH exception handlers and startup code.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <cpu/mmu_context.h>

#ifdef CONFIG_CPU_SH4A
#define SYNCO()     synco

#define PREFI(label, reg)   \
    mov.l   label, reg; \
    prefi   @reg
#else
#define SYNCO()
#define PREFI(label, reg)
#endif
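
/*
 * synco and prefi are SH-4A-only instructions: synco waits for all
 * outstanding memory accesses to complete before the next instruction
 * issues, and prefi prefetches the instruction-cache line at the
 * address held in reg. On every other CPU both macros expand to
 * nothing.
 */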

    .section    .empty_zero_page, "aw"
ENTRY(empty_zero_page)
    .long   1       /* MOUNT_ROOT_RDONLY */
    .long   0       /* RAMDISK_FLAGS */
    .long   0x0200      /* ORIG_ROOT_DEV */
    .long   1       /* LOADER_TYPE */
    .long   0x00000000  /* INITRD_START */
    .long   0x00000000  /* INITRD_SIZE */
#ifdef CONFIG_32BIT
    .long   0x53453f00 + 32 /* "SE?" = 32 bit */
#else
    .long   0x53453f00 + 29 /* "SE?" = 29 bit */
#endif
1:
    .skip   PAGE_SIZE - empty_zero_page - 1b
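
/*
 * The longs above are the built-in defaults for the boot parameter
 * block kept in empty_zero_page; 0x53453f00 is ASCII "SE?" with the
 * low byte recording the physical address-space width (32-bit/PMB
 * versus legacy 29-bit mode). The .skip pads the section out to a
 * full page.
 */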

    __HEAD

/*
 * Condition at the entry of _stext:
 *
 *   BSC has already been initialized.
 *   INTC may or may not be initialized.
 *   VBR may or may not be initialized.
 *   MMU may or may not be initialized.
 *   Cache may or may not be initialized.
 *   Hardware (including on-chip modules) may or may not be initialized.
 *
 */
ENTRY(_stext)
    !           Initialize Status Register
    mov.l   1f, r0      ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
    ldc r0, sr
    !           Initialize global interrupt mask
#ifdef CONFIG_CPU_HAS_SR_RB
    mov #0, r0
    ldc r0, r6_bank
#endif
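
    /*
     * On CPUs with SR.RB the r0-r7 registers are banked; the kernel
     * keeps per-CPU state in the bank-1 registers, r6_bank being the
     * global interrupt mask zeroed here (r7_bank, loaded below, caches
     * the current thread_info).
     */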

#ifdef CONFIG_OF_FLATTREE
    mov r4, r12     ! Store device tree blob pointer in r12
#endif

    /*
     * Prefetch if possible to reduce cache miss penalty.
     *
     * We do this early on for SH-4A as a micro-optimization,
     * as later on we will have speculative execution enabled
     * and this will become less of an issue.
     */
    PREFI(5f, r0)
    PREFI(6f, r0)
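
    /*
     * 5f and 6f hold the addresses of start_kernel and cpu_init (see
     * the literal pool at the end of the file), so this warms the
     * instruction cache for the two jumps made once setup completes.
     */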

    !
    mov.l   2f, r0
    mov r0, r15     ! Set initial r15 (stack pointer)
#ifdef CONFIG_CPU_HAS_SR_RB
    mov.l   7f, r0
    ldc r0, r7_bank ! ... and initial thread_info
#endif

#ifdef CONFIG_PMB
/*
 * Reconfigure the initial PMB mappings set up by the hardware.
 *
 * When we boot in 32-bit MMU mode there are 2 PMB entries already
 * set up for us.
 *
 * Entry  VPN         PPN         V   SZ     C   UB  WT
 * ---------------------------------------------------------------
 *   0    0x80000000  0x00000000  1   512MB  1   0   1
 *   1    0xA0000000  0x00000000  1   512MB  0   0   0
 *
 * But we reprogram them here because we want complete control over
 * our address space and the initial mappings may not map PAGE_OFFSET
 * to __MEMORY_START (or even map all of our RAM).
 *
 * Once we've set up cached and uncached mappings we clear the rest of
 * the PMB entries. This clearing also deals with the fact that PMB
 * entries can persist across reboots. The PMB could have been left in
 * any state when the reboot occurred, so to be safe we clear all
 * entries and start with a clean slate.
 *
 * The uncached mapping is constructed using the smallest possible
 * mapping with a single unbufferable page. Only the kernel text needs to
 * be covered via the uncached mapping so that certain functions can be
 * run uncached.
 *
 * Drivers and the like that have previously abused the 1:1 identity
 * mapping are unsupported in 32-bit mode and must specify their caching
 * preference when page tables are constructed.
 *
 * This frees up the P2 space for more nefarious purposes.
 *
 * Register utilization is as follows:
 *
 *  r0 = PMB_DATA data field
 *  r1 = PMB_DATA address field
 *  r2 = PMB_ADDR data field
 *  r3 = PMB_ADDR address field
 *  r4 = PMB_E_SHIFT
 *  r5 = remaining amount of RAM to map
 *  r6 = PMB mapping size we're trying to use
 *  r7 = cached_to_uncached
 *  r8 = scratch register
 *  r9 = scratch register
 *  r10 = number of PMB entries we've set up
 *  r11 = scratch register
 */
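
/*
 * The seed values loaded just below, .LFIRST_ADDR_ENTRY (PAGE_OFFSET |
 * PMB_V) and .LFIRST_DATA_ENTRY (__MEMORY_START | PMB_V), pair each
 * entry's virtual tag with its physical target, so the cached mapping
 * built here is a straight linear map of RAM starting at PAGE_OFFSET.
 */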

    mov.l   .LMMUCR, r1 /* Flush the TLB */
    mov.l   @r1, r0
    or  #MMUCR_TI, r0
    mov.l   r0, @r1

    mov.l   .LMEMORY_SIZE, r5

    mov #PMB_E_SHIFT, r0
    mov #0x1, r4
    shld    r0, r4
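
    /*
     * r4 = 1 << PMB_E_SHIFT: the byte stride between consecutive slots
     * in the PMB_DATA and PMB_ADDR register arrays, used to step both
     * pointers throughout the rest of this code.
     */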

    mov.l   .LFIRST_DATA_ENTRY, r0
    mov.l   .LPMB_DATA, r1
    mov.l   .LFIRST_ADDR_ENTRY, r2
    mov.l   .LPMB_ADDR, r3

    /*
     * First we need to walk the PMB and figure out if there are any
     * existing mappings that match the initial mappings' VPN/PPN.
     * If these have already been established by the bootloader, we
     * don't bother setting up new entries here, and let the late PMB
     * initialization take care of things instead.
     *
     * Note that we may need to coalesce and merge entries in order
     * to reclaim more available PMB slots, which is much more work
     * than we want to do at this early stage.
     */
    mov #0, r10
    mov #NR_PMB_ENTRIES, r9

    mov r1, r7      /* temporary PMB_DATA iter */

.Lvalidate_existing_mappings:

    mov.l   .LPMB_DATA_MASK, r11
    mov.l   @r7, r8
    and r11, r8
    cmp/eq  r0, r8      /* Check for valid __MEMORY_START mappings */
    bt  .Lpmb_done

    add #1, r10     /* Increment the loop counter */
    cmp/eq  r9, r10
    bf/s    .Lvalidate_existing_mappings
     add    r4, r7      /* Increment to the next PMB_DATA entry */
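
    /*
     * Note that bf/s is a delayed branch: the add above sits in the
     * delay slot and executes before control returns to
     * .Lvalidate_existing_mappings, advancing the PMB_DATA iterator on
     * every pass.
     */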

    /*
     * If we've fallen through, continue with setting up the initial
     * mappings.
     */

    mov r5, r7      /* cached_to_uncached */
    mov #0, r10

#ifdef CONFIG_UNCACHED_MAPPING
    /*
     * Uncached mapping
     */
    mov #(PMB_SZ_16M >> 2), r9
    shll2   r9

    mov #(PMB_UB >> 8), r8
    shll8   r8
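
    /*
     * mov #imm only takes an 8-bit signed immediate on SH, so both
     * constants above are rebuilt with shifts: PMB_SZ_16M from its
     * value >> 2 followed by shll2, and PMB_UB from its value >> 8
     * followed by shll8.
     */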

    or  r0, r8
    or  r9, r8
    mov.l   r8, @r1
    mov r2, r8
    add r7, r8
    mov.l   r8, @r3

    add r4, r1
    add r4, r3
    add #1, r10
#endif

/*
 * Iterate over all of the available sizes from largest to
 * smallest for constructing the cached mapping.
 */
#define __PMB_ITER_BY_SIZE(size)            \
.L##size:                       \
    mov #(size >> 4), r6;           \
    shll16  r6;                 \
    shll8   r6;                 \
                            \
    cmp/hi  r5, r6;                 \
    bt  9999f;                  \
                            \
    mov #(PMB_SZ_##size##M >> 2), r9;       \
    shll2   r9;                 \
                            \
    /*                      \
     * Cached mapping               \
     */                     \
    mov #PMB_C, r8;             \
    or  r0, r8;                 \
    or  r9, r8;                 \
    mov.l   r8, @r1;                \
    mov.l   r2, @r3;                \
                            \
    /* Increment to the next PMB_DATA entry */  \
    add r4, r1;                 \
    /* Increment to the next PMB_ADDR entry */  \
    add r4, r3;                 \
    /* Increment number of PMB entries */       \
    add #1, r10;                \
                            \
    sub r6, r5;                 \
    add r6, r0;                 \
    add r6, r2;                 \
                            \
    bra .L##size;               \
     nop; /* fill the bra delay slot */     \
9999:
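
/*
 * A worked example of the size arithmetic above: for
 * __PMB_ITER_BY_SIZE(512), r6 = (512 >> 4) << 24 = 512 << 20, i.e.
 * 512MB expressed in bytes. cmp/hi r5, r6 sets T when r6 > r5, so
 * bt 9999f skips the mapping as soon as the candidate size exceeds
 * the RAM left to map.
 */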

    __PMB_ITER_BY_SIZE(512)
    __PMB_ITER_BY_SIZE(128)
    __PMB_ITER_BY_SIZE(64)
    __PMB_ITER_BY_SIZE(16)

#ifdef CONFIG_UNCACHED_MAPPING
    /*
     * Now that we can access it, update cached_to_uncached and
     * uncached_size.
     */
    mov.l   .Lcached_to_uncached, r0
    mov.l   r7, @r0

    mov.l   .Luncached_size, r0
    mov #1, r7
    shll16  r7
    shll8   r7
    mov.l   r7, @r0
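
    /*
     * r7 = 1 << 24 = 16MB here, matching the single PMB_SZ_16M entry
     * used for the uncached mapping above.
     */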
#endif

    /*
     * Clear the remaining PMB entries.
     *
     * r3 = entry to begin clearing from
     * r10 = number of entries we've set up so far
     */
    mov #0, r1
    mov #NR_PMB_ENTRIES, r0

.Lagain:
    mov.l   r1, @r3     /* Clear PMB_ADDR entry */
    add #1, r10     /* Increment the loop counter */
    cmp/eq  r0, r10
    bf/s    .Lagain
     add    r4, r3      /* Increment to the next PMB_ADDR entry */

    mov.l   6f, r0
    icbi    @r0
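
    /*
     * icbi invalidates the instruction-cache block containing cpu_init
     * (6f holds its address); presumably this is done for its
     * serializing side effect, so that instruction fetch sees the new
     * PMB configuration before execution continues.
     */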

.Lpmb_done:
#endif /* CONFIG_PMB */

#ifndef CONFIG_SH_NO_BSS_INIT
    /*
     * Don't clear BSS if running on slow platforms such as an RTL
     * simulation or remote memory via an SHdebug link, where the
     * memory is guaranteed to be all zero on boot anyway.
     */
                ! Clear BSS area
#ifdef CONFIG_SMP
    mov.l   3f, r0
    cmp/eq  #0, r0      ! skip clear if set to zero
    bt  10f
#endif

    mov.l   3f, r1
    add #4, r1
    mov.l   4f, r2
    mov #0, r0
9:  cmp/hs  r2, r1
    bf/s    9b      ! while (r1 < r2)
     mov.l  r0,@-r2
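
    /*
     * The loop clears downward from _end via the pre-decrement store
     * in the bf/s delay slot, which executes even on the final,
     * non-branching pass; starting r1 at __bss_start + 4 therefore
     * makes that last store zero the word at __bss_start itself.
     */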

10:
#endif

#ifdef CONFIG_OF_FLATTREE
    mov.l   8f, r0      ! Make flat device tree available early.
    jsr @r0
     mov    r12, r4
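
    /*
     * The SH C calling convention passes the first argument in r4, so
     * the jsr delay slot hands sh_fdt_init the device tree blob
     * pointer that was stashed in r12 at entry.
     */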
#endif

    !           Additional CPU initialization
    mov.l   6f, r0
    jsr @r0
     nop

    SYNCO()         ! Wait for pending instructions.

    !           Start kernel
    mov.l   5f, r0
    jmp @r0
     nop

    .balign 4
#if defined(CONFIG_CPU_SH2)
1:  .long   0x000000F0      ! IMASK=0xF
#else
1:  .long   0x500080F0      ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2:  .long   init_thread_union+THREAD_SIZE
3:  .long   __bss_start
4:  .long   _end
5:  .long   start_kernel
6:  .long   cpu_init
7:  .long   init_thread_union
#if defined(CONFIG_OF_FLATTREE)
8:  .long   sh_fdt_init
#endif
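
/*
 * The numbered entries above and the .L* entries below form the
 * literal pool for this file's PC-relative mov.l loads; SH has no way
 * to materialize a full 32-bit constant in a single instruction, hence
 * the .balign 4 and the 1f..8f references throughout the code.
 */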

#ifdef CONFIG_PMB
.LPMB_ADDR:     .long   PMB_ADDR
.LPMB_DATA:     .long   PMB_DATA
.LPMB_DATA_MASK:    .long   PMB_PFN_MASK | PMB_V
.LFIRST_ADDR_ENTRY: .long   PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY: .long   __MEMORY_START | PMB_V
.LMMUCR:        .long   MMUCR
.LMEMORY_SIZE:      .long   __MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached:   .long   cached_to_uncached
.Luncached_size:    .long   uncached_size
#endif
#endif