Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  linux/arch/arm/mm/alignment.c
0004  *
0005  *  Copyright (C) 1995  Linus Torvalds
0006  *  Modifications for ARM processor (c) 1995-2001 Russell King
0007  *  Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
0008  *  - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
0009  *    Copyright (C) 1996, Cygnus Software Technologies Ltd.
0010  */
0011 #include <linux/moduleparam.h>
0012 #include <linux/compiler.h>
0013 #include <linux/kernel.h>
0014 #include <linux/sched/debug.h>
0015 #include <linux/errno.h>
0016 #include <linux/string.h>
0017 #include <linux/proc_fs.h>
0018 #include <linux/seq_file.h>
0019 #include <linux/init.h>
0020 #include <linux/sched/signal.h>
0021 #include <linux/uaccess.h>
0022 
0023 #include <asm/cp15.h>
0024 #include <asm/system_info.h>
0025 #include <asm/unaligned.h>
0026 #include <asm/opcodes.h>
0027 
0028 #include "fault.h"
0029 #include "mm.h"
0030 
0031 /*
0032  * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
0033  * /proc/sys/debug/alignment, modified and integrated into
0034  * Linux 2.1 by Russell King
0035  *
0036  * Speed optimisations and better fault handling by Russell King.
0037  *
0038  * *** NOTE ***
0039  * This code is not portable to processors with late data abort handling.
0040  */
/*
 * Field-extraction helpers for the ARM instruction encodings handled
 * below.  "i" is always the 32-bit (possibly Thumb->ARM converted)
 * instruction word.
 */
#define CODING_BITS(i)  (i & 0x0e000000)
#define COND_BITS(i)    (i & 0xf0000000)

#define LDST_I_BIT(i)   (i & (1 << 26))     /* Immediate constant   */
#define LDST_P_BIT(i)   (i & (1 << 24))     /* Preindex     */
#define LDST_U_BIT(i)   (i & (1 << 23))     /* Add offset       */
#define LDST_W_BIT(i)   (i & (1 << 21))     /* Writeback        */
#define LDST_L_BIT(i)   (i & (1 << 20))     /* Load         */

/* True when the P (bit 24) and U (bit 23) bits are equal. */
#define LDST_P_EQ_U(i)  ((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)

#define LDSTHD_I_BIT(i) (i & (1 << 22))     /* double/half-word immed */
#define LDM_S_BIT(i)    (i & (1 << 22))     /* write CPSR from SPSR */

#define RN_BITS(i)  ((i >> 16) & 15)    /* Rn           */
#define RD_BITS(i)  ((i >> 12) & 15)    /* Rd           */
#define RM_BITS(i)  (i & 15)        /* Rm           */

#define REGMASK_BITS(i) (i & 0xffff)
#define OFFSET_BITS(i)  (i & 0x0fff)

/* Register-offset addressing: optional shift applied to Rm. */
#define IS_SHIFT(i) (i & 0x0ff0)
#define SHIFT_BITS(i)   ((i >> 7) & 0x1f)
#define SHIFT_TYPE(i)   (i & 0x60)
#define SHIFT_LSL   0x00
#define SHIFT_LSR   0x20
#define SHIFT_ASR   0x40
#define SHIFT_RORRRX    0x60

/* Sentinel returned by thumb2arm() for untranslatable instructions. */
#define BAD_INSTR   0xdeadc0de

/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either f800h,e800h,f800h */
#define IS_T32(hi16) \
    (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
0075 
/* Alignment-fault statistics, reported via the proc interface below. */
static unsigned long ai_user;       /* faults taken in user mode */
static unsigned long ai_sys;        /* faults taken in kernel mode */
static void *ai_sys_last_pc;        /* PC of the most recent kernel fault */
static unsigned long ai_skipped;    /* faults we could not fix up */
static unsigned long ai_half;       /* half-word accesses fixed up */
static unsigned long ai_word;       /* word accesses fixed up */
static unsigned long ai_dword;      /* double-word accesses fixed up */
static unsigned long ai_multi;      /* ldm/stm accesses fixed up */
static int ai_usermode;             /* UM_* policy for user-mode faults */
static unsigned long cr_no_alignment;

/* "alignment=" kernel parameter / writable module parameter. */
core_param(alignment, ai_usermode, int, 0600);
0088 
/* ai_usermode policy bits for user-mode alignment faults. */
#define UM_WARN     (1 << 0)    /* log a warning */
#define UM_FIXUP    (1 << 1)    /* emulate the access */
#define UM_SIGNAL   (1 << 2)    /* deliver a signal instead */
0092 
0093 /* Return true if and only if the ARMv6 unaligned access model is in use. */
0094 static bool cpu_is_v6_unaligned(void)
0095 {
0096     return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U;
0097 }
0098 
0099 static int safe_usermode(int new_usermode, bool warn)
0100 {
0101     /*
0102      * ARMv6 and later CPUs can perform unaligned accesses for
0103      * most single load and store instructions up to word size.
0104      * LDM, STM, LDRD and STRD still need to be handled.
0105      *
0106      * Ignoring the alignment fault is not an option on these
0107      * CPUs since we spin re-faulting the instruction without
0108      * making any progress.
0109      */
0110     if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
0111         new_usermode |= UM_FIXUP;
0112 
0113         if (warn)
0114             pr_warn("alignment: ignoring faults is unsafe on this CPU.  Defaulting to fixup mode.\n");
0115     }
0116 
0117     return new_usermode;
0118 }
0119 
0120 #ifdef CONFIG_PROC_FS
/* Human-readable names for the ai_usermode UM_* bit combinations 0..5. */
static const char *usermode_action[] = {
    "ignored",
    "warn",
    "fixup",
    "fixup+warn",
    "signal",
    "signal+warn"
};
0129 
0130 static int alignment_proc_show(struct seq_file *m, void *v)
0131 {
0132     seq_printf(m, "User:\t\t%lu\n", ai_user);
0133     seq_printf(m, "System:\t\t%lu (%pS)\n", ai_sys, ai_sys_last_pc);
0134     seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
0135     seq_printf(m, "Half:\t\t%lu\n", ai_half);
0136     seq_printf(m, "Word:\t\t%lu\n", ai_word);
0137     if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
0138         seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
0139     seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
0140     seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
0141             usermode_action[ai_usermode]);
0142 
0143     return 0;
0144 }
0145 
/* Open hook: bind the seq_file single-show helper to our show routine. */
static int alignment_proc_open(struct inode *inode, struct file *file)
{
    return single_open(file, alignment_proc_show, NULL);
}
0150 
0151 static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
0152                     size_t count, loff_t *pos)
0153 {
0154     char mode;
0155 
0156     if (count > 0) {
0157         if (get_user(mode, buffer))
0158             return -EFAULT;
0159         if (mode >= '0' && mode <= '5')
0160             ai_usermode = safe_usermode(mode - '0', true);
0161     }
0162     return count;
0163 }
0164 
/* proc interface operations for the alignment statistics file. */
static const struct proc_ops alignment_proc_ops = {
    .proc_open  = alignment_proc_open,
    .proc_read  = seq_read,
    .proc_lseek = seq_lseek,
    .proc_release   = single_release,
    .proc_write = alignment_proc_write,
};
0172 #endif /* CONFIG_PROC_FS */
0173 
/*
 * Addressing-mode offset, viewed unsigned or signed as required
 * (the signed view is needed for ASR shifts).
 */
union offset_union {
    unsigned long un;
      signed long sn;
};
0178 
/* Return codes from the per-instruction fixup handlers. */
#define TYPE_ERROR  0   /* undecodable/unsupported instruction */
#define TYPE_FAULT  1   /* memory access itself faulted */
#define TYPE_LDST   2   /* fixed up; base writeback still needed */
#define TYPE_DONE   3   /* fully handled (including writeback/PC) */

/* Endianness helpers for the byte-at-a-time accessors below. */
#ifdef __ARMEB__
#define BE      1
#define FIRST_BYTE_16   "mov    %1, %1, ror #8\n"
#define FIRST_BYTE_32   "mov    %1, %1, ror #24\n"
#define NEXT_BYTE   "ror #24"
#else
#define BE      0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE   "lsr #8"
#endif
0195 
/*
 * Load one byte with an exception-table fixup: on an access fault the
 * fixup path sets "err" to 1 instead of oopsing.  The "ins" argument
 * selects ldrb vs ldrbt (the latter performs an unprivileged access
 * for user-mode fixups).
 */
#define __get8_unaligned_check(ins,val,addr,err)    \
    __asm__(                    \
 ARM(   "1: "ins"   %1, [%2], #1\n" )       \
 THUMB( "1: "ins"   %1, [%2]\n" )       \
 THUMB( "   add %2, %2, #1\n"   )       \
    "2:\n"                      \
    "   .pushsection .text.fixup,\"ax\"\n"  \
    "   .align  2\n"                \
    "3: mov %0, #1\n"           \
    "   b   2b\n"               \
    "   .popsection\n"              \
    "   .pushsection __ex_table,\"a\"\n"    \
    "   .align  3\n"                \
    "   .long   1b, 3b\n"           \
    "   .popsection\n"              \
    : "=r" (err), "=&r" (val), "=r" (addr)      \
    : "0" (err), "2" (addr))

/*
 * Assemble a 16-bit value from two byte loads; jumps to the enclosing
 * function's "fault" label if either load faulted.
 */
#define __get16_unaligned_check(ins,val,addr)           \
    do {                            \
        unsigned int err = 0, v, a = addr;      \
        __get8_unaligned_check(ins,v,a,err);        \
        val =  v << ((BE) ? 8 : 0);         \
        __get8_unaligned_check(ins,v,a,err);        \
        val |= v << ((BE) ? 0 : 8);         \
        if (err)                    \
            goto fault;             \
    } while (0)

#define get16_unaligned_check(val,addr) \
    __get16_unaligned_check("ldrb",val,addr)

#define get16t_unaligned_check(val,addr) \
    __get16_unaligned_check("ldrbt",val,addr)

/* As above, but assemble a 32-bit value from four byte loads. */
#define __get32_unaligned_check(ins,val,addr)           \
    do {                            \
        unsigned int err = 0, v, a = addr;      \
        __get8_unaligned_check(ins,v,a,err);        \
        val =  v << ((BE) ? 24 :  0);           \
        __get8_unaligned_check(ins,v,a,err);        \
        val |= v << ((BE) ? 16 :  8);           \
        __get8_unaligned_check(ins,v,a,err);        \
        val |= v << ((BE) ?  8 : 16);           \
        __get8_unaligned_check(ins,v,a,err);        \
        val |= v << ((BE) ?  0 : 24);           \
        if (err)                    \
            goto fault;             \
    } while (0)

#define get32_unaligned_check(val,addr) \
    __get32_unaligned_check("ldrb",val,addr)

#define get32t_unaligned_check(val,addr) \
    __get32_unaligned_check("ldrbt",val,addr)

/*
 * Store a 16-bit value one byte at a time, with exception-table
 * fixups on each store; jumps to "fault" if any store faulted.
 */
#define __put16_unaligned_check(ins,val,addr)           \
    do {                            \
        unsigned int err = 0, v = val, a = addr;    \
        __asm__( FIRST_BYTE_16              \
     ARM(   "1: "ins"   %1, [%2], #1\n" )       \
     THUMB( "1: "ins"   %1, [%2]\n" )       \
     THUMB( "   add %2, %2, #1\n"   )       \
        "   mov %1, %1, "NEXT_BYTE"\n"      \
        "2: "ins"   %1, [%2]\n"         \
        "3:\n"                      \
        "   .pushsection .text.fixup,\"ax\"\n"  \
        "   .align  2\n"                \
        "4: mov %0, #1\n"           \
        "   b   3b\n"               \
        "   .popsection\n"              \
        "   .pushsection __ex_table,\"a\"\n"    \
        "   .align  3\n"                \
        "   .long   1b, 4b\n"           \
        "   .long   2b, 4b\n"           \
        "   .popsection\n"              \
        : "=r" (err), "=&r" (v), "=&r" (a)      \
        : "0" (err), "1" (v), "2" (a));         \
        if (err)                    \
            goto fault;             \
    } while (0)

#define put16_unaligned_check(val,addr)  \
    __put16_unaligned_check("strb",val,addr)

#define put16t_unaligned_check(val,addr) \
    __put16_unaligned_check("strbt",val,addr)

/* As above, but store a 32-bit value as four byte stores. */
#define __put32_unaligned_check(ins,val,addr)           \
    do {                            \
        unsigned int err = 0, v = val, a = addr;    \
        __asm__( FIRST_BYTE_32              \
     ARM(   "1: "ins"   %1, [%2], #1\n" )       \
     THUMB( "1: "ins"   %1, [%2]\n" )       \
     THUMB( "   add %2, %2, #1\n"   )       \
        "   mov %1, %1, "NEXT_BYTE"\n"      \
     ARM(   "2: "ins"   %1, [%2], #1\n" )       \
     THUMB( "2: "ins"   %1, [%2]\n" )       \
     THUMB( "   add %2, %2, #1\n"   )       \
        "   mov %1, %1, "NEXT_BYTE"\n"      \
     ARM(   "3: "ins"   %1, [%2], #1\n" )       \
     THUMB( "3: "ins"   %1, [%2]\n" )       \
     THUMB( "   add %2, %2, #1\n"   )       \
        "   mov %1, %1, "NEXT_BYTE"\n"      \
        "4: "ins"   %1, [%2]\n"         \
        "5:\n"                      \
        "   .pushsection .text.fixup,\"ax\"\n"  \
        "   .align  2\n"                \
        "6: mov %0, #1\n"           \
        "   b   5b\n"               \
        "   .popsection\n"              \
        "   .pushsection __ex_table,\"a\"\n"    \
        "   .align  3\n"                \
        "   .long   1b, 6b\n"           \
        "   .long   2b, 6b\n"           \
        "   .long   3b, 6b\n"           \
        "   .long   4b, 6b\n"           \
        "   .popsection\n"              \
        : "=r" (err), "=&r" (v), "=&r" (a)      \
        : "0" (err), "1" (v), "2" (a));         \
        if (err)                    \
            goto fault;             \
    } while (0)

#define put32_unaligned_check(val,addr) \
    __put32_unaligned_check("strb", val, addr)

#define put32t_unaligned_check(val,addr) \
    __put32_unaligned_check("strbt", val, addr)
0325 
0326 static void
0327 do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
0328 {
0329     if (!LDST_U_BIT(instr))
0330         offset.un = -offset.un;
0331 
0332     if (!LDST_P_BIT(instr))
0333         addr += offset.un;
0334 
0335     if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
0336         regs->uregs[RN_BITS(instr)] = addr;
0337 }
0338 
/*
 * Fix up a misaligned half-word load/store (LDRH/STRH/LDRSH).
 * Kernel-mode accesses use the privileged byte accessors; user-mode
 * accesses use the unprivileged (ldrbt/strbt) variants under
 * uaccess_save_and_enable().  Returns TYPE_LDST on success or
 * TYPE_FAULT if the emulated access itself faulted (via the "fault"
 * label targeted by the accessor macros).
 */
static int
do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
{
    unsigned int rd = RD_BITS(instr);

    ai_half += 1;

    if (user_mode(regs))
        goto user;

    if (LDST_L_BIT(instr)) {
        unsigned long val;
        get16_unaligned_check(val, addr);

        /* signed half-word? */
        if (instr & 0x40)
            val = (signed long)((signed short) val);

        regs->uregs[rd] = val;
    } else
        put16_unaligned_check(regs->uregs[rd], addr);

    return TYPE_LDST;

 user:
    if (LDST_L_BIT(instr)) {
        unsigned long val;
        unsigned int __ua_flags = uaccess_save_and_enable();

        get16t_unaligned_check(val, addr);
        uaccess_restore(__ua_flags);

        /* signed half-word? */
        if (instr & 0x40)
            val = (signed long)((signed short) val);

        regs->uregs[rd] = val;
    } else {
        unsigned int __ua_flags = uaccess_save_and_enable();
        put16t_unaligned_check(regs->uregs[rd], addr);
        uaccess_restore(__ua_flags);
    }

    return TYPE_LDST;

 fault:
    return TYPE_FAULT;
}
0387 
/*
 * Fix up a misaligned double-word load/store (LDRD/STRD), in both the
 * ARM and Thumb-2 32-bit encodings.  The two registers are transferred
 * as two 32-bit accesses.  Returns TYPE_LDST on success, TYPE_ERROR
 * for an invalid register pair, or TYPE_FAULT if an access faulted.
 */
static int
do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
{
    unsigned int rd = RD_BITS(instr);
    unsigned int rd2;
    int load;

    if ((instr & 0xfe000000) == 0xe8000000) {
        /* ARMv7 Thumb-2 32-bit LDRD/STRD: second register is explicit */
        rd2 = (instr >> 8) & 0xf;
        load = !!(LDST_L_BIT(instr));
    } else if (((rd & 1) == 1) || (rd == 14))
        /* ARM encoding requires an even first register, and not lr */
        goto bad;
    else {
        /* ARM encoding: opcode bits 0xd0 = load; pair is rd, rd+1 */
        load = ((instr & 0xf0) == 0xd0);
        rd2 = rd + 1;
    }

    ai_dword += 1;

    if (user_mode(regs))
        goto user;

    if (load) {
        unsigned long val;
        get32_unaligned_check(val, addr);
        regs->uregs[rd] = val;
        get32_unaligned_check(val, addr + 4);
        regs->uregs[rd2] = val;
    } else {
        put32_unaligned_check(regs->uregs[rd], addr);
        put32_unaligned_check(regs->uregs[rd2], addr + 4);
    }

    return TYPE_LDST;

 user:
    if (load) {
        unsigned long val, val2;
        unsigned int __ua_flags = uaccess_save_and_enable();

        get32t_unaligned_check(val, addr);
        get32t_unaligned_check(val2, addr + 4);

        uaccess_restore(__ua_flags);

        /* only commit to the register file once both loads succeeded */
        regs->uregs[rd] = val;
        regs->uregs[rd2] = val2;
    } else {
        unsigned int __ua_flags = uaccess_save_and_enable();
        put32t_unaligned_check(regs->uregs[rd], addr);
        put32t_unaligned_check(regs->uregs[rd2], addr + 4);
        uaccess_restore(__ua_flags);
    }

    return TYPE_LDST;
 bad:
    return TYPE_ERROR;
 fault:
    return TYPE_FAULT;
}
0449 
/*
 * Fix up a misaligned word load/store (LDR/STR).  User-mode accesses,
 * and kernel LDRT/STRT forms (post-indexed with W set, which perform
 * an unprivileged access), go through the "trans" path using the
 * ldrbt/strbt accessors.  Returns TYPE_LDST or TYPE_FAULT.
 */
static int
do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
{
    unsigned int rd = RD_BITS(instr);

    ai_word += 1;

    if ((!LDST_P_BIT(instr) && LDST_W_BIT(instr)) || user_mode(regs))
        goto trans;

    if (LDST_L_BIT(instr)) {
        unsigned int val;
        get32_unaligned_check(val, addr);
        regs->uregs[rd] = val;
    } else
        put32_unaligned_check(regs->uregs[rd], addr);
    return TYPE_LDST;

 trans:
    if (LDST_L_BIT(instr)) {
        unsigned int val;
        unsigned int __ua_flags = uaccess_save_and_enable();
        get32t_unaligned_check(val, addr);
        uaccess_restore(__ua_flags);
        regs->uregs[rd] = val;
    } else {
        unsigned int __ua_flags = uaccess_save_and_enable();
        put32t_unaligned_check(regs->uregs[rd], addr);
        uaccess_restore(__ua_flags);
    }
    return TYPE_LDST;

 fault:
    return TYPE_FAULT;
}
0485 
0486 /*
0487  * LDM/STM alignment handler.
0488  *
0489  * There are 4 variants of this instruction:
0490  *
0491  * B = rn pointer before instruction, A = rn pointer after instruction
0492  *              ------ increasing address ----->
0493  *          |    | r0 | r1 | ... | rx |    |
0494  * PU = 01             B                    A
0495  * PU = 11        B                    A
0496  * PU = 00        A                    B
0497  * PU = 10             A                    B
0498  */
0499 static int
0500 do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
0501 {
0502     unsigned int rd, rn, correction, nr_regs, regbits;
0503     unsigned long eaddr, newaddr;
0504 
0505     if (LDM_S_BIT(instr))
0506         goto bad;
0507 
0508     correction = 4; /* processor implementation defined */
0509     regs->ARM_pc += correction;
0510 
0511     ai_multi += 1;
0512 
0513     /* count the number of registers in the mask to be transferred */
0514     nr_regs = hweight16(REGMASK_BITS(instr)) * 4;
0515 
0516     rn = RN_BITS(instr);
0517     newaddr = eaddr = regs->uregs[rn];
0518 
0519     if (!LDST_U_BIT(instr))
0520         nr_regs = -nr_regs;
0521     newaddr += nr_regs;
0522     if (!LDST_U_BIT(instr))
0523         eaddr = newaddr;
0524 
0525     if (LDST_P_EQ_U(instr)) /* U = P */
0526         eaddr += 4;
0527 
0528     /*
0529      * For alignment faults on the ARM922T/ARM920T the MMU  makes
0530      * the FSR (and hence addr) equal to the updated base address
0531      * of the multiple access rather than the restored value.
0532      * Switch this message off if we've got a ARM92[02], otherwise
0533      * [ls]dm alignment faults are noisy!
0534      */
0535 #if !(defined CONFIG_CPU_ARM922T)  && !(defined CONFIG_CPU_ARM920T)
0536     /*
0537      * This is a "hint" - we already have eaddr worked out by the
0538      * processor for us.
0539      */
0540     if (addr != eaddr) {
0541         pr_err("LDMSTM: PC = %08lx, instr = %08x, "
0542             "addr = %08lx, eaddr = %08lx\n",
0543              instruction_pointer(regs), instr, addr, eaddr);
0544         show_regs(regs);
0545     }
0546 #endif
0547 
0548     if (user_mode(regs)) {
0549         unsigned int __ua_flags = uaccess_save_and_enable();
0550         for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
0551              regbits >>= 1, rd += 1)
0552             if (regbits & 1) {
0553                 if (LDST_L_BIT(instr)) {
0554                     unsigned int val;
0555                     get32t_unaligned_check(val, eaddr);
0556                     regs->uregs[rd] = val;
0557                 } else
0558                     put32t_unaligned_check(regs->uregs[rd], eaddr);
0559                 eaddr += 4;
0560             }
0561         uaccess_restore(__ua_flags);
0562     } else {
0563         for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
0564              regbits >>= 1, rd += 1)
0565             if (regbits & 1) {
0566                 if (LDST_L_BIT(instr)) {
0567                     unsigned int val;
0568                     get32_unaligned_check(val, eaddr);
0569                     regs->uregs[rd] = val;
0570                 } else
0571                     put32_unaligned_check(regs->uregs[rd], eaddr);
0572                 eaddr += 4;
0573             }
0574     }
0575 
0576     if (LDST_W_BIT(instr))
0577         regs->uregs[rn] = newaddr;
0578     if (!LDST_L_BIT(instr) || !(REGMASK_BITS(instr) & (1 << 15)))
0579         regs->ARM_pc -= correction;
0580     return TYPE_DONE;
0581 
0582 fault:
0583     regs->ARM_pc -= correction;
0584     return TYPE_FAULT;
0585 
0586 bad:
0587     pr_err("Alignment trap: not handling ldm with s-bit set\n");
0588     return TYPE_ERROR;
0589 }
0590 
0591 /*
0592  * Convert Thumb ld/st instruction forms to equivalent ARM instructions so
0593  * we can reuse ARM userland alignment fault fixups for Thumb.
0594  *
0595  * This implementation was initially based on the algorithm found in
0596  * gdb/sim/arm/thumbemu.c. It is basically just a code reduction of same
0597  * to convert only Thumb ld/st instruction forms to equivalent ARM forms.
0598  *
0599  * NOTES:
0600  * 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections.
0601  * 2. If for some reason we're passed an non-ld/st Thumb instruction to
0602  *    decode, we return 0xdeadc0de. This should never happen under normal
0603  *    circumstances but if it does, we've got other problems to deal with
0604  *    elsewhere and we obviously can't fix those problems here.
0605  */
0606 
/*
 * Translate a 16-bit Thumb load/store instruction to the equivalent
 * 32-bit ARM encoding so the ARM fixup handlers above can emulate it.
 * Returns BAD_INSTR for anything that is not a translatable ld/st form.
 */
static unsigned long
thumb2arm(u16 tinstr)
{
    /* L bit: 1 = load, 0 = store (bit 11 of the Thumb encoding) */
    u32 L = (tinstr & (1<<11)) >> 11;

    switch ((tinstr & 0xf800) >> 11) {
    /* 6.5.1 Format 1: */
    case 0x6000 >> 11:              /* 7.1.52 STR(1) */
    case 0x6800 >> 11:              /* 7.1.26 LDR(1) */
    case 0x7000 >> 11:              /* 7.1.55 STRB(1) */
    case 0x7800 >> 11:              /* 7.1.30 LDRB(1) */
        return 0xe5800000 |
            ((tinstr & (1<<12)) << (22-12)) |   /* fixup */
            (L<<20) |               /* L==1? */
            ((tinstr & (7<<0)) << (12-0)) |     /* Rd */
            ((tinstr & (7<<3)) << (16-3)) |     /* Rn */
            ((tinstr & (31<<6)) >>          /* immed_5 */
                (6 - ((tinstr & (1<<12)) ? 0 : 2)));
    case 0x8000 >> 11:              /* 7.1.57 STRH(1) */
    case 0x8800 >> 11:              /* 7.1.32 LDRH(1) */
        return 0xe1c000b0 |
            (L<<20) |               /* L==1? */
            ((tinstr & (7<<0)) << (12-0)) |     /* Rd */
            ((tinstr & (7<<3)) << (16-3)) |     /* Rn */
            ((tinstr & (7<<6)) >> (6-1)) |   /* immed_5[2:0] */
            ((tinstr & (3<<9)) >> (9-8));    /* immed_5[4:3] */

    /* 6.5.1 Format 2: */
    case 0x5000 >> 11:
    case 0x5800 >> 11:
        {
            /* ARM templates indexed by the Thumb opcode bits [11:9] */
            static const u32 subset[8] = {
                0xe7800000,     /* 7.1.53 STR(2) */
                0xe18000b0,     /* 7.1.58 STRH(2) */
                0xe7c00000,     /* 7.1.56 STRB(2) */
                0xe19000d0,     /* 7.1.34 LDRSB */
                0xe7900000,     /* 7.1.27 LDR(2) */
                0xe19000b0,     /* 7.1.33 LDRH(2) */
                0xe7d00000,     /* 7.1.31 LDRB(2) */
                0xe19000f0      /* 7.1.35 LDRSH */
            };
            return subset[(tinstr & (7<<9)) >> 9] |
                ((tinstr & (7<<0)) << (12-0)) | /* Rd */
                ((tinstr & (7<<3)) << (16-3)) | /* Rn */
                ((tinstr & (7<<6)) >> (6-0));   /* Rm */
        }

    /* 6.5.1 Format 3: */
    case 0x4800 >> 11:              /* 7.1.28 LDR(3) */
        /* NOTE: This case is not technically possible. We're
         *   loading 32-bit memory data via PC relative
         *   addressing mode. So we can and should eliminate
         *   this case. But I'll leave it here for now.
         */
        return 0xe59f0000 |
            ((tinstr & (7<<8)) << (12-8)) |     /* Rd */
            ((tinstr & 255) << (2-0));          /* immed_8 */

    /* 6.5.1 Format 4: */
    case 0x9000 >> 11:              /* 7.1.54 STR(3) */
    case 0x9800 >> 11:              /* 7.1.29 LDR(4) */
        return 0xe58d0000 |
            (L<<20) |               /* L==1? */
            ((tinstr & (7<<8)) << (12-8)) |     /* Rd */
            ((tinstr & 255) << 2);          /* immed_8 */

    /* 6.6.1 Format 1: */
    case 0xc000 >> 11:              /* 7.1.51 STMIA */
    case 0xc800 >> 11:              /* 7.1.25 LDMIA */
        {
            u32 Rn = (tinstr & (7<<8)) >> 8;
            /* No writeback when LDMIA reloads the base register. */
            u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;

            return 0xe8800000 | W | (L<<20) | (Rn<<16) |
                (tinstr&255);
        }

    /* 6.6.1 Format 2: */
    case 0xb000 >> 11:              /* 7.1.48 PUSH */
    case 0xb800 >> 11:              /* 7.1.47 POP */
        if ((tinstr & (3 << 9)) == 0x0400) {
            static const u32 subset[4] = {
                0xe92d0000, /* STMDB sp!,{registers} */
                0xe92d4000, /* STMDB sp!,{registers,lr} */
                0xe8bd0000, /* LDMIA sp!,{registers} */
                0xe8bd8000  /* LDMIA sp!,{registers,pc} */
            };
            return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
                (tinstr & 255);     /* register_list */
        }
        fallthrough;    /* for illegal instruction case */

    default:
        return BAD_INSTR;
    }
}
0703 
0704 /*
0705  * Convert Thumb-2 32 bit LDM, STM, LDRD, STRD to equivalent instruction
0706  * handlable by ARM alignment handler, also find the corresponding handler,
0707  * so that we can reuse ARM userland alignment fault fixups for Thumb.
0708  *
0709  * @pinstr: original Thumb-2 instruction; returns new handlable instruction
0710  * @regs: register context.
0711  * @poffset: return offset from faulted addr for later writeback
0712  *
0713  * NOTES:
0714  * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
0715  * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
0716  */
static void *
do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
                union offset_union *poffset)
{
    u32 instr = *pinstr;
    u16 tinst1 = (instr >> 16) & 0xffff;    /* first Thumb-2 halfword */
    u16 tinst2 = instr & 0xffff;        /* second Thumb-2 halfword */

    switch (tinst1 & 0xffe0) {
    /* A6.3.5 Load/Store multiple */
    case 0xe880:        /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
    case 0xe8a0:        /* ...above writeback version */
    case 0xe900:        /* STMDB/STMFD, LDMDB/LDMEA */
    case 0xe920:        /* ...above writeback version */
        /* no need offset decision since handler calculates it */
        return do_alignment_ldmstm;

    case 0xf840:        /* POP/PUSH T3 (single register) */
        if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
            u32 L = !!(LDST_L_BIT(instr));
            const u32 subset[2] = {
                0xe92d0000, /* STMDB sp!,{registers} */
                0xe8bd0000, /* LDMIA sp!,{registers} */
            };
            /* rewrite as a single-register ldm/stm for the handler */
            *pinstr = subset[L] | (1<<RD_BITS(instr));
            return do_alignment_ldmstm;
        }
        /* Else fall through for illegal instruction case */
        break;

    /* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
    case 0xe860:
    case 0xe960:
    case 0xe8e0:
    case 0xe9e0:
        /* immediate forms: 8-bit offset scaled by 4 for writeback */
        poffset->un = (tinst2 & 0xff) << 2;
        fallthrough;

    case 0xe940:
    case 0xe9c0:
        return do_alignment_ldrdstrd;

    /*
     * No need to handle load/store instructions up to word size
     * since ARMv6 and later CPUs can perform unaligned accesses.
     */
    default:
        break;
    }
    /* NULL tells the caller this instruction is not handled here. */
    return NULL;
}
0768 
0769 static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
0770 {
0771     u32 instr = 0;
0772     int fault;
0773 
0774     if (user_mode(regs))
0775         fault = get_user(instr, ip);
0776     else
0777         fault = get_kernel_nofault(instr, ip);
0778 
0779     *inst = __mem_to_opcode_arm(instr);
0780 
0781     return fault;
0782 }
0783 
0784 static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
0785 {
0786     u16 instr = 0;
0787     int fault;
0788 
0789     if (user_mode(regs))
0790         fault = get_user(instr, ip);
0791     else
0792         fault = get_kernel_nofault(instr, ip);
0793 
0794     *inst = __mem_to_opcode_thumb16(instr);
0795 
0796     return fault;
0797 }
0798 
0799 static int
0800 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
0801 {
0802     union offset_union offset;
0803     unsigned long instrptr;
0804     int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
0805     unsigned int type;
0806     u32 instr = 0;
0807     u16 tinstr = 0;
0808     int isize = 4;
0809     int thumb2_32b = 0;
0810     int fault;
0811 
0812     if (interrupts_enabled(regs))
0813         local_irq_enable();
0814 
0815     instrptr = instruction_pointer(regs);
0816 
0817     if (thumb_mode(regs)) {
0818         u16 *ptr = (u16 *)(instrptr & ~1);
0819 
0820         fault = alignment_get_thumb(regs, ptr, &tinstr);
0821         if (!fault) {
0822             if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
0823                 IS_T32(tinstr)) {
0824                 /* Thumb-2 32-bit */
0825                 u16 tinst2;
0826                 fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
0827                 instr = __opcode_thumb32_compose(tinstr, tinst2);
0828                 thumb2_32b = 1;
0829             } else {
0830                 isize = 2;
0831                 instr = thumb2arm(tinstr);
0832             }
0833         }
0834     } else {
0835         fault = alignment_get_arm(regs, (void *)instrptr, &instr);
0836     }
0837 
0838     if (fault) {
0839         type = TYPE_FAULT;
0840         goto bad_or_fault;
0841     }
0842 
0843     if (user_mode(regs))
0844         goto user;
0845 
0846     ai_sys += 1;
0847     ai_sys_last_pc = (void *)instruction_pointer(regs);
0848 
0849  fixup:
0850 
0851     regs->ARM_pc += isize;
0852 
0853     switch (CODING_BITS(instr)) {
0854     case 0x00000000:    /* 3.13.4 load/store instruction extensions */
0855         if (LDSTHD_I_BIT(instr))
0856             offset.un = (instr & 0xf00) >> 4 | (instr & 15);
0857         else
0858             offset.un = regs->uregs[RM_BITS(instr)];
0859 
0860         if ((instr & 0x000000f0) == 0x000000b0 || /* LDRH, STRH */
0861             (instr & 0x001000f0) == 0x001000f0)   /* LDRSH */
0862             handler = do_alignment_ldrhstrh;
0863         else if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */
0864              (instr & 0x001000f0) == 0x000000f0)   /* STRD */
0865             handler = do_alignment_ldrdstrd;
0866         else if ((instr & 0x01f00ff0) == 0x01000090) /* SWP */
0867             goto swp;
0868         else
0869             goto bad;
0870         break;
0871 
0872     case 0x04000000:    /* ldr or str immediate */
0873         if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
0874             goto bad;
0875         offset.un = OFFSET_BITS(instr);
0876         handler = do_alignment_ldrstr;
0877         break;
0878 
0879     case 0x06000000:    /* ldr or str register */
0880         offset.un = regs->uregs[RM_BITS(instr)];
0881 
0882         if (IS_SHIFT(instr)) {
0883             unsigned int shiftval = SHIFT_BITS(instr);
0884 
0885             switch(SHIFT_TYPE(instr)) {
0886             case SHIFT_LSL:
0887                 offset.un <<= shiftval;
0888                 break;
0889 
0890             case SHIFT_LSR:
0891                 offset.un >>= shiftval;
0892                 break;
0893 
0894             case SHIFT_ASR:
0895                 offset.sn >>= shiftval;
0896                 break;
0897 
0898             case SHIFT_RORRRX:
0899                 if (shiftval == 0) {
0900                     offset.un >>= 1;
0901                     if (regs->ARM_cpsr & PSR_C_BIT)
0902                         offset.un |= 1 << 31;
0903                 } else
0904                     offset.un = offset.un >> shiftval |
0905                               offset.un << (32 - shiftval);
0906                 break;
0907             }
0908         }
0909         handler = do_alignment_ldrstr;
0910         break;
0911 
0912     case 0x08000000:    /* ldm or stm, or thumb-2 32bit instruction */
0913         if (thumb2_32b) {
0914             offset.un = 0;
0915             handler = do_alignment_t32_to_handler(&instr, regs, &offset);
0916         } else {
0917             offset.un = 0;
0918             handler = do_alignment_ldmstm;
0919         }
0920         break;
0921 
0922     default:
0923         goto bad;
0924     }
0925 
0926     if (!handler)
0927         goto bad;
0928     type = handler(addr, instr, regs);
0929 
0930     if (type == TYPE_ERROR || type == TYPE_FAULT) {
0931         regs->ARM_pc -= isize;
0932         goto bad_or_fault;
0933     }
0934 
0935     if (type == TYPE_LDST)
0936         do_alignment_finish_ldst(addr, instr, regs, offset);
0937 
0938     if (thumb_mode(regs))
0939         regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
0940 
0941     return 0;
0942 
0943  bad_or_fault:
0944     if (type == TYPE_ERROR)
0945         goto bad;
0946     /*
0947      * We got a fault - fix it up, or die.
0948      */
0949     do_bad_area(addr, fsr, regs);
0950     return 0;
0951 
0952  swp:
0953     pr_err("Alignment trap: not handling swp instruction\n");
0954 
0955  bad:
0956     /*
0957      * Oops, we didn't handle the instruction.
0958      */
0959     pr_err("Alignment trap: not handling instruction "
0960         "%0*x at [<%08lx>]\n",
0961         isize << 1,
0962         isize == 2 ? tinstr : instr, instrptr);
0963     ai_skipped += 1;
0964     return 1;
0965 
0966  user:
0967     ai_user += 1;
0968 
0969     if (ai_usermode & UM_WARN)
0970         printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
0971                "Address=0x%08lx FSR 0x%03x\n", current->comm,
0972             task_pid_nr(current), instrptr,
0973             isize << 1,
0974             isize == 2 ? tinstr : instr,
0975                 addr, fsr);
0976 
0977     if (ai_usermode & UM_FIXUP)
0978         goto fixup;
0979 
0980     if (ai_usermode & UM_SIGNAL) {
0981         force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
0982     } else {
0983         /*
0984          * We're about to disable the alignment trap and return to
0985          * user space.  But if an interrupt occurs before actually
0986          * reaching user space, then the IRQ vector entry code will
0987          * notice that we were still in kernel space and therefore
0988          * the alignment trap won't be re-enabled in that case as it
0989          * is presumed to be always on from kernel space.
0990          * Let's prevent that race by disabling interrupts here (they
0991          * are disabled on the way back to user space anyway in
0992          * entry-common.S) and disable the alignment trap only if
0993          * there is no work pending for this thread.
0994          */
0995         raw_local_irq_disable();
0996         if (!(read_thread_flags() & _TIF_WORK_MASK))
0997             set_cr(cr_no_alignment);
0998     }
0999 
1000     return 0;
1001 }
1002 
1003 static int __init noalign_setup(char *__unused)
1004 {
1005     set_cr(__clear_cr(CR_A));
1006     return 1;
1007 }
1008 __setup("noalign", noalign_setup);
1009 
1010 /*
1011  * This needs to be done after sysctl_init_bases(), otherwise sys/ will be
1012  * overwritten.  Actually, this shouldn't be in sys/ at all since
1013  * it isn't a sysctl, and it doesn't contain sysctl information.
1014  * We now locate it in /proc/cpu/alignment instead.
1015  */
1016 static int __init alignment_init(void)
1017 {
1018 #ifdef CONFIG_PROC_FS
1019     struct proc_dir_entry *res;
1020 
1021     res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
1022               &alignment_proc_ops);
1023     if (!res)
1024         return -ENOMEM;
1025 #endif
1026 
1027     if (cpu_is_v6_unaligned()) {
1028         set_cr(__clear_cr(CR_A));
1029         ai_usermode = safe_usermode(ai_usermode, false);
1030     }
1031 
1032     cr_no_alignment = get_cr() & ~CR_A;
1033 
1034     hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
1035             "alignment exception");
1036 
1037     /*
1038      * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
1039      * fault, not as alignment error.
1040      *
1041      * TODO: handle ARMv6K properly. Runtime check for 'K' extension is
1042      * needed.
1043      */
1044     if (cpu_architecture() <= CPU_ARCH_ARMv6) {
1045         hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
1046                 "alignment exception");
1047     }
1048 
1049     return 0;
1050 }
1051 
1052 fs_initcall(alignment_init);