// SPDX-License-Identifier: GPL-2.0
/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */


#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/extable.h>

#include <asm/setup.h>

#include "kernel.h"

enum direction {
    load,    /* ld, ldd, ldh, ldsh */
    store,   /* st, std, sth, stsh */
    both,    /* Swap, ldstub, etc. */
    fpload,
    fpstore,
    invalid,
};

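/*
 * The instructions decoded below are SPARC V8 format 3 (memory access)
 * instructions.  For reference, their fields are laid out as:
 *
 *   bits 31-30  op     (3 for loads/stores)
 *   bits 29-25  rd     destination/source register
 *   bits 24-19  op3    opcode: load/store, size, signedness
 *   bits 18-14  rs1    base register
 *   bit  13     i      immediate flag
 *   bits 12-0   simm13 (when i = 1), or bits 4-0 rs2 (when i = 0)
 *
 * The helpers below pick these fields apart with shifts and masks.
 */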
static inline enum direction decode_direction(unsigned int insn)
{
    unsigned long tmp = (insn >> 21) & 1;

    if(!tmp)
        return load;
    else {
        if(((insn>>19)&0x3f) == 15)
            return both;
        else
            return store;
    }
}

/* 8 = double-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
    insn = (insn >> 19) & 3;

    if(!insn)
        return 4;
    else if(insn == 3)
        return 8;
    else if(insn == 2)
        return 2;
    else {
        printk("Impossible unaligned trap. insn=%08x\n", insn);
        die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
        return 4; /* just to keep gcc happy. */
    }
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
    return (insn & 0x400000);
}

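/*
 * Registers %l0-%l7 and %i0-%i7 (numbers 16-31) live in the current
 * register window rather than in pt_regs.  If any operand names one of
 * them, force the occupied windows out to the stack with a burst of
 * nested save/restore pairs, so fetch_reg() can read the value from the
 * reg_window32 saved at the frame pointer.
 */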
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
                       unsigned int rd)
{
    if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
        /* Wheee... */
        __asm__ __volatile__("save %sp, -0x40, %sp\n\t"
                     "save %sp, -0x40, %sp\n\t"
                     "save %sp, -0x40, %sp\n\t"
                     "save %sp, -0x40, %sp\n\t"
                     "save %sp, -0x40, %sp\n\t"
                     "save %sp, -0x40, %sp\n\t"
                     "save %sp, -0x40, %sp\n\t"
                     "restore; restore; restore; restore;\n\t"
                     "restore; restore; restore;\n\t");
    }
}

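/*
 * The 13-bit immediate sits in bits 12-0.  Shifting it up to bit 31 and
 * back down with signed (arithmetic) shifts replicates bit 12 across
 * the upper bits, i.e. sign-extends it to 32 bits.
 */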
static inline int sign_extend_imm13(int imm)
{
    return imm << 19 >> 19;
}

static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
    struct reg_window32 *win;

    if(reg < 16)
        return (!reg ? 0 : regs->u_regs[reg]);

    /* Ho hum, the slightly complicated case. */
    win = (struct reg_window32 *) regs->u_regs[UREG_FP];
    return win->locals[reg - 16]; /* yes, I know what this does... */
}

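/*
 * Like fetch_reg(), but for the user fault path: the saved register
 * window lives at a user-supplied stack pointer, so check its alignment
 * and read it with get_user() instead of dereferencing it directly.
 * Returns -1 if the window cannot be read.
 */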
static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs)
{
    struct reg_window32 __user *win;
    unsigned long ret;

    if (reg < 16)
        return (!reg ? 0 : regs->u_regs[reg]);

    /* Ho hum, the slightly complicated case. */
    win = (struct reg_window32 __user *) regs->u_regs[UREG_FP];

    if ((unsigned long)win & 3)
        return -1;

    if (get_user(ret, &win->locals[reg - 16]))
        return -1;

    return ret;
}

static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
    struct reg_window32 *win;

    if(reg < 16)
        return &regs->u_regs[reg];
    win = (struct reg_window32 *) regs->u_regs[UREG_FP];
    return &win->locals[reg - 16];
}

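/*
 * The effective address is rs1 + simm13 when the i bit (0x2000) is set,
 * otherwise rs1 + rs2.
 */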
static unsigned long compute_effective_address(struct pt_regs *regs,
                           unsigned int insn)
{
    unsigned int rs1 = (insn >> 14) & 0x1f;
    unsigned int rs2 = insn & 0x1f;
    unsigned int rd = (insn >> 25) & 0x1f;

    if(insn & 0x2000) {
        maybe_flush_windows(rs1, 0, rd);
        return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
    } else {
        maybe_flush_windows(rs1, rs2, rd);
        return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
    }
}

unsigned long safe_compute_effective_address(struct pt_regs *regs,
                         unsigned int insn)
{
    unsigned int rs1 = (insn >> 14) & 0x1f;
    unsigned int rs2 = insn & 0x1f;
    unsigned int rd = (insn >> 25) & 0x1f;

    if(insn & 0x2000) {
        maybe_flush_windows(rs1, 0, rd);
        return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn));
    } else {
        maybe_flush_windows(rs1, rs2, rd);
        return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs));
    }
}

/* This is just to make gcc think panic does return... */
static void unaligned_panic(char *str)
{
    panic("%s", str);
}

/* una_asm.S */
extern int do_int_load(unsigned long *dest_reg, int size,
               unsigned long *saddr, int is_signed);
extern int __do_int_store(unsigned long *dst_addr, int size,
              unsigned long *src_val);

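/*
 * %g0 (register 0) always reads as zero, so a store from it takes its
 * value from the local zero[] pair instead of pt_regs.  For an 8-byte
 * std the second word of the %g0/%g1 pair still comes from %g1.
 */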
static int do_int_store(int reg_num, int size, unsigned long *dst_addr,
            struct pt_regs *regs)
{
    unsigned long zero[2] = { 0, 0 };
    unsigned long *src_val;

    if (reg_num)
        src_val = fetch_reg_addr(reg_num, regs);
    else {
        src_val = &zero[0];
        if (size == 8)
            zero[1] = fetch_reg(1, regs);
    }
    return __do_int_store(dst_addr, size, src_val);
}

extern void smp_capture(void);
extern void smp_release(void);

static inline void advance(struct pt_regs *regs)
{
    regs->pc   = regs->npc;
    regs->npc += 4;
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
    return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
    return !floating_point_load_or_store_p(insn);
}

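/*
 * The emulated access itself faulted while running in the kernel.  If
 * the trapping pc has an exception-table fixup (e.g. a user-copy
 * routine), continue at the fixup; otherwise report the bad address
 * and die.
 */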
static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
    const struct exception_table_entry *entry;

    entry = search_exception_tables(regs->pc);
    if (!entry) {
        unsigned long address = compute_effective_address(regs, insn);
        if(address < PAGE_SIZE) {
            printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
        } else
            printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
        printk(KERN_ALERT " at virtual address %08lx\n", address);
        printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n",
            (current->mm ? current->mm->context :
            current->active_mm->context));
        printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n",
            (current->mm ? (unsigned long) current->mm->pgd :
            (unsigned long) current->active_mm->pgd));
        die_if_kernel("Oops", regs);
        /* Not reached */
    }
    regs->pc = entry->fixup;
    regs->npc = regs->pc + 4;
}

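/*
 * Called from the trap entry code for a kernel-mode unaligned access:
 * decode the instruction, compute the effective address, emulate the
 * access with the helpers in una_asm.S, then step pc/npc past the
 * trapping instruction.
 */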
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
    enum direction dir = decode_direction(insn);
    int size = decode_access_size(insn);

    if(!ok_for_kernel(insn) || dir == both) {
        printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
               regs->pc);
        unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
    } else {
        unsigned long addr = compute_effective_address(regs, insn);
        int err;

        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
        switch (dir) {
        case load:
            err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
                             regs),
                      size, (unsigned long *) addr,
                      decode_signedness(insn));
            break;

        case store:
            err = do_int_store(((insn>>25)&0x1f), size,
                       (unsigned long *) addr, regs);
            break;
        default:
            panic("Impossible kernel unaligned trap.");
            /* Not reached... */
        }
        if (err)
            kernel_mna_trap_fault(regs, insn);
        else
            advance(regs);
    }
}

asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
    send_sig_fault(SIGBUS, BUS_ADRALN,
               (void __user *)safe_compute_effective_address(regs, insn),
               current);
}