0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <asm/head.h>
0011
0012 #include <linux/string.h>
0013 #include <linux/types.h>
0014 #include <linux/sched.h>
0015 #include <linux/ptrace.h>
0016 #include <linux/mman.h>
0017 #include <linux/threads.h>
0018 #include <linux/kernel.h>
0019 #include <linux/signal.h>
0020 #include <linux/mm.h>
0021 #include <linux/smp.h>
0022 #include <linux/perf_event.h>
0023 #include <linux/interrupt.h>
0024 #include <linux/kdebug.h>
0025 #include <linux/uaccess.h>
0026 #include <linux/extable.h>
0027
0028 #include <asm/page.h>
0029 #include <asm/openprom.h>
0030 #include <asm/oplib.h>
0031 #include <asm/setup.h>
0032 #include <asm/smp.h>
0033 #include <asm/traps.h>
0034
0035 #include "mm_32.h"
0036
/* When non-zero, log user-space faults that have no signal handler installed
 * (consumed by __do_fault_siginfo() / show_signal_msg() below). */
int show_unhandled_signals = 1;
0038
0039 static void __noreturn unhandled_fault(unsigned long address,
0040 struct task_struct *tsk,
0041 struct pt_regs *regs)
0042 {
0043 if ((unsigned long) address < PAGE_SIZE) {
0044 printk(KERN_ALERT
0045 "Unable to handle kernel NULL pointer dereference\n");
0046 } else {
0047 printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
0048 address);
0049 }
0050 printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
0051 (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
0052 printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
0053 (tsk->mm ? (unsigned long) tsk->mm->pgd :
0054 (unsigned long) tsk->active_mm->pgd));
0055 die_if_kernel("Oops", regs);
0056 }
0057
0058 static inline void
0059 show_signal_msg(struct pt_regs *regs, int sig, int code,
0060 unsigned long address, struct task_struct *tsk)
0061 {
0062 if (!unhandled_signal(tsk, sig))
0063 return;
0064
0065 if (!printk_ratelimit())
0066 return;
0067
0068 printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
0069 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
0070 tsk->comm, task_pid_nr(tsk), address,
0071 (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
0072 (void *)regs->u_regs[UREG_FP], code);
0073
0074 print_vma_addr(KERN_CONT " in ", regs->pc);
0075
0076 printk(KERN_CONT "\n");
0077 }
0078
0079 static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
0080 unsigned long addr)
0081 {
0082 if (unlikely(show_unhandled_signals))
0083 show_signal_msg(regs, sig, code,
0084 addr, current);
0085
0086 force_sig_fault(sig, code, (void __user *) addr);
0087 }
0088
0089 static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
0090 {
0091 unsigned int insn;
0092
0093 if (text_fault)
0094 return regs->pc;
0095
0096 if (regs->psr & PSR_PS)
0097 insn = *(unsigned int *) regs->pc;
0098 else
0099 __get_user(insn, (unsigned int *) regs->pc);
0100
0101 return safe_compute_effective_address(regs, insn);
0102 }
0103
0104 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
0105 int text_fault)
0106 {
0107 unsigned long addr = compute_si_addr(regs, text_fault);
0108
0109 __do_fault_siginfo(code, sig, regs, addr);
0110 }
0111
0112 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
0113 unsigned long address)
0114 {
0115 struct vm_area_struct *vma;
0116 struct task_struct *tsk = current;
0117 struct mm_struct *mm = tsk->mm;
0118 int from_user = !(regs->psr & PSR_PS);
0119 int code;
0120 vm_fault_t fault;
0121 unsigned int flags = FAULT_FLAG_DEFAULT;
0122
0123 if (text_fault)
0124 address = regs->pc;
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135 code = SEGV_MAPERR;
0136 if (address >= TASK_SIZE)
0137 goto vmalloc_fault;
0138
0139
0140
0141
0142
0143 if (pagefault_disabled() || !mm)
0144 goto no_context;
0145
0146 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
0147
0148 retry:
0149 mmap_read_lock(mm);
0150
0151 if (!from_user && address >= PAGE_OFFSET)
0152 goto bad_area;
0153
0154 vma = find_vma(mm, address);
0155 if (!vma)
0156 goto bad_area;
0157 if (vma->vm_start <= address)
0158 goto good_area;
0159 if (!(vma->vm_flags & VM_GROWSDOWN))
0160 goto bad_area;
0161 if (expand_stack(vma, address))
0162 goto bad_area;
0163
0164
0165
0166
0167 good_area:
0168 code = SEGV_ACCERR;
0169 if (write) {
0170 if (!(vma->vm_flags & VM_WRITE))
0171 goto bad_area;
0172 } else {
0173
0174 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
0175 goto bad_area;
0176 }
0177
0178 if (from_user)
0179 flags |= FAULT_FLAG_USER;
0180 if (write)
0181 flags |= FAULT_FLAG_WRITE;
0182
0183
0184
0185
0186
0187
0188 fault = handle_mm_fault(vma, address, flags, regs);
0189
0190 if (fault_signal_pending(fault, regs))
0191 return;
0192
0193
0194 if (fault & VM_FAULT_COMPLETED)
0195 return;
0196
0197 if (unlikely(fault & VM_FAULT_ERROR)) {
0198 if (fault & VM_FAULT_OOM)
0199 goto out_of_memory;
0200 else if (fault & VM_FAULT_SIGSEGV)
0201 goto bad_area;
0202 else if (fault & VM_FAULT_SIGBUS)
0203 goto do_sigbus;
0204 BUG();
0205 }
0206
0207 if (fault & VM_FAULT_RETRY) {
0208 flags |= FAULT_FLAG_TRIED;
0209
0210
0211
0212
0213
0214
0215 goto retry;
0216 }
0217
0218 mmap_read_unlock(mm);
0219 return;
0220
0221
0222
0223
0224
0225 bad_area:
0226 mmap_read_unlock(mm);
0227
0228 bad_area_nosemaphore:
0229
0230 if (from_user) {
0231 do_fault_siginfo(code, SIGSEGV, regs, text_fault);
0232 return;
0233 }
0234
0235
0236 no_context:
0237 if (!from_user) {
0238 const struct exception_table_entry *entry;
0239
0240 entry = search_exception_tables(regs->pc);
0241 #ifdef DEBUG_EXCEPTIONS
0242 printk("Exception: PC<%08lx> faddr<%08lx>\n",
0243 regs->pc, address);
0244 printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
0245 regs->pc, entry->fixup);
0246 #endif
0247 regs->pc = entry->fixup;
0248 regs->npc = regs->pc + 4;
0249 return;
0250 }
0251
0252 unhandled_fault(address, tsk, regs);
0253
0254
0255
0256
0257
0258 out_of_memory:
0259 mmap_read_unlock(mm);
0260 if (from_user) {
0261 pagefault_out_of_memory();
0262 return;
0263 }
0264 goto no_context;
0265
0266 do_sigbus:
0267 mmap_read_unlock(mm);
0268 do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
0269 if (!from_user)
0270 goto no_context;
0271
0272 vmalloc_fault:
0273 {
0274
0275
0276
0277
0278 int offset = pgd_index(address);
0279 pgd_t *pgd, *pgd_k;
0280 p4d_t *p4d, *p4d_k;
0281 pud_t *pud, *pud_k;
0282 pmd_t *pmd, *pmd_k;
0283
0284 pgd = tsk->active_mm->pgd + offset;
0285 pgd_k = init_mm.pgd + offset;
0286
0287 if (!pgd_present(*pgd)) {
0288 if (!pgd_present(*pgd_k))
0289 goto bad_area_nosemaphore;
0290 pgd_val(*pgd) = pgd_val(*pgd_k);
0291 return;
0292 }
0293
0294 p4d = p4d_offset(pgd, address);
0295 pud = pud_offset(p4d, address);
0296 pmd = pmd_offset(pud, address);
0297
0298 p4d_k = p4d_offset(pgd_k, address);
0299 pud_k = pud_offset(p4d_k, address);
0300 pmd_k = pmd_offset(pud_k, address);
0301
0302 if (pmd_present(*pmd) || !pmd_present(*pmd_k))
0303 goto bad_area_nosemaphore;
0304
0305 *pmd = *pmd_k;
0306 return;
0307 }
0308 }
0309
0310
/*
 * Fault in a user page at @address on behalf of the register-window
 * handlers below.  Runs in process context against current->mm; on
 * failure the signal is raised against the saved user register state
 * in tsk->thread.kregs rather than a trap frame.
 */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	/* find_vma() may return a vma starting above @address; only a
	 * VM_GROWSDOWN (stack) vma may be expanded down to cover it. */
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	/* Mapped but possibly lacking the required access rights. */
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	/* regs is NULL: no trap frame is available in this context. */
	switch (handle_mm_fault(vma, address, flags, NULL)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	mmap_read_unlock(mm);
	return;
bad_area:
	mmap_read_unlock(mm);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	mmap_read_unlock(mm);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
0357
0358 static void check_stack_aligned(unsigned long sp)
0359 {
0360 if (sp & 0x7UL)
0361 force_sig(SIGILL);
0362 }
0363
0364 void window_overflow_fault(void)
0365 {
0366 unsigned long sp;
0367
0368 sp = current_thread_info()->rwbuf_stkptrs[0];
0369 if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
0370 force_user_fault(sp + 0x38, 1);
0371 force_user_fault(sp, 1);
0372
0373 check_stack_aligned(sp);
0374 }
0375
0376 void window_underflow_fault(unsigned long sp)
0377 {
0378 if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
0379 force_user_fault(sp + 0x38, 0);
0380 force_user_fault(sp, 0);
0381
0382 check_stack_aligned(sp);
0383 }
0384
0385 void window_ret_fault(struct pt_regs *regs)
0386 {
0387 unsigned long sp;
0388
0389 sp = regs->u_regs[UREG_FP];
0390 if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
0391 force_user_fault(sp + 0x38, 0);
0392 force_user_fault(sp, 0);
0393
0394 check_stack_aligned(sp);
0395 }