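/*
 * Page fault handling for ARC (TLB miss / protection violation).
 */
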
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>
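
/*
 * Handle a kernel-mode fault on a kernel virtual address (vmalloc and
 * friends): bring this task's top level page table in sync with the
 * master (swapper) page table by copying the missing entry, so the
 * lower-level tables end up shared.
 */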
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);
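
	/*
	 * At each level of the walk: a hole in the master table means the
	 * address is bogus; a hole in this task's table is filled by
	 * copying the master's entry.
	 */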
	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (pmd_none(*pmd_k))
		goto bad_area;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);

	return 0;

bad_area:
	return 1;
}
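
/*
 * Top-level page fault handler: kernel vmalloc faults are resolved from
 * the master page table; anything else is looked up in the faulting
 * task's address space and handed to the core VM, with a signal
 * delivered to user space on failure.
 */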
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */
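
	/*
	 * NOTE! We MUST NOT take any locks for this case. We may be in
	 * an interrupt or a critical region, and must only copy the
	 * information from the master page table, nothing more.
	 */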
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}
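
	/*
	 * If we're in an interrupt or have no user context,
	 * we must not take the fault.
	 */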
	if (faulthandler_disabled() || !mm)
		goto no_context;
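
	/*
	 * Decode the exception cause register: a protection violation on
	 * store marks a write fault, one on instruction fetch marks an
	 * exec fault; everything else is treated as a read.
	 */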
	if (regs->ecr_cause & ECR_C_PROTV_STORE)
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}
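
	/*
	 * vma is valid: now check permissions for this memory access
	 */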
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags, regs);
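
	/* Quick path to respond to signals */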
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}
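
	/* The fault is fully completed (including releasing mmap lock) */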
	if (fault & VM_FAULT_COMPLETED)
		return;
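
	/*
	 * Fault retry nuances: the core mm already dropped the mmap lock
	 * before returning VM_FAULT_RETRY, so take it again.
	 */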
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

bad_area:
	mmap_read_unlock(mm);
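
	/*
	 * Success and failure paths both land here; only the
	 * VM_FAULT_ERROR cases need further handling.
	 */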
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}