// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>

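/*
 * Search the kernel exception tables for the faulting instruction; if an
 * entry exists, point the PC at its fixup code and report success.
 */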
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->fixup;

		return 1;
	}

	return 0;
}

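/*
 * Decode the trap vector: VEC_TLBINVALIDS (store to a not-present page)
 * and VEC_TLBMODIFIED (store to a write-protected page) are only raised
 * for write accesses; every other vector is treated as a read.
 */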
static inline bool is_write(struct pt_regs *regs)
{
	switch (trap_no(regs)) {
	case VEC_TLBINVALIDS:
		return true;
	case VEC_TLBMODIFIED:
		return true;
	}

	return false;
}

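/*
 * Cores without load/store-exclusive (CONFIG_CPU_HAS_LDSTEX) emulate
 * cmpxchg with a plain ldw/stw pair. If the stw half write-faults, roll
 * the PC back to the ldw so the whole sequence is restarted once the
 * page has been made writable.
 */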
#ifdef CONFIG_CPU_HAS_LDSTEX
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	return;
}
#else
extern unsigned long csky_cmpxchg_ldw;
extern unsigned long csky_cmpxchg_stw;
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	if (trap_no(regs) != VEC_TLBMODIFIED)
		return;

	if (instruction_pointer(regs) == csky_cmpxchg_stw)
		instruction_pointer_set(regs, csky_cmpxchg_ldw);
	return;
}
#endif

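/*
 * A fault taken in kernel mode with no user context to blame: try an
 * exception table fixup first, otherwise oops and kill the task.
 */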
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	current->thread.trap_no = trap_no(regs);

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

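/*
 * Translate a fatal handle_mm_fault() result: trigger OOM handling for
 * VM_FAULT_OOM, raise SIGBUS for VM_FAULT_SIGBUS, and BUG() on anything
 * else. Called after mmap_lock has already been released.
 */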
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	current->thread.trap_no = trap_no(regs);

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory: call the OOM killer, then return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

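/*
 * The access hit an unmapped area or violated the VMA's protections:
 * drop mmap_lock, then deliver SIGSEGV for user mode or fall back to
 * no_context() for kernel mode.
 */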
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

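/*
 * Handle a fault in the kernel's vmalloc/module area: copy the missing
 * entries from init_mm's master page table into the current one and
 * invalidate the stale TLB entry, without ever taking mmap_lock.
 */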
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int offset;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk" here. We might be inside
	 * an interrupt in the middle of a task switch.
	 */
	offset = pgd_index(addr);

	pgd = get_pgd() + offset;
	pgd_k = init_mm.pgd + offset;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	pud = (pud_t *)pgd;
	pud_k = (pud_t *)pgd_k;
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	flush_tlb_one(addr);
}

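/*
 * Check the attempted access against the VMA's permissions: writes need
 * VM_WRITE; reads and instruction fetches only need the VMA to be
 * accessible at all (any of read/write/exec).
 */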
static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma)
{
	if (is_write(regs)) {
		if (!(vma->vm_flags & VM_WRITE))
			return true;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			return true;
	}
	return false;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr = read_mmu_entryhi() & PAGE_MASK;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	tsk = current;
	mm = tsk->mm;

	csky_cmpxchg_fixup(regs);

	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context (PSR.IE, bit 6). */
	if (likely(regs->sr & BIT(6)))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (is_write(regs))
		flags |= FAULT_FLAG_WRITE;
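	/*
	 * Retry loop: if handle_mm_fault() returns VM_FAULT_RETRY we come
	 * back here exactly once more, with FAULT_FLAG_TRIED set.
	 */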
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(regs, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}

	/* The fault is fully completed (including releasing the mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}