0001
0002
0003 #include <linux/compat.h>
0004 #include <linux/context_tracking.h>
0005 #include <linux/randomize_kstack.h>
0006
0007 #include <asm/interrupt.h>
0008 #include <asm/kup.h>
0009 #include <asm/syscall.h>
0010 #include <asm/time.h>
0011 #include <asm/tm.h>
0012 #include <asm/unistd.h>
0013
0014
/* Signature shared by all entries of sys_call_table/compat_sys_call_table. */
typedef long (*syscall_fn)(long, long, long, long, long, long);
0016
0017
0018 notrace long system_call_exception(long r3, long r4, long r5,
0019 long r6, long r7, long r8,
0020 unsigned long r0, struct pt_regs *regs)
0021 {
0022 long ret;
0023 syscall_fn f;
0024
0025 kuap_lock();
0026
0027 add_random_kstack_offset();
0028 regs->orig_gpr3 = r3;
0029
0030 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
0031 BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
0032
0033 trace_hardirqs_off();
0034
0035 CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
0036 user_exit_irqoff();
0037
0038 BUG_ON(regs_is_unrecoverable(regs));
0039 BUG_ON(!(regs->msr & MSR_PR));
0040 BUG_ON(arch_irq_disabled_regs(regs));
0041
0042 #ifdef CONFIG_PPC_PKEY
0043 if (mmu_has_feature(MMU_FTR_PKEY)) {
0044 unsigned long amr, iamr;
0045 bool flush_needed = false;
0046
0047
0048
0049
0050 amr = mfspr(SPRN_AMR);
0051 iamr = mfspr(SPRN_IAMR);
0052 regs->amr = amr;
0053 regs->iamr = iamr;
0054 if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
0055 mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
0056 flush_needed = true;
0057 }
0058 if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
0059 mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
0060 flush_needed = true;
0061 }
0062 if (flush_needed)
0063 isync();
0064 } else
0065 #endif
0066 kuap_assert_locked();
0067
0068 booke_restore_dbcr0();
0069
0070 account_cpu_user_entry();
0071
0072 account_stolen_time();
0073
0074
0075
0076
0077
0078
0079
0080 irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091 if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
0092 unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
0093 set_bits(_TIF_RESTOREALL, ¤t_thread_info()->flags);
0094
0095
0096
0097
0098
0099
0100
0101 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0102 if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
0103 !trap_is_unsupported_scv(regs)) {
0104
0105 hard_irq_disable();
0106 mtmsr(mfmsr() | MSR_TM);
0107
0108
0109 asm volatile(".long 0x7c00071d | ((%0) << 16)"
0110 :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120 return -ENOSYS;
0121 }
0122 #endif
0123
0124 local_irq_enable();
0125
0126 if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
0127 if (unlikely(trap_is_unsupported_scv(regs))) {
0128
0129 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
0130 return regs->gpr[3];
0131 }
0132
0133
0134
0135
0136
0137
0138
0139 r0 = do_syscall_trace_enter(regs);
0140 if (unlikely(r0 >= NR_syscalls))
0141 return regs->gpr[3];
0142 r3 = regs->gpr[3];
0143 r4 = regs->gpr[4];
0144 r5 = regs->gpr[5];
0145 r6 = regs->gpr[6];
0146 r7 = regs->gpr[7];
0147 r8 = regs->gpr[8];
0148
0149 } else if (unlikely(r0 >= NR_syscalls)) {
0150 if (unlikely(trap_is_unsupported_scv(regs))) {
0151
0152 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
0153 return regs->gpr[3];
0154 }
0155 return -ENOSYS;
0156 }
0157
0158
0159 barrier_nospec();
0160
0161 if (unlikely(is_compat_task())) {
0162 f = (void *)compat_sys_call_table[r0];
0163
0164 r3 &= 0x00000000ffffffffULL;
0165 r4 &= 0x00000000ffffffffULL;
0166 r5 &= 0x00000000ffffffffULL;
0167 r6 &= 0x00000000ffffffffULL;
0168 r7 &= 0x00000000ffffffffULL;
0169 r8 &= 0x00000000ffffffffULL;
0170
0171 } else {
0172 f = (void *)sys_call_table[r0];
0173 }
0174
0175 ret = f(r3, r4, r5, r6, r7, r8);
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187 choose_random_kstack_offset(mftb());
0188
0189 return ret;
0190 }