// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * Amit Bhor, Kanika Nema: Codito Technologies 2004
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/elf.h>
#include <linux/tick.h>

#include <asm/fpu.h>

SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
	task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
	return 0;
}

/*
 * Return the userspace TLS data pointer as the syscall return value
 * itself, rather than via copy_to_user(). We can get away with this
 * because the pointer can never land in the 0xFFFF_xxxx range that
 * the ABI treats as a syscall error, so userspace won't mistake it
 * for one, and we avoid a guaranteed D-TLB miss on the copy-out.
 */
SYSCALL_DEFINE0(arc_gettls)
{
	return task_thread_info(current)->thr_ptr;
}

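/*
 * Illustrative userspace view (a sketch, not part of this file): a
 * C library could wrap the two TLS syscalls roughly like so. The
 * helper names are made up; only __NR_arc_settls / __NR_arc_gettls
 * come from the ARC uapi headers.
 *
 *	static void set_tp(void *tp)
 *	{
 *		syscall(__NR_arc_settls, tp);	// install thread pointer
 *	}
 *
 *	static void *get_tp(void)
 *	{
 *		// the return value IS the pointer, per the comment above
 *		return (void *)syscall(__NR_arc_gettls);
 *	}
 */
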
SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new)
{
	struct pt_regs *regs = current_pt_regs();
	u32 uval;
	int ret;

	/*
	 * This syscall exists only for old cores lacking LLOCK/SCOND,
	 * which by definition can't be SMP: it doesn't need to be (and
	 * isn't) SMP safe, which also keeps the UP case cheap.
	 */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));

	/* Z flag tells userspace whether the operation succeeded */
	regs->status32 &= ~STATUS_Z_MASK;

	ret = access_ok(uaddr, sizeof(*uaddr));
	if (!ret)
		goto fail;

again:
	preempt_disable();

	ret = __get_user(uval, uaddr);
	if (ret)
		goto fault;

	if (uval != expected)
		goto out;

	ret = __put_user(new, uaddr);
	if (ret)
		goto fault;

	/* exchange done: report success via Z */
	regs->status32 |= STATUS_Z_MASK;

out:
	preempt_enable();
	return uval;

fault:
	preempt_enable();

	if (unlikely(ret != -EFAULT))
		goto fail;

	/* fault in the page (writably) and retry the whole sequence */
	mmap_read_lock(current->mm);
	ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(current->mm);

	if (likely(!ret))
		goto again;

fail:
	force_sig(SIGSEGV);
	return ret;
}
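
/*
 * Illustrative userspace caller (a sketch, not part of this file):
 * on pre-LLOCK/SCOND cores a runtime can emulate compare-and-swap by
 * trapping into the syscall above. C callers can simply compare the
 * returned old value; the Z flag exists for asm callers.
 *
 *	static int cas(int *p, int expected, int new)
 *	{
 *		int old = syscall(__NR_arc_usr_cmpxchg, p, expected, new);
 *		return old == expected;		// non-zero on success
 *	}
 */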

#ifdef CONFIG_ISA_ARCV2

void arch_cpu_idle(void)
{
	/* Re-enable interrupts <= default irq priority before committing SLEEP */
	const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;

	__asm__ __volatile__(
		"sleep %0	\n"
		:
		:"I"(arg)); /* can't be "r": SLEEP wants an embedded const */
}

#else	/* ARC700 */

void arch_cpu_idle(void)
{
	/* sleep, but enable both E1/E2 interrupt levels before committing */
	__asm__ __volatile__("sleep 0x3	\n");
}

#endif

asmlinkage void ret_from_fork(void);

/*
 * Copy architecture-specific thread state.
 *
 * Layout of the child's kernel mode stack as set up below, from the
 * end of the stack page downwards:
 *
 *	------------------  <== end of page
 *	|    pt_regs     |  user-mode scratch regs, SP, orig_r0, ECR
 *	------------------
 *	|   fp, blink    |  blink = @ret_from_fork
 *	------------------
 *	|  callee_regs   |  r25..r13, kernel mode
 *	------------------  <== thread.ksp
 *	|     unused     |
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *c_regs;		/* child's pt_regs */
	unsigned long *childksp;	/* to unwind out of __switch_to() */
	struct callee_regs *c_callee;	/* child's callee regs */
	struct callee_regs *parent_callee;	/* parent's callee regs */
	struct pt_regs *regs = current_pt_regs();

	/* Mark the anchors in the child's stack (see layout above) */
	c_regs = task_pt_regs(p);
	childksp = (unsigned long *)c_regs - 2;	/* 2 words for FP/BLINK */
	c_callee = ((struct callee_regs *)childksp) - 1;

	/*
	 * __switch_to() uses thread.ksp to start unwinding the stack.
	 * Kernel threads don't need real callee regs, but the stack
	 * layout must stay the same. Also, since __switch_to() returns
	 * via @blink, park @ret_from_fork there to be run in the child.
	 */
	p->thread.ksp = (unsigned long)c_callee;	/* THREAD_KSP */

	/* __switch_to() expects FP(0), BLINK(return addr) on top */
	childksp[0] = 0;				/* fp */
	childksp[1] = (unsigned long)ret_from_fork;	/* blink */

	if (unlikely(args->fn)) {
		/* kernel thread: ret_from_fork hands these to @fn(@fn_arg) */
		memset(c_regs, 0, sizeof(struct pt_regs));

		c_callee->r13 = (unsigned long)args->fn_arg;
		c_callee->r14 = (unsigned long)args->fn;

		return 0;
	}

	/*--------- User Task Only --------------*/

	/* Copy parent's pt_regs onto the child's kernel mode stack */
	*c_regs = *regs;

	if (usp)
		c_regs->sp = usp;

	c_regs->r0 = 0;		/* fork returns 0 in the child */

	parent_callee = ((struct callee_regs *)regs) - 1;
	*c_callee = *parent_callee;

	if (unlikely(clone_flags & CLONE_SETTLS)) {
		/*
		 * Set the task's userland TLS data pointer from the
		 * clone() @tls argument (the C library's clone() call
		 * differs from the raw syscall; see the sketch below).
		 */
		task_thread_info(p)->thr_ptr = tls;
	} else {
		/* Normal fork case: child inherits parent's TLS ptr */
		task_thread_info(p)->thr_ptr =
		task_thread_info(current)->thr_ptr;
	}

	/*
	 * Usermode thread pointer, part #1:
	 * when the child is picked by the scheduler, __switch_to() uses
	 * @c_callee to populate the usermode callee regs. This works
	 * (despite running in a kernel function) because the child's
	 * special return path, @ret_from_fork, keeps those regs intact
	 * all the way to the RTIE back to usermode.
	 */
	c_callee->r25 = task_thread_info(p)->thr_ptr;

#ifdef CONFIG_ARC_CURR_IN_REG
	/*
	 * Usermode thread pointer, part #2:
	 * with r25 reserved for the kernel's current pointer,
	 * __switch_to() sets r25 for kernel needs; the usermode r25 is
	 * only restored on the final return path, from
	 * pt_regs->user_r25. So set that up as well.
	 */
	c_regs->user_r25 = c_callee->r25;
#endif

	return 0;
}
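
/*
 * Illustrative flow (a sketch, not part of this file): a userspace
 * thread library creating a thread with its own TLS block would use
 * the glibc-style clone() wrapper, roughly:
 *
 *	clone(thread_fn, child_stack,
 *	      CLONE_VM | CLONE_THREAD | CLONE_SETTLS | ...,
 *	      arg, &ptid, tls_block, &ctid);
 *
 * tls_block arrives here as args->tls, is stored in the child's
 * thread_info.thr_ptr above, and reaches the child's usermode r25
 * via c_callee->r25 (and pt_regs->user_r25 where r25 is reserved).
 */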

/*
 * Do the necessary setup to start up a new user task
 */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
	regs->sp = usp;
	regs->ret = pc;

	/*
	 * [U]ser mode bit set;
	 * [L] ZOL loops inhibited to begin with;
	 * @sp and @ret already set up above.
	 */
	regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;

	fpu_init_task(regs);

	/* bogus seed values to help debugging */
	regs->lp_start = 0x10;
	regs->lp_end = 0x80;
}

/*
 * Some archs flush debug and FPU state here; nothing to do for ARC
 */
void flush_thread(void)
{
}

int elf_check_arch(const struct elf32_hdr *x)
{
	unsigned int eflags;

	if (x->e_machine != EM_ARC_INUSE) {
		pr_err("ELF not built for %s ISA\n",
			is_isa_arcompact() ? "ARCompact" : "ARCv2");
		return 0;
	}

	eflags = x->e_flags;
	if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
		pr_err("ABI mismatch - you need newer toolchain\n");
		force_fatal_sig(SIGSEGV);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(elf_check_arch);