// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * Amit Bhor, Kanika Nema: Codito Technologies 2004
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/elf.h>
#include <linux/tick.h>

#include <asm/fpu.h>

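/*
 * ARC-private syscall: userspace (e.g. the C library's TLS setup code)
 * registers a pointer to its TLS block. The kernel stashes it in
 * thread_info and it ends up in r25, the userspace thread-pointer
 * register, via copy_thread() and the return-to-user path below.
 */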
SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
    task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
    return 0;
}

/*
 * We return the userspace TLS data ptr as the syscall return value.
 * Ideally it would be a copy_to_user(), but we can cheat: only return
 * values in the error range -MAX_ERRNO..-1 (0xFFFFF001..0xFFFFFFFF when
 * viewed unsigned) are treated as syscall errors. Since the TLS data ptr
 * will never fall in that range, it won't be mistaken for an error, and
 * this is far cheaper than a copy_to_user(), which is a guaranteed
 * D-TLB miss.
 */
SYSCALL_DEFINE0(arc_gettls)
{
    return task_thread_info(current)->thr_ptr;
}

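/*
 * ARC-private syscall providing a cmpxchg primitive for userspace on
 * cores without LLOCK/SCOND: compare *uaddr with @expected and, if they
 * match, store @new. The value read back is returned (in r0), and success
 * is additionally signalled via the Z flag of the saved STATUS32, which
 * userspace sees once the syscall returns.
 */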
SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new)
{
    struct pt_regs *regs = current_pt_regs();
    u32 uval;
    int ret;

    /*
     * This is only for old cores lacking LLOCK/SCOND, which by definition
     * can't possibly be SMP. Thus it doesn't need to be SMP safe, which
     * also keeps the serialization overhead down in the UP case
     * (preemption is simply disabled around the access).
     */
    WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));

    /* Z indicates to userspace if operation succeeded */
    regs->status32 &= ~STATUS_Z_MASK;

    ret = access_ok(uaddr, sizeof(*uaddr));
    if (!ret)
        goto fail;

again:
    preempt_disable();

    ret = __get_user(uval, uaddr);
    if (ret)
        goto fault;

    if (uval != expected)
        goto out;

    ret = __put_user(new, uaddr);
    if (ret)
        goto fault;

    regs->status32 |= STATUS_Z_MASK;

out:
    preempt_enable();
    return uval;

fault:
    preempt_enable();

    if (unlikely(ret != -EFAULT))
        goto fail;

    mmap_read_lock(current->mm);
    ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
                           FAULT_FLAG_WRITE, NULL);
    mmap_read_unlock(current->mm);

    if (likely(!ret))
        goto again;

fail:
    force_sig(SIGSEGV);
    return ret;
}

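/*
 * Illustrative sketch only (not part of the original file): a userspace
 * spinlock acquire built on the syscall above might look roughly like the
 * following, assuming the usual __NR_arc_usr_cmpxchg syscall number is
 * wired up for this kernel:
 *
 *      do {
 *              old = syscall(__NR_arc_usr_cmpxchg, &lock, 0, 1);
 *      } while (old != 0);
 *
 * A return value equal to @expected means the store went through; real
 * users (e.g. libc atomics) may instead test the Z flag set above.
 */
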
#ifdef CONFIG_ISA_ARCV2

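/*
 * arch_cpu_idle() is invoked from the kernel's generic idle loop; the
 * SLEEP instruction halts the core until an interrupt arrives. Per the
 * constant used below, bit 4 of the SLEEP operand re-enables interrupts
 * while sleeping and the low bits give the interrupt priority threshold
 * allowed to wake the core.
 */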
void arch_cpu_idle(void)
{
    /* Re-enable interrupts <= default irq priority before committing SLEEP */
    const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;

    __asm__ __volatile__(
        "sleep %0   \n"
        :
        :"I"(arg)); /* operand must be an embedded const, "r" won't do */
}

#else   /* ARC700 */

void arch_cpu_idle(void)
{
    /* sleep, but enable both E1/E2 interrupt levels before committing */
    __asm__ __volatile__("sleep 0x3 \n");
}

#endif

asmlinkage void ret_from_fork(void);

/*
 * Copy architecture-specific thread state
 *
 * Layout of the child's kernel mode stack, as set up at the end of this
 * function:
 *
 * |     ...        |
 * |     ...        |
 * |    unused      |
 * |                |
 * ------------------
 * |     r25        |   <==== top of Stack (thread.ksp)
 * ~                ~
 * |    --to--      |   (CALLEE Regs of kernel mode)
 * |     r13        |
 * ------------------
 * |     fp         |
 * |    blink       |   @ret_from_fork
 * ------------------
 * |                |
 * ~                ~
 * ~                ~
 * |                |
 * ------------------
 * |     r12        |
 * ~                ~
 * |    --to--      |   (scratch Regs of user mode)
 * |     r0         |
 * ------------------
 * |      SP        |
 * |    orig_r0     |
 * |    event/ECR   |
 * |    user_r25    |
 * ------------------  <===== END of PAGE
 */
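/*
 * copy_thread() is called from the generic copy_process() path, for both
 * user fork/clone and kernel threads (the latter indicated by a non-NULL
 * args->fn).
 */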
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
    unsigned long clone_flags = args->flags;
    unsigned long usp = args->stack;
    unsigned long tls = args->tls;
    struct pt_regs *c_regs;        /* child's pt_regs */
    unsigned long *childksp;       /* to unwind out of __switch_to() */
    struct callee_regs *c_callee;  /* child's callee regs */
    struct callee_regs *parent_callee;  /* parent's callee regs */
    struct pt_regs *regs = current_pt_regs();

    /* Mark the specific anchors to begin with (see pic above) */
    c_regs = task_pt_regs(p);
    childksp = (unsigned long *)c_regs - 2;  /* 2 words for FP/BLINK */
    c_callee = ((struct callee_regs *)childksp) - 1;

    /*
     * __switch_to() uses thread.ksp to start unwinding the stack.
     * For kernel threads we don't need to create callee regs, but the
     * stack layout nevertheless needs to remain the same.
     * Also, since __switch_to() unwinds callee regs anyway, we use
     * this to populate the kernel thread entry-pt/arg into callee regs,
     * so that ret_from_kernel_thread() becomes simpler.
     */
    p->thread.ksp = (unsigned long)c_callee;    /* THREAD_KSP */

    /* __switch_to expects FP(0), BLINK(return addr) at top */
    childksp[0] = 0;            /* fp */
    childksp[1] = (unsigned long)ret_from_fork; /* blink */

    if (unlikely(args->fn)) {
        memset(c_regs, 0, sizeof(struct pt_regs));

        c_callee->r13 = (unsigned long)args->fn_arg;    /* argument */
        c_callee->r14 = (unsigned long)args->fn;        /* entry point */

        return 0;
    }

    /*--------- User Task Only --------------*/

    /* __switch_to expects FP(0), BLINK(return addr) at top of stack */
    childksp[0] = 0;                /* for POP fp */
    childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */

    /* Copy parent's pt_regs onto child's kernel mode stack */
    *c_regs = *regs;

    if (usp)
        c_regs->sp = usp;

    c_regs->r0 = 0;     /* fork returns 0 in child */

    parent_callee = ((struct callee_regs *)regs) - 1;
    *c_callee = *parent_callee;

    if (unlikely(clone_flags & CLONE_SETTLS)) {
        /*
         * Set the task's userland TLS data ptr from the 4th arg;
         * note the C library's clone() wrapper passes it differently
         * than the raw clone syscall.
         */
        task_thread_info(p)->thr_ptr = tls;
    } else {
        /* Normal fork case: set parent's TLS ptr in child */
        task_thread_info(p)->thr_ptr =
            task_thread_info(current)->thr_ptr;
    }

    /*
     * Setup usermode thread pointer #1:
     * when the child is picked by the scheduler, __switch_to() uses
     * @c_callee to populate usermode callee regs: this works (despite
     * being in a kernel function) since the special return path for the
     * child, @ret_from_fork, ensures those regs are not clobbered all
     * the way to the RTIE to usermode.
     */
    c_callee->r25 = task_thread_info(p)->thr_ptr;

#ifdef CONFIG_ARC_CURR_IN_REG
    /*
     * Setup usermode thread pointer #2:
     * however, with this special use of r25 in the kernel, __switch_to()
     * sets r25 for kernel needs, and only in the final return path is the
     * usermode r25 set up, from pt_regs->user_r25. So set that up as well.
     */
    c_regs->user_r25 = c_callee->r25;
#endif

    return 0;
}

/*
 * Do the necessary setup to start up a new user task (called by the ELF
 * loader at exec time).
 */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
    regs->sp = usp;
    regs->ret = pc;

    /*
     * [U]ser Mode bit set
     * [L] ZOL loop inhibited to begin with - cleared by a LP insn
     * Interrupts enabled
     */
    regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;

    fpu_init_task(regs);

    /* bogus seed values for debugging */
    regs->lp_start = 0x10;
    regs->lp_end = 0x80;
}

/*
 * Some archs flush debug and FPU info here
 */
void flush_thread(void)
{
}

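/*
 * Called by the ELF loader to verify that the binary being exec'd matches
 * the ISA and OS ABI this kernel was built for.
 */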
int elf_check_arch(const struct elf32_hdr *x)
{
    unsigned int eflags;

    if (x->e_machine != EM_ARC_INUSE) {
        pr_err("ELF not built for %s ISA\n",
               is_isa_arcompact() ? "ARCompact" : "ARCv2");
        return 0;
    }

    eflags = x->e_flags;
    if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
        pr_err("ABI mismatch - you need newer toolchain\n");
        force_fatal_sig(SIGSEGV);
        return 0;
    }

    return 1;
}
EXPORT_SYMBOL(elf_check_arch);