// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/sys_arm.c
 *
 * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
 * Copyright (C) 1995, 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/compat.h>
#include <linux/cpufeature.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>

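/*
 * Flush a user address range to the point of unification in page-sized
 * chunks, so the loop can bail out early on a fatal signal and reschedule
 * between chunks.
 */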
static long
__do_compat_cache_op(unsigned long start, unsigned long end)
{
    long ret;

    do {
        unsigned long chunk = min(PAGE_SIZE, end - start);

        if (fatal_signal_pending(current))
            return 0;

        if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
            /*
             * The workaround requires an inner-shareable tlbi.
             * We pick the reserved-ASID to minimise the impact.
             */
            __tlbi(aside1is, __TLBI_VADDR(0, 0));
            dsb(ish);
        }

        ret = caches_clean_inval_user_pou(start, start + chunk);
        if (ret)
            return ret;

        cond_resched();
        start += chunk;
    } while (start < end);

    return 0;
}

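/*
 * Validate the request before walking the range: 'end' is exclusive,
 * 'flags' must currently be zero, and the whole range must lie in
 * user space.
 */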
static inline long
do_compat_cache_op(unsigned long start, unsigned long end, int flags)
{
    if (end < start || flags)
        return -EINVAL;

    if (!access_ok((const void __user *)start, end - start))
        return -EFAULT;

    return __do_compat_cache_op(start, end);
}

/*
 * Handle all unrecognised system calls.
 */
long compat_arm_syscall(struct pt_regs *regs, int scno)
{
    unsigned long addr;

    switch (scno) {
    /*
     * Flush a region from virtual address 'r0' to virtual address 'r1'
     * _exclusive_.  There is no alignment requirement on either address;
     * user space does not need to know the hardware cache layout.
     *
     * r2 contains flags.  It should ALWAYS be passed as ZERO until it
     * is defined to be something else.  For now we ignore it, but may
     * the fires of hell burn in your belly if you break this rule. ;)
     *
     * (at a later date, we may want to allow this call to not flush
     * various aspects of the cache.  Passing '0' will guarantee that
     * everything necessary gets flushed to maintain consistency in
     * the specified region).
     */
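    /*
     * Illustrative sketch only, not part of this file: a 32-bit process
     * would typically reach this path with something along the lines of
     *
     *     syscall(__ARM_NR_cacheflush, start, end, 0);
     *
     * where __ARM_NR_cacheflush is the AArch32 private call
     * (__ARM_NR_BASE + 2, i.e. 0x0f0002).
     */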
    case __ARM_NR_compat_cacheflush:
        return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);

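    /*
     * Set the TLS pointer for the calling 32-bit task; compat user space
     * reads it back through TPIDRRO_EL0 (the AArch32 TPIDRURO register).
     */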
    case __ARM_NR_compat_set_tls:
        current->thread.uw.tp_value = regs->regs[0];

        /*
         * Protect against register corruption from context switch.
         * See comment in tls_thread_flush.
         */
        barrier();
        write_sysreg(regs->regs[0], tpidrro_el0);
        return 0;

    default:
        /*
         * Calls 0xf0000..0xf07ff are defined to return -ENOSYS
         * if not implemented, rather than raising SIGILL. This
         * way the calling program can gracefully determine whether
         * a feature is supported.
         */
        if (scno < __ARM_NR_COMPAT_END)
            return -ENOSYS;
        break;
    }

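    /*
     * Point the reported address back at the trapping instruction:
     * 2 bytes behind the PC in Thumb mode, 4 bytes in ARM mode.
     */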
    addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4);

    arm64_notify_die("Oops - bad compat syscall(2)", regs,
             SIGILL, ILL_ILLTRP, addr, 0);
    return 0;
}