// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
    struct siginfo info;
    struct ucontext uc;
};

struct frame_record {
    u64 fp;
    u64 lr;
};

struct rt_sigframe_user_layout {
    struct rt_sigframe __user *sigframe;
    struct frame_record __user *next_frame;

    unsigned long size; /* size of allocated sigframe data */
    unsigned long limit;    /* largest allowed size */

    unsigned long fpsimd_offset;
    unsigned long esr_offset;
    unsigned long sve_offset;
    unsigned long za_offset;
    unsigned long extra_offset;
    unsigned long end_offset;
};
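
/*
 * Sketch of the layout that get_sigframe() below carves out of the user
 * stack (which grows downwards). Exact offsets depend on which records
 * are allocated for the task; this is illustrative only:
 *
 *	higher addresses
 *	+------------------------------+ <- sigsp(regs->sp, ksig)
 *	| struct frame_record          | <- user->next_frame (fp, lr)
 *	+------------------------------+
 *	| struct rt_sigframe           | <- user->sigframe, new sp
 *	|   siginfo info               |
 *	|   ucontext uc                |
 *	|     uc_mcontext.__reserved:  |
 *	|       fpsimd_context         | <- fpsimd_offset
 *	|       esr_context            | <- esr_offset (if fault info valid)
 *	|       sve/za records...      |
 *	|       terminator             | <- end_offset
 *	+------------------------------+
 *	lower addresses
 */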

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
    const size_t reserved_size =
        sizeof(user->sigframe->uc.uc_mcontext.__reserved);

    memset(user, 0, sizeof(*user));
    user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

    user->limit = user->size + reserved_size;

    /* Reserve space for the extra_context expansion and the terminator: */
    user->limit -= TERMINATOR_SIZE;
    user->limit -= EXTRA_CONTEXT_SIZE;
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
    return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
                unsigned long *offset, size_t size, bool extend)
{
    size_t padded_size = round_up(size, 16);

    if (padded_size > user->limit - user->size &&
        !user->extra_offset &&
        extend) {
        int ret;

        user->limit += EXTRA_CONTEXT_SIZE;
        ret = __sigframe_alloc(user, &user->extra_offset,
                       sizeof(struct extra_context), false);
        if (ret) {
            user->limit -= EXTRA_CONTEXT_SIZE;
            return ret;
        }

        /* Reserve space for the __reserved[] terminator */
        user->size += TERMINATOR_SIZE;

        /*
         * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
         * the terminator:
         */
        user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
    }

    /* Still not enough space?  Bad luck! */
    if (padded_size > user->limit - user->size)
        return -ENOMEM;

    *offset = user->size;
    user->size += padded_size;

    return 0;
}
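
/*
 * Worked example (illustrative): the base allocation area is the
 * __reserved[] block in struct sigcontext (4096 bytes). With a 2048-bit
 * SVE vector length, vq = 16 and the SVE register payload alone is
 * roughly (32 * 16 + 17 * 2) * vq = 8736 bytes, which cannot fit; the
 * first such allocation takes the extend path above, placing an
 * extra_context record in __reserved[] and raising the limit so that
 * later records land in the "extra" area past the base frame.
 */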

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
              unsigned long *offset, size_t size)
{
    return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
    int ret;

    /* Un-reserve the space reserved for the terminator: */
    user->limit += TERMINATOR_SIZE;

    ret = sigframe_alloc(user, &user->end_offset,
                 sizeof(struct _aarch64_ctx));
    if (ret)
        return ret;

    /* Prevent further allocation: */
    user->limit = user->size;
    return 0;
}
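
/*
 * Typical allocator lifecycle, as used by setup_sigframe_layout()
 * further down (sketch):
 *
 *	init_user_layout(&user);
 *	err = sigframe_alloc(&user, &user.fpsimd_offset,
 *			     sizeof(struct fpsimd_context));
 *	... further optional records ...
 *	err = sigframe_alloc_end(&user);   - terminator, locks the layout
 */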

static void __user *apply_user_offset(
    struct rt_sigframe_user_layout const *user, unsigned long offset)
{
    char __user *base = (char __user *)user->sigframe;

    return base + offset;
}

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
    struct user_fpsimd_state const *fpsimd =
        &current->thread.uw.fpsimd_state;
    int err;

    /* copy the FP and status/control registers */
    err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
    __put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
    __put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

    /* copy the magic/size information */
    __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
    __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

    return err ? -EFAULT : 0;
}
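
/*
 * For reference, the record written above has the uapi layout from
 * <uapi/asm/sigcontext.h>:
 *
 *	struct fpsimd_context {
 *		struct _aarch64_ctx head;	- magic (FPSIMD_MAGIC), size
 *		__u32 fpsr;
 *		__u32 fpcr;
 *		__uint128_t vregs[32];		- V0..V31
 *	};
 */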

static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
    struct user_fpsimd_state fpsimd;
    __u32 magic, size;
    int err = 0;

    /* check the magic/size information */
    __get_user_error(magic, &ctx->head.magic, err);
    __get_user_error(size, &ctx->head.size, err);
    if (err)
        return -EFAULT;
    if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
        return -EINVAL;

    /* copy the FP and status/control registers */
    err = __copy_from_user(fpsimd.vregs, ctx->vregs,
                   sizeof(fpsimd.vregs));
    __get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
    __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

    clear_thread_flag(TIF_SVE);

    /* load the hardware registers from the fpsimd_state structure */
    if (!err)
        fpsimd_update_current_state(&fpsimd);

    return err ? -EFAULT : 0;
}

struct user_ctxs {
    struct fpsimd_context __user *fpsimd;
    struct sve_context __user *sve;
    struct za_context __user *za;
};

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
    int err = 0;
    u16 reserved[ARRAY_SIZE(ctx->__reserved)];
    u16 flags = 0;
    unsigned int vl = task_get_sve_vl(current);
    unsigned int vq = 0;

    if (thread_sm_enabled(&current->thread)) {
        vl = task_get_sme_vl(current);
        vq = sve_vq_from_vl(vl);
        flags |= SVE_SIG_FLAG_SM;
    } else if (test_thread_flag(TIF_SVE)) {
        vq = sve_vq_from_vl(vl);
    }

    memset(reserved, 0, sizeof(reserved));

    __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
    __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
             &ctx->head.size, err);
    __put_user_error(vl, &ctx->vl, err);
    __put_user_error(flags, &ctx->flags, err);
    BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
    err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

    if (vq) {
        /*
         * This assumes that the SVE state has already been saved to
         * the task struct by calling the function
         * fpsimd_signal_preserve_current_state().
         */
        err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
                      current->thread.sve_state,
                      SVE_SIG_REGS_SIZE(vq));
    }

    return err ? -EFAULT : 0;
}
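
/*
 * Worked example: vl is the vector length in bytes and vq the same
 * length in 128-bit quadwords, so vq = sve_vq_from_vl(vl) = vl / 16.
 * For 256-bit vectors, vl = 32 and vq = 2; the payload copied above is
 * then SVE_SIG_REGS_SIZE(2) bytes, covering 32 Z registers of 32 bytes
 * each plus 16 P registers and FFR of 4 bytes each.
 */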

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
    int err;
    unsigned int vl, vq;
    struct user_fpsimd_state fpsimd;
    struct sve_context sve;

    if (__copy_from_user(&sve, user->sve, sizeof(sve)))
        return -EFAULT;

    if (sve.flags & SVE_SIG_FLAG_SM) {
        if (!system_supports_sme())
            return -EINVAL;

        vl = task_get_sme_vl(current);
    } else {
        if (!system_supports_sve())
            return -EINVAL;

        vl = task_get_sve_vl(current);
    }

    if (sve.vl != vl)
        return -EINVAL;

    if (sve.head.size <= sizeof(*user->sve)) {
        clear_thread_flag(TIF_SVE);
        current->thread.svcr &= ~SVCR_SM_MASK;
        goto fpsimd_only;
    }

    vq = sve_vq_from_vl(sve.vl);

    if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
        return -EINVAL;

    /*
     * Careful: we are about to __copy_from_user() directly into
     * thread.sve_state with preemption enabled, so protection is
     * needed to prevent a racing context switch from writing stale
     * registers back over the new data.
     */

    fpsimd_flush_task_state(current);
    /* From now, fpsimd_thread_switch() won't touch thread.sve_state */

    sve_alloc(current, true);
    if (!current->thread.sve_state) {
        clear_thread_flag(TIF_SVE);
        return -ENOMEM;
    }

    err = __copy_from_user(current->thread.sve_state,
                   (char __user const *)user->sve +
                    SVE_SIG_REGS_OFFSET,
                   SVE_SIG_REGS_SIZE(vq));
    if (err)
        return -EFAULT;

    if (sve.flags & SVE_SIG_FLAG_SM)
        current->thread.svcr |= SVCR_SM_MASK;
    else
        set_thread_flag(TIF_SVE);

fpsimd_only:
    /* copy the FP and status/control registers */
    /* restore_sigframe() already checked that user->fpsimd != NULL. */
    err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
                   sizeof(fpsimd.vregs));
    __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
    __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

    /* load the hardware registers from the fpsimd_state structure */
    if (!err)
        fpsimd_update_current_state(&fpsimd);

    return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
    WARN_ON_ONCE(1);
    return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_za_context(struct za_context __user *ctx)
{
    int err = 0;
    u16 reserved[ARRAY_SIZE(ctx->__reserved)];
    unsigned int vl = task_get_sme_vl(current);
    unsigned int vq;

    if (thread_za_enabled(&current->thread))
        vq = sve_vq_from_vl(vl);
    else
        vq = 0;

    memset(reserved, 0, sizeof(reserved));

    __put_user_error(ZA_MAGIC, &ctx->head.magic, err);
    __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
             &ctx->head.size, err);
    __put_user_error(vl, &ctx->vl, err);
    BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
    err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

    if (vq) {
        /*
         * This assumes that the ZA state has already been saved to
         * the task struct by calling the function
         * fpsimd_signal_preserve_current_state().
         */
        err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
                      current->thread.za_state,
                      ZA_SIG_REGS_SIZE(vq));
    }

    return err ? -EFAULT : 0;
}
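
/*
 * Worked example: the ZA array is a VL-by-VL byte matrix, so for a
 * streaming vector length of 64 bytes (vq = 4) the payload written
 * above is ZA_SIG_REGS_SIZE(4) = 64 * 64 = 4096 bytes - large enough
 * that the record typically spills into the extra_context area.
 */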

static int restore_za_context(struct user_ctxs *user)
{
    int err;
    unsigned int vq;
    struct za_context za;

    if (__copy_from_user(&za, user->za, sizeof(za)))
        return -EFAULT;

    if (za.vl != task_get_sme_vl(current))
        return -EINVAL;

    if (za.head.size <= sizeof(*user->za)) {
        current->thread.svcr &= ~SVCR_ZA_MASK;
        return 0;
    }

    vq = sve_vq_from_vl(za.vl);

    if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq))
        return -EINVAL;

    /*
     * Careful: we are about to __copy_from_user() directly into
     * thread.za_state with preemption enabled, so protection is
     * needed to prevent a racing context switch from writing stale
     * registers back over the new data.
     */

    fpsimd_flush_task_state(current);
    /* From now, fpsimd_thread_switch() won't touch thread.za_state */

    sme_alloc(current);
    if (!current->thread.za_state) {
        current->thread.svcr &= ~SVCR_ZA_MASK;
        clear_thread_flag(TIF_SME);
        return -ENOMEM;
    }

    err = __copy_from_user(current->thread.za_state,
                   (char __user const *)user->za +
                    ZA_SIG_REGS_OFFSET,
                   ZA_SIG_REGS_SIZE(vq));
    if (err)
        return -EFAULT;

    set_thread_flag(TIF_SME);
    current->thread.svcr |= SVCR_ZA_MASK;

    return 0;
}
#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

static int parse_user_sigframe(struct user_ctxs *user,
                   struct rt_sigframe __user *sf)
{
    struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
    struct _aarch64_ctx __user *head;
    char __user *base = (char __user *)&sc->__reserved;
    size_t offset = 0;
    size_t limit = sizeof(sc->__reserved);
    bool have_extra_context = false;
    char const __user *const sfp = (char const __user *)sf;

    user->fpsimd = NULL;
    user->sve = NULL;
    user->za = NULL;

    if (!IS_ALIGNED((unsigned long)base, 16))
        goto invalid;

    while (1) {
        int err = 0;
        u32 magic, size;
        char const __user *userp;
        struct extra_context const __user *extra;
        u64 extra_datap;
        u32 extra_size;
        struct _aarch64_ctx const __user *end;
        u32 end_magic, end_size;

        if (limit - offset < sizeof(*head))
            goto invalid;

        if (!IS_ALIGNED(offset, 16))
            goto invalid;

        head = (struct _aarch64_ctx __user *)(base + offset);
        __get_user_error(magic, &head->magic, err);
        __get_user_error(size, &head->size, err);
        if (err)
            return err;

        if (limit - offset < size)
            goto invalid;

        switch (magic) {
        case 0:
            if (size)
                goto invalid;

            goto done;

        case FPSIMD_MAGIC:
            if (!system_supports_fpsimd())
                goto invalid;
            if (user->fpsimd)
                goto invalid;

            if (size < sizeof(*user->fpsimd))
                goto invalid;

            user->fpsimd = (struct fpsimd_context __user *)head;
            break;

        case ESR_MAGIC:
            /* ignore */
            break;

        case SVE_MAGIC:
            if (!system_supports_sve() && !system_supports_sme())
                goto invalid;

            if (user->sve)
                goto invalid;

            if (size < sizeof(*user->sve))
                goto invalid;

            user->sve = (struct sve_context __user *)head;
            break;

        case ZA_MAGIC:
            if (!system_supports_sme())
                goto invalid;

            if (user->za)
                goto invalid;

            if (size < sizeof(*user->za))
                goto invalid;

            user->za = (struct za_context __user *)head;
            break;

        case EXTRA_MAGIC:
            if (have_extra_context)
                goto invalid;

            if (size < sizeof(*extra))
                goto invalid;

            userp = (char const __user *)head;

            extra = (struct extra_context const __user *)userp;
            userp += size;

            __get_user_error(extra_datap, &extra->datap, err);
            __get_user_error(extra_size, &extra->size, err);
            if (err)
                return err;

            /* Check for the dummy terminator in __reserved[]: */

            if (limit - offset - size < TERMINATOR_SIZE)
                goto invalid;

            end = (struct _aarch64_ctx const __user *)userp;
            userp += TERMINATOR_SIZE;

            __get_user_error(end_magic, &end->magic, err);
            __get_user_error(end_size, &end->size, err);
            if (err)
                return err;

            if (end_magic || end_size)
                goto invalid;

            /* Prevent looping/repeated parsing of extra_context */
            have_extra_context = true;

            base = (__force void __user *)extra_datap;
            if (!IS_ALIGNED((unsigned long)base, 16))
                goto invalid;

            if (!IS_ALIGNED(extra_size, 16))
                goto invalid;

            if (base != userp)
                goto invalid;

            /* Reject "unreasonably large" frames: */
            if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
                goto invalid;

            /*
             * Ignore trailing terminator in __reserved[]
             * and start parsing extra data:
             */
            offset = 0;
            limit = extra_size;

            if (!access_ok(base, limit))
                goto invalid;

            continue;

        default:
            goto invalid;
        }

        if (size < sizeof(*head))
            goto invalid;

        if (limit - offset < size)
            goto invalid;

        offset += size;
    }

done:
    return 0;

invalid:
    return -EINVAL;
}
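
/*
 * Userspace walks the same record chain that the parser above
 * validates. A minimal sketch of a SA_SIGINFO handler doing so
 * (illustrative only; EXTRA_MAGIC indirection omitted):
 *
 *	void handler(int sig, siginfo_t *info, void *ucp)
 *	{
 *		ucontext_t *uc = ucp;
 *		struct _aarch64_ctx *head =
 *			(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
 *
 *		while (head->magic) {
 *			(dispatch on head->magic: FPSIMD_MAGIC, ESR_MAGIC,
 *			 SVE_MAGIC, ZA_MAGIC, or EXTRA_MAGIC, whose datap
 *			 points at a further chain)
 *			head = (struct _aarch64_ctx *)((char *)head +
 *						       head->size);
 *		}
 *	}
 */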

static int restore_sigframe(struct pt_regs *regs,
                struct rt_sigframe __user *sf)
{
    sigset_t set;
    int i, err;
    struct user_ctxs user;

    err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
    if (err == 0)
        set_current_blocked(&set);

    for (i = 0; i < 31; i++)
        __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
                 err);
    __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
    __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
    __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

    /*
     * Avoid sys_rt_sigreturn() restarting.
     */
    forget_syscall(regs);

    err |= !valid_user_regs(&regs->user_regs, current);
    if (err == 0)
        err = parse_user_sigframe(&user, sf);

    if (err == 0 && system_supports_fpsimd()) {
        if (!user.fpsimd)
            return -EINVAL;

        if (user.sve)
            err = restore_sve_fpsimd_context(&user);
        else
            err = restore_fpsimd_context(user.fpsimd);
    }

    if (err == 0 && system_supports_sme() && user.za)
        err = restore_za_context(&user);

    return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
    struct pt_regs *regs = current_pt_regs();
    struct rt_sigframe __user *frame;

    /* Always make any pending restarted system calls return -EINTR */
    current->restart_block.fn = do_no_restart_syscall;

    /*
     * Since we stacked the signal frame on a 128-bit boundary, 'sp'
     * should be 16-byte aligned here.
     */
    if (regs->sp & 15)
        goto badframe;

    frame = (struct rt_sigframe __user *)regs->sp;

    if (!access_ok(frame, sizeof(*frame)))
        goto badframe;

    if (restore_sigframe(regs, frame))
        goto badframe;

    if (restore_altstack(&frame->uc.uc_stack))
        goto badframe;

    return regs->regs[0];

badframe:
    arm64_notify_segfault(regs->sp);
    return 0;
}
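
/*
 * The return path: setup_return() points the handler's return address
 * at the vDSO sigtramp (or sa_restorer), which re-enters the kernel
 * through the syscall above. The arm64 trampoline amounts to (sketch):
 *
 *	mov	x8, #__NR_rt_sigreturn
 *	svc	#0
 *
 * with sp still pointing at the rt_sigframe laid out by setup_rt_frame().
 */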

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 *  this task; otherwise, generates a layout for the current state
 *  of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
                 bool add_all)
{
    int err;

    if (system_supports_fpsimd()) {
        err = sigframe_alloc(user, &user->fpsimd_offset,
                     sizeof(struct fpsimd_context));
        if (err)
            return err;
    }

    /* fault information, if valid */
    if (add_all || current->thread.fault_code) {
        err = sigframe_alloc(user, &user->esr_offset,
                     sizeof(struct esr_context));
        if (err)
            return err;
    }

    if (system_supports_sve()) {
        unsigned int vq = 0;

        if (add_all || test_thread_flag(TIF_SVE) ||
            thread_sm_enabled(&current->thread)) {
            int vl = max(sve_max_vl(), sme_max_vl());

            if (!add_all)
                vl = thread_get_cur_vl(&current->thread);

            vq = sve_vq_from_vl(vl);
        }

        err = sigframe_alloc(user, &user->sve_offset,
                     SVE_SIG_CONTEXT_SIZE(vq));
        if (err)
            return err;
    }

    if (system_supports_sme()) {
        unsigned int vl;
        unsigned int vq = 0;

        if (add_all)
            vl = sme_max_vl();
        else
            vl = task_get_sme_vl(current);

        if (thread_za_enabled(&current->thread))
            vq = sve_vq_from_vl(vl);

        err = sigframe_alloc(user, &user->za_offset,
                     ZA_SIG_CONTEXT_SIZE(vq));
        if (err)
            return err;
    }

    return sigframe_alloc_end(user);
}

static int setup_sigframe(struct rt_sigframe_user_layout *user,
              struct pt_regs *regs, sigset_t *set)
{
    int i, err = 0;
    struct rt_sigframe __user *sf = user->sigframe;

    /* set up the stack frame for unwinding */
    __put_user_error(regs->regs[29], &user->next_frame->fp, err);
    __put_user_error(regs->regs[30], &user->next_frame->lr, err);

    for (i = 0; i < 31; i++)
        __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
                 err);
    __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
    __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
    __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

    __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

    err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

    if (err == 0 && system_supports_fpsimd()) {
        struct fpsimd_context __user *fpsimd_ctx =
            apply_user_offset(user, user->fpsimd_offset);
        err |= preserve_fpsimd_context(fpsimd_ctx);
    }

    /* fault information, if valid */
    if (err == 0 && user->esr_offset) {
        struct esr_context __user *esr_ctx =
            apply_user_offset(user, user->esr_offset);

        __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
        __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
        __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
    }

    /* Scalable Vector Extension state (including streaming), if present */
    if ((system_supports_sve() || system_supports_sme()) &&
        err == 0 && user->sve_offset) {
        struct sve_context __user *sve_ctx =
            apply_user_offset(user, user->sve_offset);
        err |= preserve_sve_context(sve_ctx);
    }

    /* ZA state if present */
    if (system_supports_sme() && err == 0 && user->za_offset) {
        struct za_context __user *za_ctx =
            apply_user_offset(user, user->za_offset);
        err |= preserve_za_context(za_ctx);
    }

    if (err == 0 && user->extra_offset) {
        char __user *sfp = (char __user *)user->sigframe;
        char __user *userp =
            apply_user_offset(user, user->extra_offset);

        struct extra_context __user *extra;
        struct _aarch64_ctx __user *end;
        u64 extra_datap;
        u32 extra_size;

        extra = (struct extra_context __user *)userp;
        userp += EXTRA_CONTEXT_SIZE;

        end = (struct _aarch64_ctx __user *)userp;
        userp += TERMINATOR_SIZE;

        /*
         * extra_datap is just written to the signal frame.
         * The value gets cast back to a void __user *
         * during sigreturn.
         */
        extra_datap = (__force u64)userp;
        extra_size = sfp + round_up(user->size, 16) - userp;

        __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
        __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
        __put_user_error(extra_datap, &extra->datap, err);
        __put_user_error(extra_size, &extra->size, err);

        /* Add the terminator */
        __put_user_error(0, &end->magic, err);
        __put_user_error(0, &end->size, err);
    }

    /* set the "end" magic */
    if (err == 0) {
        struct _aarch64_ctx __user *end =
            apply_user_offset(user, user->end_offset);

        __put_user_error(0, &end->magic, err);
        __put_user_error(0, &end->size, err);
    }

    return err;
}
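
/*
 * The resulting record chain after the writes above, when an
 * extra_context record was needed (illustrative):
 *
 *	records that fit in the base __reserved[] area...
 *	extra_context (datap points just past the terminator below)
 *	terminator (dummy, ends the base chain)
 *	-- extra area, contiguous on the stack, 16-byte aligned --
 *	remaining records...
 *	terminator (at end_offset)
 */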

static int get_sigframe(struct rt_sigframe_user_layout *user,
             struct ksignal *ksig, struct pt_regs *regs)
{
    unsigned long sp, sp_top;
    int err;

    init_user_layout(user);
    err = setup_sigframe_layout(user, false);
    if (err)
        return err;

    sp = sp_top = sigsp(regs->sp, ksig);

    sp = round_down(sp - sizeof(struct frame_record), 16);
    user->next_frame = (struct frame_record __user *)sp;

    sp = round_down(sp, 16) - sigframe_size(user);
    user->sigframe = (struct rt_sigframe __user *)sp;

    /*
     * Check that we can actually write to the signal frame.
     */
    if (!access_ok(user->sigframe, sp_top - sp))
        return -EFAULT;

    return 0;
}
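
/*
 * Worked example (illustrative numbers): with sp_top = 0x7fff0000,
 *
 *	sp = round_down(0x7fff0000 - 16, 16) = 0x7ffefff0   - frame_record
 *	sp = 0x7ffefff0 - sigframe_size(user)               - rt_sigframe
 *
 * so the access_ok() check spans everything from the final sp up to
 * sp_top, covering the signal frame and the frame record in one go.
 */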

static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
             struct rt_sigframe_user_layout *user, int usig)
{
    __sigrestore_t sigtramp;

    regs->regs[0] = usig;
    regs->sp = (unsigned long)user->sigframe;
    regs->regs[29] = (unsigned long)&user->next_frame->fp;
    regs->pc = (unsigned long)ka->sa.sa_handler;

    /*
     * Signal delivery is a (wacky) indirect function call in
     * userspace, so simulate the same setting of BTYPE as a BLR
     * <register containing the signal handler entry point>.
     * Signal delivery to a location in a PROT_BTI guarded page
     * that is not a function entry point will now trigger a
     * SIGILL in userspace.
     *
     * If the signal handler entry point is not in a PROT_BTI
     * guarded page, this is harmless.
     */
    if (system_supports_bti()) {
        regs->pstate &= ~PSR_BTYPE_MASK;
        regs->pstate |= PSR_BTYPE_C;
    }

    /* TCO (Tag Check Override) always cleared for signal handlers */
    regs->pstate &= ~PSR_TCO_BIT;

    /* Signal handlers are invoked with ZA and streaming mode disabled */
    if (system_supports_sme()) {
        /*
         * If we were in streaming mode the saved register
         * state was SVE but we will exit SM and use the
         * FPSIMD register state - flush the saved FPSIMD
         * register state in case it gets loaded.
         */
        if (current->thread.svcr & SVCR_SM_MASK)
            memset(&current->thread.uw.fpsimd_state, 0,
                   sizeof(current->thread.uw.fpsimd_state));

        current->thread.svcr &= ~(SVCR_ZA_MASK |
                      SVCR_SM_MASK);
        sme_smstop();
    }

    if (ka->sa.sa_flags & SA_RESTORER)
        sigtramp = ka->sa.sa_restorer;
    else
        sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

    regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
              struct pt_regs *regs)
{
    struct rt_sigframe_user_layout user;
    struct rt_sigframe __user *frame;
    int err = 0;

    fpsimd_signal_preserve_current_state();

    if (get_sigframe(&user, ksig, regs))
        return 1;

    frame = user.sigframe;

    __put_user_error(0, &frame->uc.uc_flags, err);
    __put_user_error(NULL, &frame->uc.uc_link, err);

    err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
    err |= setup_sigframe(&user, regs, set);
    if (err == 0) {
        setup_return(regs, &ksig->ka, &user, usig);
        if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
            err |= copy_siginfo_to_user(&frame->info, &ksig->info);
            regs->regs[1] = (unsigned long)&frame->info;
            regs->regs[2] = (unsigned long)&frame->uc;
        }
    }

    return err;
}
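
/*
 * From the handler's point of view, the registers set up above and in
 * setup_return() implement the usual SA_SIGINFO calling convention
 * (sketch):
 *
 *	x0 = usig, x1 = &frame->info, x2 = &frame->uc
 *	lr = sigtramp, sp = frame, x29 = &frame->next_frame->fp
 *
 * matching a handler declared as:
 *
 *	void handler(int sig, siginfo_t *info, void *ucontext);
 */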

static void setup_restart_syscall(struct pt_regs *regs)
{
    if (is_compat_task())
        compat_setup_restart_syscall(regs);
    else
        regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
    sigset_t *oldset = sigmask_to_save();
    int usig = ksig->sig;
    int ret;

    rseq_signal_deliver(ksig, regs);

    /*
     * Set up the stack frame
     */
    if (is_compat_task()) {
        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
            ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
        else
            ret = compat_setup_frame(usig, ksig, oldset, regs);
    } else {
        ret = setup_rt_frame(usig, ksig, oldset, regs);
    }

    /*
     * Check that the resulting registers are actually sane.
     */
    ret |= !valid_user_regs(&regs->user_regs, current);

    /* Step into the signal handler if we are stepping */
    signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init with SIGKILL, even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
    unsigned long continue_addr = 0, restart_addr = 0;
    int retval = 0;
    struct ksignal ksig;
    bool syscall = in_syscall(regs);

    /*
     * If we came from a system call, check for system call restarting...
     */
    if (syscall) {
        continue_addr = regs->pc;
        restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
        retval = regs->regs[0];

        /*
         * Avoid additional syscall restarting via ret_to_user.
         */
        forget_syscall(regs);

        /*
         * Prepare for system call restart. We do this here so that a
         * debugger will see the already changed PC.
         */
        switch (retval) {
        case -ERESTARTNOHAND:
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
        case -ERESTART_RESTARTBLOCK:
            regs->regs[0] = regs->orig_x0;
            regs->pc = restart_addr;
            break;
        }
    }

    /*
     * Get the signal to deliver. When running under ptrace, at this point
     * the debugger may change all of our registers.
     */
    if (get_signal(&ksig)) {
        /*
         * Depending on the signal settings, we may need to revert the
         * decision to restart the system call, but skip this if a
         * debugger has chosen to restart at a different PC.
         */
        if (regs->pc == restart_addr &&
            (retval == -ERESTARTNOHAND ||
             retval == -ERESTART_RESTARTBLOCK ||
             (retval == -ERESTARTSYS &&
              !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
            syscall_set_return_value(current, regs, -EINTR, 0);
            regs->pc = continue_addr;
        }

        handle_signal(&ksig, regs);
        return;
    }

    /*
     * Handle restarting a different system call. As above, if a debugger
     * has chosen to restart at a different PC, ignore the restart.
     */
    if (syscall && regs->pc == restart_addr) {
        if (retval == -ERESTART_RESTARTBLOCK)
            setup_restart_syscall(regs);
        user_rewind_single_step(current);
    }

    restore_saved_sigmask();
}

void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
    do {
        if (thread_flags & _TIF_NEED_RESCHED) {
            /* Unmask Debug and SError for the next task */
            local_daif_restore(DAIF_PROCCTX_NOIRQ);

            schedule();
        } else {
            local_daif_restore(DAIF_PROCCTX);

            if (thread_flags & _TIF_UPROBE)
                uprobe_notify_resume(regs);

            if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
                clear_thread_flag(TIF_MTE_ASYNC_FAULT);
                send_sig_fault(SIGSEGV, SEGV_MTEAERR,
                           (void __user *)NULL, current);
            }

            if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                do_signal(regs);

            if (thread_flags & _TIF_NOTIFY_RESUME)
                resume_user_mode_work(regs);

            if (thread_flags & _TIF_FOREIGN_FPSTATE)
                fpsimd_restore_current_state();
        }

        local_daif_mask();
        thread_flags = read_thread_flags();
    } while (thread_flags & _TIF_WORK_MASK);
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
    struct rt_sigframe_user_layout user;

    init_user_layout(&user);

    /*
     * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
     * be big enough, but it's our best guess:
     */
    if (WARN_ON(setup_sigframe_layout(&user, true)))
        return;

    signal_minsigstksz = sigframe_size(&user) +
        round_up(sizeof(struct frame_record), 16) +
        16; /* max alignment padding */
}
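
/*
 * Userspace consumes this value through the auxiliary vector. A minimal
 * sketch of sizing an alternate signal stack with it (illustrative;
 * error handling omitted):
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <sys/auxv.h>
 *
 *	size_t minsz = getauxval(AT_MINSIGSTKSZ);
 *	stack_t ss = {
 *		.ss_sp = malloc(minsz),
 *		.ss_size = minsz,
 *	};
 *	sigaltstack(&ss, NULL);
 */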

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL   == 11);
static_assert(NSIGFPE   == 15);
static_assert(NSIGSEGV  == 9);
static_assert(NSIGBUS   == 5);
static_assert(NSIGTRAP  == 6);
static_assert(NSIGCHLD  == 6);
static_assert(NSIGSYS   == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code)  == 0x08);
static_assert(offsetof(siginfo_t, si_pid)   == 0x10);
static_assert(offsetof(siginfo_t, si_uid)   == 0x14);
static_assert(offsetof(siginfo_t, si_tid)   == 0x10);
static_assert(offsetof(siginfo_t, si_overrun)   == 0x14);
static_assert(offsetof(siginfo_t, si_status)    == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int)   == 0x18);
static_assert(offsetof(siginfo_t, si_ptr)   == 0x18);
static_assert(offsetof(siginfo_t, si_addr)  == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb)  == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey)  == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band)  == 0x10);
static_assert(offsetof(siginfo_t, si_fd)    == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall)   == 0x18);
static_assert(offsetof(siginfo_t, si_arch)  == 0x1c);