0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/audit.h>
0012 #include <linux/compat.h>
0013 #include <linux/kernel.h>
0014 #include <linux/sched/signal.h>
0015 #include <linux/sched/task_stack.h>
0016 #include <linux/mm.h>
0017 #include <linux/nospec.h>
0018 #include <linux/smp.h>
0019 #include <linux/ptrace.h>
0020 #include <linux/user.h>
0021 #include <linux/seccomp.h>
0022 #include <linux/security.h>
0023 #include <linux/init.h>
0024 #include <linux/signal.h>
0025 #include <linux/string.h>
0026 #include <linux/uaccess.h>
0027 #include <linux/perf_event.h>
0028 #include <linux/hw_breakpoint.h>
0029 #include <linux/regset.h>
0030 #include <linux/elf.h>
0031
0032 #include <asm/compat.h>
0033 #include <asm/cpufeature.h>
0034 #include <asm/debug-monitors.h>
0035 #include <asm/fpsimd.h>
0036 #include <asm/mte.h>
0037 #include <asm/pointer_auth.h>
0038 #include <asm/stacktrace.h>
0039 #include <asm/syscall.h>
0040 #include <asm/traps.h>
0041 #include <asm/system_misc.h>
0042
0043 #define CREATE_TRACE_POINTS
0044 #include <trace/events/syscalls.h>
0045
/*
 * Mapping of a user-visible register name to its byte offset within
 * struct pt_regs, used for name-based register lookup (e.g. by kprobes
 * fetch arguments).
 */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "x0", "sp", "pstate" */
	int offset;		/* byte offset into struct pt_regs */
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

/* Name/offset table for all addressable registers; NULL-name terminated. */
static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	/* "lr" is an alias for x30 */
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};
0094
0095
0096
0097
0098
0099
0100
0101
0102 int regs_query_register_offset(const char *name)
0103 {
0104 const struct pt_regs_offset *roff;
0105
0106 for (roff = regoffset_table; roff->name != NULL; roff++)
0107 if (!strcmp(roff->name, name))
0108 return roff->offset;
0109 return -EINVAL;
0110 }
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
0121 {
0122 return ((addr & ~(THREAD_SIZE - 1)) ==
0123 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
0124 on_irq_stack(addr, sizeof(unsigned long), NULL);
0125 }
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the kernel stack
 * @regs:	pt_regs which contains the kernel stack pointer
 * @n:		stack entry number (word-sized slots above the stack pointer)
 *
 * Returns the @n-th unsigned long on the kernel stack recorded in @regs,
 * or 0 if the resulting address falls outside the kernel stack.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *slot;

	slot = (unsigned long *)kernel_stack_pointer(regs) + n;
	if (!regs_within_kernel_stack(regs, (unsigned long)slot))
		return 0;

	return *slot;
}
0146
0147
0148
0149
0150
0151
0152
0153
0154
/*
 * Called by kernel/ptrace.c when detaching from @child.
 *
 * Make sure no per-task debug state (e.g. hardware single-step) is left
 * armed once the tracer goes away.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Clear any pending single-step for the task. */
	user_disable_single_step(child);
}
0164
0165 #ifdef CONFIG_HAVE_HW_BREAKPOINT
0166
0167
0168
/*
 * Perf overflow handler for ptrace-installed hardware breakpoints:
 * deliver a SIGTRAP to the task that hit the breakpoint/watchpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		/*
		 * Compat (AArch32) ABI encodes the slot in si_errno:
		 * breakpoint slot i -> (i << 1) + 1 (positive odd value).
		 */
		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		/* Watchpoint slot i -> -((i << 1) + 1) (negated). */
		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}
#endif
	/* Native tasks get a plain SIGTRAP/TRAP_HWBKPT at the trigger address. */
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
0201
0202
0203
0204
0205
0206 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
0207 {
0208 int i;
0209 struct thread_struct *t = &tsk->thread;
0210
0211 for (i = 0; i < ARM_MAX_BRP; i++) {
0212 if (t->debug.hbp_break[i]) {
0213 unregister_hw_breakpoint(t->debug.hbp_break[i]);
0214 t->debug.hbp_break[i] = NULL;
0215 }
0216 }
0217
0218 for (i = 0; i < ARM_MAX_WRP; i++) {
0219 if (t->debug.hbp_watch[i]) {
0220 unregister_hw_breakpoint(t->debug.hbp_watch[i]);
0221 t->debug.hbp_watch[i] = NULL;
0222 }
0223 }
0224 }
0225
0226 void ptrace_hw_copy_thread(struct task_struct *tsk)
0227 {
0228 memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
0229 }
0230
0231 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
0232 struct task_struct *tsk,
0233 unsigned long idx)
0234 {
0235 struct perf_event *bp = ERR_PTR(-EINVAL);
0236
0237 switch (note_type) {
0238 case NT_ARM_HW_BREAK:
0239 if (idx >= ARM_MAX_BRP)
0240 goto out;
0241 idx = array_index_nospec(idx, ARM_MAX_BRP);
0242 bp = tsk->thread.debug.hbp_break[idx];
0243 break;
0244 case NT_ARM_HW_WATCH:
0245 if (idx >= ARM_MAX_WRP)
0246 goto out;
0247 idx = array_index_nospec(idx, ARM_MAX_WRP);
0248 bp = tsk->thread.debug.hbp_watch[idx];
0249 break;
0250 }
0251
0252 out:
0253 return bp;
0254 }
0255
0256 static int ptrace_hbp_set_event(unsigned int note_type,
0257 struct task_struct *tsk,
0258 unsigned long idx,
0259 struct perf_event *bp)
0260 {
0261 int err = -EINVAL;
0262
0263 switch (note_type) {
0264 case NT_ARM_HW_BREAK:
0265 if (idx >= ARM_MAX_BRP)
0266 goto out;
0267 idx = array_index_nospec(idx, ARM_MAX_BRP);
0268 tsk->thread.debug.hbp_break[idx] = bp;
0269 err = 0;
0270 break;
0271 case NT_ARM_HW_WATCH:
0272 if (idx >= ARM_MAX_WRP)
0273 goto out;
0274 idx = array_index_nospec(idx, ARM_MAX_WRP);
0275 tsk->thread.debug.hbp_watch[idx] = bp;
0276 err = 0;
0277 break;
0278 }
0279
0280 out:
0281 return err;
0282 }
0283
0284 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
0285 struct task_struct *tsk,
0286 unsigned long idx)
0287 {
0288 struct perf_event *bp;
0289 struct perf_event_attr attr;
0290 int err, type;
0291
0292 switch (note_type) {
0293 case NT_ARM_HW_BREAK:
0294 type = HW_BREAKPOINT_X;
0295 break;
0296 case NT_ARM_HW_WATCH:
0297 type = HW_BREAKPOINT_RW;
0298 break;
0299 default:
0300 return ERR_PTR(-EINVAL);
0301 }
0302
0303 ptrace_breakpoint_init(&attr);
0304
0305
0306
0307
0308
0309 attr.bp_addr = 0;
0310 attr.bp_len = HW_BREAKPOINT_LEN_4;
0311 attr.bp_type = type;
0312 attr.disabled = 1;
0313
0314 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
0315 if (IS_ERR(bp))
0316 return bp;
0317
0318 err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
0319 if (err)
0320 return ERR_PTR(err);
0321
0322 return bp;
0323 }
0324
0325 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
0326 struct arch_hw_breakpoint_ctrl ctrl,
0327 struct perf_event_attr *attr)
0328 {
0329 int err, len, type, offset, disabled = !ctrl.enabled;
0330
0331 attr->disabled = disabled;
0332 if (disabled)
0333 return 0;
0334
0335 err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
0336 if (err)
0337 return err;
0338
0339 switch (note_type) {
0340 case NT_ARM_HW_BREAK:
0341 if ((type & HW_BREAKPOINT_X) != type)
0342 return -EINVAL;
0343 break;
0344 case NT_ARM_HW_WATCH:
0345 if ((type & HW_BREAKPOINT_RW) != type)
0346 return -EINVAL;
0347 break;
0348 default:
0349 return -EINVAL;
0350 }
0351
0352 attr->bp_len = len;
0353 attr->bp_type = type;
0354 attr->bp_addr += offset;
0355
0356 return 0;
0357 }
0358
0359 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
0360 {
0361 u8 num;
0362 u32 reg = 0;
0363
0364 switch (note_type) {
0365 case NT_ARM_HW_BREAK:
0366 num = hw_breakpoint_slots(TYPE_INST);
0367 break;
0368 case NT_ARM_HW_WATCH:
0369 num = hw_breakpoint_slots(TYPE_DATA);
0370 break;
0371 default:
0372 return -EINVAL;
0373 }
0374
0375 reg |= debug_monitors_arch();
0376 reg <<= 8;
0377 reg |= num;
0378
0379 *info = reg;
0380 return 0;
0381 }
0382
0383 static int ptrace_hbp_get_ctrl(unsigned int note_type,
0384 struct task_struct *tsk,
0385 unsigned long idx,
0386 u32 *ctrl)
0387 {
0388 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
0389
0390 if (IS_ERR(bp))
0391 return PTR_ERR(bp);
0392
0393 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
0394 return 0;
0395 }
0396
0397 static int ptrace_hbp_get_addr(unsigned int note_type,
0398 struct task_struct *tsk,
0399 unsigned long idx,
0400 u64 *addr)
0401 {
0402 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
0403
0404 if (IS_ERR(bp))
0405 return PTR_ERR(bp);
0406
0407 *addr = bp ? counter_arch_bp(bp)->address : 0;
0408 return 0;
0409 }
0410
/*
 * Return the perf_event for slot @idx, creating a disabled one on
 * demand if the slot is empty. May return an ERR_PTR().
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	return bp ? bp : ptrace_hbp_create(note_type, tsk, idx);
}
0422
0423 static int ptrace_hbp_set_ctrl(unsigned int note_type,
0424 struct task_struct *tsk,
0425 unsigned long idx,
0426 u32 uctrl)
0427 {
0428 int err;
0429 struct perf_event *bp;
0430 struct perf_event_attr attr;
0431 struct arch_hw_breakpoint_ctrl ctrl;
0432
0433 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
0434 if (IS_ERR(bp)) {
0435 err = PTR_ERR(bp);
0436 return err;
0437 }
0438
0439 attr = bp->attr;
0440 decode_ctrl_reg(uctrl, &ctrl);
0441 err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
0442 if (err)
0443 return err;
0444
0445 return modify_user_hw_breakpoint(bp, &attr);
0446 }
0447
0448 static int ptrace_hbp_set_addr(unsigned int note_type,
0449 struct task_struct *tsk,
0450 unsigned long idx,
0451 u64 addr)
0452 {
0453 int err;
0454 struct perf_event *bp;
0455 struct perf_event_attr attr;
0456
0457 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
0458 if (IS_ERR(bp)) {
0459 err = PTR_ERR(bp);
0460 return err;
0461 }
0462
0463 attr = bp->attr;
0464 attr.bp_addr = addr;
0465 err = modify_user_hw_breakpoint(bp, &attr);
0466 return err;
0467 }
0468
/* Sizes of the fields of one user_hwdebug_state dbg_regs[] entry. */
#define PTRACE_HBP_ADDR_SZ sizeof(u64)
#define PTRACE_HBP_CTRL_SZ sizeof(u32)
#define PTRACE_HBP_PAD_SZ sizeof(u32)

/*
 * Regset "get" for NT_ARM_HW_BREAK / NT_ARM_HW_WATCH: emit the resource
 * info word, pad, then one {addr, ctrl, pad} record per slot until the
 * destination buffer is full.
 */
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	/* Pad to the 8-byte-aligned start of dbg_regs[]. */
	membuf_zero(&to, sizeof(u32));

	/* (address, ctrl) registers, one record per remaining slot. */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}
0504
/*
 * Regset "set" for NT_ARM_HW_BREAK / NT_ARM_HW_WATCH: skip the
 * read-only header, then walk the user buffer consuming one
 * {addr, ctrl, pad} record per slot and applying each field as it is
 * read. A buffer ending exactly after an address is accepted.
 */
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad: read-only, skipped on write. */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		/* A partial address is malformed input. */
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		/* Buffer may legitimately end after the address. */
		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Trailing padding of the record is ignored. */
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
0557 #endif
0558
0559 static int gpr_get(struct task_struct *target,
0560 const struct user_regset *regset,
0561 struct membuf to)
0562 {
0563 struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
0564 return membuf_write(&to, uregs, sizeof(*uregs));
0565 }
0566
0567 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
0568 unsigned int pos, unsigned int count,
0569 const void *kbuf, const void __user *ubuf)
0570 {
0571 int ret;
0572 struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
0573
0574 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
0575 if (ret)
0576 return ret;
0577
0578 if (!valid_user_regs(&newregs, target))
0579 return -EINVAL;
0580
0581 task_pt_regs(target)->user_regs = newregs;
0582 return 0;
0583 }
0584
0585 static int fpr_active(struct task_struct *target, const struct user_regset *regset)
0586 {
0587 if (!system_supports_fpsimd())
0588 return -ENODEV;
0589 return regset->n;
0590 }
0591
0592
0593
0594
0595 static int __fpr_get(struct task_struct *target,
0596 const struct user_regset *regset,
0597 struct membuf to)
0598 {
0599 struct user_fpsimd_state *uregs;
0600
0601 sve_sync_to_fpsimd(target);
0602
0603 uregs = &target->thread.uw.fpsimd_state;
0604
0605 return membuf_write(&to, uregs, sizeof(*uregs));
0606 }
0607
0608 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
0609 struct membuf to)
0610 {
0611 if (!system_supports_fpsimd())
0612 return -EINVAL;
0613
0614 if (target == current)
0615 fpsimd_preserve_current_state();
0616
0617 return __fpr_get(target, regset, to);
0618 }
0619
/*
 * Core of the FPSIMD regset "set": overlay user data, starting at
 * @start_pos within the source buffer, onto a copy of the task's FPSIMD
 * state and commit it. @start_pos lets sve_set_common() reuse this for
 * the FPSIMD-format SVE payload, which does not begin at offset 0.
 */
static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}
0646
0647 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
0648 unsigned int pos, unsigned int count,
0649 const void *kbuf, const void __user *ubuf)
0650 {
0651 int ret;
0652
0653 if (!system_supports_fpsimd())
0654 return -EINVAL;
0655
0656 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
0657 if (ret)
0658 return ret;
0659
0660 sve_sync_from_fpsimd_zeropad(target);
0661 fpsimd_flush_task_state(target);
0662
0663 return ret;
0664 }
0665
0666 static int tls_get(struct task_struct *target, const struct user_regset *regset,
0667 struct membuf to)
0668 {
0669 if (target == current)
0670 tls_preserve_current_state();
0671
0672 return membuf_store(&to, target->thread.uw.tp_value);
0673 }
0674
0675 static int tls_set(struct task_struct *target, const struct user_regset *regset,
0676 unsigned int pos, unsigned int count,
0677 const void *kbuf, const void __user *ubuf)
0678 {
0679 int ret;
0680 unsigned long tls = target->thread.uw.tp_value;
0681
0682 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
0683 if (ret)
0684 return ret;
0685
0686 target->thread.uw.tp_value = tls;
0687 return ret;
0688 }
0689
0690 static int system_call_get(struct task_struct *target,
0691 const struct user_regset *regset,
0692 struct membuf to)
0693 {
0694 return membuf_store(&to, task_pt_regs(target)->syscallno);
0695 }
0696
0697 static int system_call_set(struct task_struct *target,
0698 const struct user_regset *regset,
0699 unsigned int pos, unsigned int count,
0700 const void *kbuf, const void __user *ubuf)
0701 {
0702 int syscallno = task_pt_regs(target)->syscallno;
0703 int ret;
0704
0705 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
0706 if (ret)
0707 return ret;
0708
0709 task_pt_regs(target)->syscallno = syscallno;
0710 return ret;
0711 }
0712
0713 #ifdef CONFIG_ARM64_SVE
0714
/*
 * Populate a user_sve_header for @target and vector type @type
 * (ARM64_VEC_SVE for NT_ARM_SVE, ARM64_VEC_SME for NT_ARM_SSVE).
 * Encodes the vector length, inheritance flag and whether the payload
 * that follows is in FPSIMD or full SVE format.
 */
static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	bool fpsimd_only;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		/* Without TIF_SVE only the shared FPSIMD view is valid. */
		fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		/* Streaming mode always has full SVE-format state. */
		fpsimd_only = false;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (active) {
		if (fpsimd_only) {
			header->flags |= SVE_PT_REGS_FPSIMD;
		} else {
			header->flags |= SVE_PT_REGS_SVE;
		}
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	header->size = SVE_PT_SIZE(vq, header->flags);
	/* max_size assumes full SVE-format payload at the maximum VL. */
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}
0765
0766 static unsigned int sve_size_from_header(struct user_sve_header const *header)
0767 {
0768 return ALIGN(header->size, SVE_VQ_BYTES);
0769 }
0770
/*
 * Shared "get" implementation for NT_ARM_SVE and NT_ARM_SSVE: emit the
 * header followed by either the FPSIMD-format or full SVE-format
 * register payload, depending on the task's state.
 */
static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		/* Payload is the plain FPSIMD state. */
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		/* Z/P/FFR registers straight out of sve_state. */
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		/* Reserved gap before FPSR is zero-filled. */
		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		/* Zero-pad out to the aligned total size. */
		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		return 0;
	}
}
0822
0823 static int sve_get(struct task_struct *target,
0824 const struct user_regset *regset,
0825 struct membuf to)
0826 {
0827 if (!system_supports_sve())
0828 return -EINVAL;
0829
0830 return sve_get_common(target, regset, to, ARM64_VEC_SVE);
0831 }
0832
/*
 * Shared "set" implementation for NT_ARM_SVE and NT_ARM_SSVE: read the
 * header, apply the requested vector length and streaming-mode state,
 * then load either an FPSIMD-format or full SVE-format payload.
 *
 * The statement order below matters: the VL must be set before the
 * payload is interpreted, and the task's cached register state is
 * always invalidated on exit (even on error) because it may have been
 * partially written.
 */
static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apply the requested vector length first. The non-REGS flags are
	 * passed through shifted into the PR_SVE_* flag positions.
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode depending on the regset being written. */
	if (system_supports_sme()) {
		u64 old_svcr = target->thread.svcr;

		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		/*
		 * Changing the streaming-mode bit changes which register
		 * state is live, so make sure storage exists for it.
		 */
		if (target->thread.svcr != old_svcr)
			sve_alloc(target, true);
	}

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		/* Streaming-mode state must still be in SVE format. */
		if (type == ARM64_VEC_SME)
			fpsimd_force_sync_to_sve(target);
		goto out;
	}

	/*
	 * Otherwise: no registers or full SVE case. A payload sized for a
	 * different VL than the one actually in force cannot be laid out
	 * correctly, so reject it.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target, true);
	if (!target->thread.sve_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	/* Reserved gap before FPSR is skipped. */
	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	/* Cached register state may be stale after any of the above. */
	fpsimd_flush_task_state(target);
	return ret;
}
0961
0962 static int sve_set(struct task_struct *target,
0963 const struct user_regset *regset,
0964 unsigned int pos, unsigned int count,
0965 const void *kbuf, const void __user *ubuf)
0966 {
0967 if (!system_supports_sve())
0968 return -EINVAL;
0969
0970 return sve_set_common(target, regset, pos, count, kbuf, ubuf,
0971 ARM64_VEC_SVE);
0972 }
0973
0974 #endif
0975
0976 #ifdef CONFIG_ARM64_SME
0977
0978 static int ssve_get(struct task_struct *target,
0979 const struct user_regset *regset,
0980 struct membuf to)
0981 {
0982 if (!system_supports_sme())
0983 return -EINVAL;
0984
0985 return sve_get_common(target, regset, to, ARM64_VEC_SME);
0986 }
0987
0988 static int ssve_set(struct task_struct *target,
0989 const struct user_regset *regset,
0990 unsigned int pos, unsigned int count,
0991 const void *kbuf, const void __user *ubuf)
0992 {
0993 if (!system_supports_sme())
0994 return -EINVAL;
0995
0996 return sve_set_common(target, regset, pos, count, kbuf, ubuf,
0997 ARM64_VEC_SME);
0998 }
0999
/*
 * Regset "get" for NT_ARM_ZA: emit a user_za_header followed by the ZA
 * matrix contents when ZA is enabled for the task, padding the tail to
 * SVE_VQ_BYTES alignment.
 */
static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header to read. */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.za_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}
1048
/*
 * Regset "set" for NT_ARM_ZA: read the header, apply the requested SME
 * vector length, ensure backing storage exists, then either disable ZA
 * (header-only write) or load the ZA matrix and enable it.
 *
 * The cached register state is invalidated on all exit paths since it
 * may have been partially modified.
 */
static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apply the requested SME vector length before interpreting the
	 * payload; flags are passed through shifted into the PR_SME_*
	 * positions.
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
				    ((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Streaming-mode SVE state needs sve_state storage too. */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Allocate/reinit ZA storage */
	sme_alloc(target);
	if (!target->thread.za_state) {
		ret = -ENOMEM;
		goto out;
	}

	/* A header-only write disables ZA. */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * A payload sized for a different VL than the one actually in
	 * force cannot be laid out correctly, so reject it.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.za_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA (and SME) live only after the data was written. */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}
1132
1133 #endif
1134
1135 #ifdef CONFIG_ARM64_PTR_AUTH
1136 static int pac_mask_get(struct task_struct *target,
1137 const struct user_regset *regset,
1138 struct membuf to)
1139 {
1140
1141
1142
1143
1144
1145 unsigned long mask = ptrauth_user_pac_mask();
1146 struct user_pac_mask uregs = {
1147 .data_mask = mask,
1148 .insn_mask = mask,
1149 };
1150
1151 if (!system_supports_address_auth())
1152 return -EINVAL;
1153
1154 return membuf_write(&to, &uregs, sizeof(uregs));
1155 }
1156
1157 static int pac_enabled_keys_get(struct task_struct *target,
1158 const struct user_regset *regset,
1159 struct membuf to)
1160 {
1161 long enabled_keys = ptrauth_get_enabled_keys(target);
1162
1163 if (IS_ERR_VALUE(enabled_keys))
1164 return enabled_keys;
1165
1166 return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
1167 }
1168
1169 static int pac_enabled_keys_set(struct task_struct *target,
1170 const struct user_regset *regset,
1171 unsigned int pos, unsigned int count,
1172 const void *kbuf, const void __user *ubuf)
1173 {
1174 int ret;
1175 long enabled_keys = ptrauth_get_enabled_keys(target);
1176
1177 if (IS_ERR_VALUE(enabled_keys))
1178 return enabled_keys;
1179
1180 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
1181 sizeof(long));
1182 if (ret)
1183 return ret;
1184
1185 return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
1186 enabled_keys);
1187 }
1188
1189 #ifdef CONFIG_CHECKPOINT_RESTORE
1190 static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
1191 {
1192 return (__uint128_t)key->hi << 64 | key->lo;
1193 }
1194
1195 static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
1196 {
1197 struct ptrauth_key key = {
1198 .lo = (unsigned long)ukey,
1199 .hi = (unsigned long)(ukey >> 64),
1200 };
1201
1202 return key;
1203 }
1204
1205 static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
1206 const struct ptrauth_keys_user *keys)
1207 {
1208 ukeys->apiakey = pac_key_to_user(&keys->apia);
1209 ukeys->apibkey = pac_key_to_user(&keys->apib);
1210 ukeys->apdakey = pac_key_to_user(&keys->apda);
1211 ukeys->apdbkey = pac_key_to_user(&keys->apdb);
1212 }
1213
1214 static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
1215 const struct user_pac_address_keys *ukeys)
1216 {
1217 keys->apia = pac_key_from_user(ukeys->apiakey);
1218 keys->apib = pac_key_from_user(ukeys->apibkey);
1219 keys->apda = pac_key_from_user(ukeys->apdakey);
1220 keys->apdb = pac_key_from_user(ukeys->apdbkey);
1221 }
1222
1223 static int pac_address_keys_get(struct task_struct *target,
1224 const struct user_regset *regset,
1225 struct membuf to)
1226 {
1227 struct ptrauth_keys_user *keys = &target->thread.keys_user;
1228 struct user_pac_address_keys user_keys;
1229
1230 if (!system_supports_address_auth())
1231 return -EINVAL;
1232
1233 pac_address_keys_to_user(&user_keys, keys);
1234
1235 return membuf_write(&to, &user_keys, sizeof(user_keys));
1236 }
1237
1238 static int pac_address_keys_set(struct task_struct *target,
1239 const struct user_regset *regset,
1240 unsigned int pos, unsigned int count,
1241 const void *kbuf, const void __user *ubuf)
1242 {
1243 struct ptrauth_keys_user *keys = &target->thread.keys_user;
1244 struct user_pac_address_keys user_keys;
1245 int ret;
1246
1247 if (!system_supports_address_auth())
1248 return -EINVAL;
1249
1250 pac_address_keys_to_user(&user_keys, keys);
1251 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1252 &user_keys, 0, -1);
1253 if (ret)
1254 return ret;
1255 pac_address_keys_from_user(keys, &user_keys);
1256
1257 return 0;
1258 }
1259
1260 static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
1261 const struct ptrauth_keys_user *keys)
1262 {
1263 ukeys->apgakey = pac_key_to_user(&keys->apga);
1264 }
1265
1266 static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
1267 const struct user_pac_generic_keys *ukeys)
1268 {
1269 keys->apga = pac_key_from_user(ukeys->apgakey);
1270 }
1271
1272 static int pac_generic_keys_get(struct task_struct *target,
1273 const struct user_regset *regset,
1274 struct membuf to)
1275 {
1276 struct ptrauth_keys_user *keys = &target->thread.keys_user;
1277 struct user_pac_generic_keys user_keys;
1278
1279 if (!system_supports_generic_auth())
1280 return -EINVAL;
1281
1282 pac_generic_keys_to_user(&user_keys, keys);
1283
1284 return membuf_write(&to, &user_keys, sizeof(user_keys));
1285 }
1286
1287 static int pac_generic_keys_set(struct task_struct *target,
1288 const struct user_regset *regset,
1289 unsigned int pos, unsigned int count,
1290 const void *kbuf, const void __user *ubuf)
1291 {
1292 struct ptrauth_keys_user *keys = &target->thread.keys_user;
1293 struct user_pac_generic_keys user_keys;
1294 int ret;
1295
1296 if (!system_supports_generic_auth())
1297 return -EINVAL;
1298
1299 pac_generic_keys_to_user(&user_keys, keys);
1300 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1301 &user_keys, 0, -1);
1302 if (ret)
1303 return ret;
1304 pac_generic_keys_from_user(keys, &user_keys);
1305
1306 return 0;
1307 }
1308 #endif
1309 #endif
1310
1311 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
1312 static int tagged_addr_ctrl_get(struct task_struct *target,
1313 const struct user_regset *regset,
1314 struct membuf to)
1315 {
1316 long ctrl = get_tagged_addr_ctrl(target);
1317
1318 if (IS_ERR_VALUE(ctrl))
1319 return ctrl;
1320
1321 return membuf_write(&to, &ctrl, sizeof(ctrl));
1322 }
1323
1324 static int tagged_addr_ctrl_set(struct task_struct *target, const struct
1325 user_regset *regset, unsigned int pos,
1326 unsigned int count, const void *kbuf, const
1327 void __user *ubuf)
1328 {
1329 int ret;
1330 long ctrl;
1331
1332 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
1333 if (ret)
1334 return ret;
1335
1336 return set_tagged_addr_ctrl(target, ctrl);
1337 }
1338 #endif
1339
/*
 * Index space for the native (AArch64) regsets.  Each enabled
 * enumerator must have a matching designated initialiser in
 * aarch64_regsets[] below, guarded by the same config option.
 */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	/*
	 * Guarded by CONFIG_ARM64_SME to match aarch64_regsets[]; the
	 * previous CONFIG_ARM64_SVE guard left two zero-filled slots in
	 * the regset array when SVE=y but SME=n.
	 */
	REGSET_SSVE,
	REGSET_ZA,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
};
1368
/*
 * Regsets exposed for native AArch64 tasks, used for core dumps and
 * PTRACE_GETREGSET/PTRACE_SETREGSET.  Indexed by enum aarch64_regset.
 */
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * Exported as an array of 32-bit units: fpsr and fpcr are
		 * 32 bits wide, so u32 is the largest size that evenly
		 * divides the whole state.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		/* Shared handlers: they dispatch on core_note_type. */
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		/* Sized for the largest possible vector length. */
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA storage */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is logically a single, variably-sized register, but
		 * the regset core requires the data to be an exact
		 * multiple of the declared register size, so present it
		 * as an array of SVE_VQ_BYTES-sized units covering the
		 * maximum configuration.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* Read-only: no .set handler, the masks cannot be written. */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
};
1510
/* The regset view presented for native 64-bit tasks. */
static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
1515
1516 #ifdef CONFIG_COMPAT
/* Index space for the compat (AArch32) core-dump regsets below. */
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
1521
1522 static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
1523 {
1524 struct pt_regs *regs = task_pt_regs(task);
1525
1526 switch (idx) {
1527 case 15:
1528 return regs->pc;
1529 case 16:
1530 return pstate_to_compat_psr(regs->pstate);
1531 case 17:
1532 return regs->orig_x0;
1533 default:
1534 return regs->regs[idx];
1535 }
1536 }
1537
1538 static int compat_gpr_get(struct task_struct *target,
1539 const struct user_regset *regset,
1540 struct membuf to)
1541 {
1542 int i = 0;
1543
1544 while (to.left)
1545 membuf_store(&to, compat_get_user_reg(target, i++));
1546 return 0;
1547 }
1548
1549 static int compat_gpr_set(struct task_struct *target,
1550 const struct user_regset *regset,
1551 unsigned int pos, unsigned int count,
1552 const void *kbuf, const void __user *ubuf)
1553 {
1554 struct pt_regs newregs;
1555 int ret = 0;
1556 unsigned int i, start, num_regs;
1557
1558
1559 num_regs = count / regset->size;
1560
1561
1562 start = pos / regset->size;
1563
1564 if (start + num_regs > regset->n)
1565 return -EIO;
1566
1567 newregs = *task_pt_regs(target);
1568
1569 for (i = 0; i < num_regs; ++i) {
1570 unsigned int idx = start + i;
1571 compat_ulong_t reg;
1572
1573 if (kbuf) {
1574 memcpy(®, kbuf, sizeof(reg));
1575 kbuf += sizeof(reg);
1576 } else {
1577 ret = copy_from_user(®, ubuf, sizeof(reg));
1578 if (ret) {
1579 ret = -EFAULT;
1580 break;
1581 }
1582
1583 ubuf += sizeof(reg);
1584 }
1585
1586 switch (idx) {
1587 case 15:
1588 newregs.pc = reg;
1589 break;
1590 case 16:
1591 reg = compat_psr_to_pstate(reg);
1592 newregs.pstate = reg;
1593 break;
1594 case 17:
1595 newregs.orig_x0 = reg;
1596 break;
1597 default:
1598 newregs.regs[idx] = reg;
1599 }
1600
1601 }
1602
1603 if (valid_user_regs(&newregs.user_regs, target))
1604 *task_pt_regs(target) = newregs;
1605 else
1606 ret = -EINVAL;
1607
1608 return ret;
1609 }
1610
/*
 * regset get handler for the compat VFP view: export the FP/SIMD
 * registers followed by a synthesised 32-bit fpscr.
 */
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	/* Make sure the saved state is up to date for a live task. */
	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers sit at the start of the fpsimd state, so
	 * they can be copied out directly; only the fpscr needs to be
	 * assembled separately from the native fpsr/fpcr pair.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}
1635
/*
 * regset set handler for the compat VFP view: update the FP/SIMD
 * registers and split the trailing 32-bit fpscr back into the native
 * fpsr (status) and fpcr (control) registers.
 */
static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	/* The vector registers occupy everything up to the fpscr word. */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	/* If the write extends into the fpscr, unpack it. */
	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	/* Discard any stale per-CPU copy of the task's FP state. */
	fpsimd_flush_task_state(target);
	return ret;
}
1666
1667 static int compat_tls_get(struct task_struct *target,
1668 const struct user_regset *regset,
1669 struct membuf to)
1670 {
1671 return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
1672 }
1673
1674 static int compat_tls_set(struct task_struct *target,
1675 const struct user_regset *regset, unsigned int pos,
1676 unsigned int count, const void *kbuf,
1677 const void __user *ubuf)
1678 {
1679 int ret;
1680 compat_ulong_t tls = target->thread.uw.tp_value;
1681
1682 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1683 if (ret)
1684 return ret;
1685
1686 target->thread.uw.tp_value = tls;
1687 return ret;
1688 }
1689
/*
 * Regsets used for core dumps of 32-bit tasks (and compat GETREGS-style
 * requests): the classic arm32 GPR and VFP views only.
 */
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};
1709
/* The arm32-compatible regset view used for compat core dumps. */
static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
1714
/*
 * Extended regset list for native ptrace requests on 32-bit children:
 * in addition to the GPR/VFP views it exposes TLS, hardware debug
 * registers and the syscall number.
 */
static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		/* Shared handlers: they dispatch on core_note_type. */
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};
1767
/* Regset view used when a 32-bit child is traced via native ptrace. */
static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};
1772
/*
 * Handle compat PTRACE_PEEKUSR: read one word of the pseudo "user
 * area" at byte offset @off and store it at the user pointer @ret.
 */
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	/* Offsets must be word aligned. */
	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;	/* unimplemented parts of the user area read as 0 */

	return put_user(tmp, ret);
}
1796
/*
 * Handle compat PTRACE_POKEUSR: write one word into the pseudo "user
 * area".  Only the register portion is writable; writes beyond it are
 * silently ignored, and the update is only committed if the resulting
 * register state is valid for user mode.
 */
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	/* Offsets must be word aligned and within the user area. */
	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	/* Writes outside the register block are ignored (but succeed). */
	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:		/* AArch32 r15 -> pc */
		newregs.pc = val;
		break;
	case 16:		/* AArch32 cpsr -> native pstate */
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:		/* saved syscall argument */
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}
1829
1830 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1831
1832
1833
1834
1835
1836
1837
1838
1839 static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
1840 {
1841 return (abs(num) - 1) >> 1;
1842 }
1843
1844 static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
1845 {
1846 u8 num_brps, num_wrps, debug_arch, wp_len;
1847 u32 reg = 0;
1848
1849 num_brps = hw_breakpoint_slots(TYPE_INST);
1850 num_wrps = hw_breakpoint_slots(TYPE_DATA);
1851
1852 debug_arch = debug_monitors_arch();
1853 wp_len = 8;
1854 reg |= debug_arch;
1855 reg <<= 8;
1856 reg |= wp_len;
1857 reg <<= 8;
1858 reg |= num_wrps;
1859 reg <<= 8;
1860 reg |= num_brps;
1861
1862 *kdata = reg;
1863 return 0;
1864 }
1865
1866 static int compat_ptrace_hbp_get(unsigned int note_type,
1867 struct task_struct *tsk,
1868 compat_long_t num,
1869 u32 *kdata)
1870 {
1871 u64 addr = 0;
1872 u32 ctrl = 0;
1873
1874 int err, idx = compat_ptrace_hbp_num_to_idx(num);
1875
1876 if (num & 1) {
1877 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
1878 *kdata = (u32)addr;
1879 } else {
1880 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
1881 *kdata = ctrl;
1882 }
1883
1884 return err;
1885 }
1886
1887 static int compat_ptrace_hbp_set(unsigned int note_type,
1888 struct task_struct *tsk,
1889 compat_long_t num,
1890 u32 *kdata)
1891 {
1892 u64 addr;
1893 u32 ctrl;
1894
1895 int err, idx = compat_ptrace_hbp_num_to_idx(num);
1896
1897 if (num & 1) {
1898 addr = *kdata;
1899 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
1900 } else {
1901 ctrl = *kdata;
1902 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
1903 }
1904
1905 return err;
1906 }
1907
1908 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1909 compat_ulong_t __user *data)
1910 {
1911 int ret;
1912 u32 kdata;
1913
1914
1915 if (num < 0) {
1916 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
1917
1918 } else if (num == 0) {
1919 ret = compat_ptrace_hbp_get_resource_info(&kdata);
1920
1921 } else {
1922 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
1923 }
1924
1925 if (!ret)
1926 ret = put_user(kdata, data);
1927
1928 return ret;
1929 }
1930
1931 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1932 compat_ulong_t __user *data)
1933 {
1934 int ret;
1935 u32 kdata = 0;
1936
1937 if (num == 0)
1938 return 0;
1939
1940 ret = get_user(kdata, data);
1941 if (ret)
1942 return ret;
1943
1944 if (num < 0)
1945 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
1946 else
1947 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
1948
1949 return ret;
1950 }
1951 #endif
1952
/*
 * Arch-specific dispatcher for ptrace requests issued by a 32-bit
 * tracer.  Requests not handled here fall through to the generic
 * compat_ptrace_request().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
2030 #endif
2031
/*
 * Select the regset view appropriate for @task.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * When current (the tracer or dumper) is itself a compat task,
	 * present the plain arm32-compatible view.  A compat child traced
	 * by a native caller instead gets the extended aarch32 ptrace
	 * view, which additionally exposes the TLS, hardware-debug and
	 * syscall regsets (see aarch32_ptrace_regsets above).
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}
2048
2049 long arch_ptrace(struct task_struct *child, long request,
2050 unsigned long addr, unsigned long data)
2051 {
2052 switch (request) {
2053 case PTRACE_PEEKMTETAGS:
2054 case PTRACE_POKEMTETAGS:
2055 return mte_ptrace_copy_tags(child, request, addr, data);
2056 }
2057
2058 return ptrace_request(child, request, addr, data);
2059 }
2060
/* Direction marker reported to the tracer in a scratch register. */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
2065
/*
 * Report a syscall entry or exit stop to the tracer.
 */
static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * Whether the stop is an entry or an exit is signalled to the
	 * tracer by temporarily clobbering a scratch register (r12/ip
	 * on AArch32, x7 on AArch64) with the direction value and
	 * restoring the original afterwards.  Consequences of this ABI:
	 *
	 * - Tracer writes to that register during the stop are lost,
	 *   since the saved value is written back afterwards.
	 *
	 * - The register's real value is not observable during the
	 *   stop, so the tracer cannot save and later restore it.
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		/* A tracer denying the syscall makes us forget it. */
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * When single-stepping, report the exit as a step trap
		 * (second argument 1) after restoring the scratch
		 * register, since the tracer may rewrite register state.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}
2109
/*
 * Syscall-entry tracing hook.  Returns the syscall number to execute,
 * or NO_SYSCALL to skip the syscall entirely.
 */
int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		/* Under emulation the syscall itself is never run. */
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Run seccomp after ptrace so the tracer sees the original call. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	/* orig_x0 holds the original first argument (x0 may be clobbered). */
	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
2132
/*
 * Syscall-exit tracing hook: audit, tracepoint, ptrace stop and rseq
 * consistency check, in that order.
 */
void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	/* Single-step exits are reported as pseudo-step traps, see above. */
	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
/*
 * SPSR_EL1 bits treated as RES0 for user state in each execution
 * state; these are masked off before validating a pstate value.
 * NOTE(review): presumably taken from the Arm ARM (DDI 0487) SPSR_EL1
 * field definitions — confirm against the current revision.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
2163
/*
 * Validate (and if necessary sanitise) a pstate value for a compat
 * (AArch32) user task.  Returns 1 if the supplied state was already
 * acceptable, otherwise forces it to a safe EL0 state and returns 0.
 */
static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	/* Pin the endianness bit when mixed-endian EL0 is unsupported. */
	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	/* Acceptable as-is: 32-bit user mode with A/I/F unmasked. */
	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Otherwise force a valid 32-bit EL0 state, keeping only the
	 * user-controllable bits (condition flags, Q, IT, GE, E, T).
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}
2195
/*
 * Validate (and if necessary sanitise) a pstate value for a native
 * (AArch64) user task.  Returns 1 if the supplied state was already
 * acceptable, otherwise forces it to a safe EL0t state and returns 0.
 */
static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	/* Acceptable as-is: 64-bit user mode with D/A/I/F unmasked. */
	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Otherwise keep only the condition flags (valid 64-bit EL0t). */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}
2213
2214
2215
2216
2217
/*
 * Are these registers suitable for user mode?  Clears any pending
 * single-step state, then dispatches to the compat or native pstate
 * validator for @task.  May sanitise @regs in place; returns non-zero
 * if the (possibly adjusted) state is acceptable unchanged.
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org-style SS handling: drop any stale step state. */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}