/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
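/*
 * Called from the idle loop once this CPU has been marked offline;
 * play_dead() hands the CPU over to the platform code and does not
 * return.
 */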
void arch_cpu_idle_dead(void)
{
    play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

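/*
 * Set up the user-mode register state for a thread that is about to
 * return to userland, typically after execve().
 */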
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
    unsigned long status;

    /* New thread loses kernel privileges. */
    status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
    status |= KU_USER;
    regs->cp0_status = status;
    lose_fpu(0);
    clear_thread_flag(TIF_MSA_CTX_LIVE);
    clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
    atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
    init_dsp();
    regs->cp0_epc = pc;
    regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
    /*
     * User threads may have allocated a delay slot emulation frame.
     * If so, clean up that allocation.
     */
    if (!(current->flags & PF_KTHREAD))
        dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
    /*
     * Save any process state which is live in hardware registers to the
     * parent context prior to duplication. This prevents the new child
     * state becoming stale if the parent is preempted before copy_thread()
     * gets a chance to save the parent's live hardware registers to the
     * child context.
     */
    preempt_disable();

    if (is_msa_enabled())
        save_msa(current);
    else if (is_fpu_owner())
        _save_fp(current);

    save_dsp(current);

    preempt_enable();

    *dst = *src;
    return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
    unsigned long clone_flags = args->flags;
    unsigned long usp = args->stack;
    unsigned long tls = args->tls;
    struct thread_info *ti = task_thread_info(p);
    struct pt_regs *childregs, *regs = current_pt_regs();
    unsigned long childksp;

    childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

    /* Set up the new thread's kernel-mode register frame. */
    childregs = (struct pt_regs *) childksp - 1;
    /* The kernel stack starts immediately below the struct pt_regs. */
    childksp = (unsigned long) childregs;
    p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
    if (unlikely(args->fn)) {
        /* kernel thread */
        unsigned long status = p->thread.cp0_status;
        memset(childregs, 0, sizeof(struct pt_regs));
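        /*
         * $16/$17 (s0/s1) carry the entry point and its argument;
         * ret_from_kernel_thread (installed as the return address
         * below) is expected to call s0(s1) when the new thread
         * first runs.
         */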
        p->thread.reg16 = (unsigned long)args->fn;
        p->thread.reg17 = (unsigned long)args->fn_arg;
        p->thread.reg29 = childksp;
        p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
        status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
             ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
        status |= ST0_EXL;
#endif
        childregs->cp0_status = status;
        return 0;
    }

    /* user thread */
    *childregs = *regs;
    childregs->regs[7] = 0; /* Clear error flag */
    childregs->regs[2] = 0; /* Child gets zero as return value */
    if (usp)
        childregs->regs[29] = usp;

    p->thread.reg29 = (unsigned long) childregs;
    p->thread.reg31 = (unsigned long) ret_from_fork;

    /*
     * New tasks lose permission to use the fpu. This accelerates context
     * switching for most programs since they don't use the fpu.
     */
    childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

    clear_tsk_thread_flag(p, TIF_USEDFPU);
    clear_tsk_thread_flag(p, TIF_USEDMSA);
    clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
    clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
    atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

    if (clone_flags & CLONE_SETTLS)
        ti->tp_value = tls;

    return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
    void        *func;
    unsigned long   func_size;
    int     frame_size;
    int     pc_offset;
};

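/*
 * Reconstruct the absolute target of a j/jal instruction: the 26-bit
 * instruction index is shifted left by 2 and combined with the high
 * bits of the PC of the jump.
 */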
#define J_TARGET(pc,target) \
        (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
    /*
     * jr16 ra
     * jr ra
     */
    if (mm_insn_16bit(ip->word >> 16)) {
        if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
            ip->mm16_r5_format.rt == mm_jr16_op &&
            ip->mm16_r5_format.imm == 31)
            return 1;
        return 0;
    }

    if (ip->r_format.opcode == mm_pool32a_op &&
        ip->r_format.func == mm_pool32axf_op &&
        ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
        ip->r_format.rt == 31)
        return 1;
    return 0;
#else
    if (ip->r_format.opcode == spec_op &&
        ip->r_format.func == jr_op &&
        ip->r_format.rs == 31)
        return 1;
    return 0;
#endif
}

static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
    /*
     * swsp ra,offset
     * swm16 reglist,offset(sp)
     * swm32 reglist,offset(sp)
     * sw32 ra,offset(sp)
     * jraddiusp - NOT SUPPORTED
     *
     * microMIPS is way more fun...
     */
    if (mm_insn_16bit(ip->word >> 16)) {
        switch (ip->mm16_r5_format.opcode) {
        case mm_swsp16_op:
            if (ip->mm16_r5_format.rt != 31)
                return 0;

            *poff = ip->mm16_r5_format.imm;
            *poff = (*poff << 2) / sizeof(ulong);
            return 1;

        case mm_pool16c_op:
            switch (ip->mm16_m_format.func) {
            case mm_swm16_op:
                *poff = ip->mm16_m_format.imm;
                *poff += 1 + ip->mm16_m_format.rlist;
                *poff = (*poff << 2) / sizeof(ulong);
                return 1;

            default:
                return 0;
            }

        default:
            return 0;
        }
    }

    switch (ip->i_format.opcode) {
    case mm_sw32_op:
        if (ip->i_format.rs != 29)
            return 0;
        if (ip->i_format.rt != 31)
            return 0;

        *poff = ip->i_format.simmediate / sizeof(ulong);
        return 1;

    case mm_pool32b_op:
        switch (ip->mm_m_format.func) {
        case mm_swm32_func:
            if (ip->mm_m_format.rd < 0x10)
                return 0;
            if (ip->mm_m_format.base != 29)
                return 0;

            *poff = ip->mm_m_format.simmediate;
            *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
            *poff /= sizeof(ulong);
            return 1;
        default:
            return 0;
        }

    default:
        return 0;
    }
#else
    /* sw / sd $ra, offset($sp) */
    if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
        ip->i_format.rs == 29 && ip->i_format.rt == 31) {
        *poff = ip->i_format.simmediate / sizeof(ulong);
        return 1;
    }
#ifdef CONFIG_CPU_LOONGSON64
    if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
              (ip->loongson3_lswc2_format.ls == 1) &&
              (ip->loongson3_lswc2_format.fr == 0) &&
              (ip->loongson3_lswc2_format.base == 29)) {
        if (ip->loongson3_lswc2_format.rt == 31) {
            *poff = ip->loongson3_lswc2_format.offset << 1;
            return 1;
        }
        if (ip->loongson3_lswc2_format.rq == 31) {
            *poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
            return 1;
        }
    }
#endif
    return 0;
#endif
}

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
    /*
     * jr16,jrc,jalr16,jalrs16
     * jal
     * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
     * jraddiusp - NOT SUPPORTED
     *
     * microMIPS is kind of more fun...
     */
    if (mm_insn_16bit(ip->word >> 16)) {
        if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
            (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
            return 1;
        return 0;
    }

    if (ip->j_format.opcode == mm_j32_op)
        return 1;
    if (ip->j_format.opcode == mm_jal32_op)
        return 1;
    if (ip->r_format.opcode != mm_pool32a_op ||
            ip->r_format.func != mm_pool32axf_op)
        return 0;
    return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
    if (ip->j_format.opcode == j_op)
        return 1;
    if (ip->j_format.opcode == jal_op)
        return 1;
    if (ip->r_format.opcode != spec_op)
        return 0;
    return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
    unsigned short tmp;

    /*
     * addiusp -imm
     * addius5 sp,-imm
     * addiu32 sp,sp,-imm
     * jraddiusp - NOT SUPPORTED
     *
     * microMIPS is not more fun...
     */
    if (mm_insn_16bit(ip->word >> 16)) {
        if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
            ip->mm16_r3_format.simmediate & mm_addiusp_func) {
            tmp = ip->mm_b0_format.simmediate >> 1;
            tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
            if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
                tmp ^= 0x100;
            *frame_size = -(signed short)(tmp << 2);
            return 1;
        }
        if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
            ip->mm16_r5_format.rt == 29) {
            tmp = ip->mm16_r5_format.imm >> 1;
            *frame_size = -(signed short)(tmp & 0xf);
            return 1;
        }
        return 0;
    }

    if (ip->mm_i_format.opcode == mm_addiu32_op &&
        ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
        *frame_size = -ip->i_format.simmediate;
        return 1;
    }
#else
    /* addiu/daddiu sp,sp,-imm */
    if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
        return 0;

    if (ip->i_format.opcode == addiu_op ||
        ip->i_format.opcode == daddiu_op) {
        *frame_size = -ip->i_format.simmediate;
        return 1;
    }
#endif
    return 0;
}

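/*
 * Scan a function's prologue for the stack frame allocation
 * (is_sp_move_ins) and the slot where $ra is saved (is_ra_save_ins),
 * looking at most func_size bytes ahead, or 512 bytes when the size
 * is unknown. Returns 0 if frame info was found (nested function),
 * 1 for a leaf function, and -1 if the prologue could not be
 * analyzed.
 */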
static int get_frame_info(struct mips_frame_info *info)
{
    bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
    union mips_instruction insn, *ip, *ip_end;
    unsigned int last_insn_size = 0;
    bool saw_jump = false;

    info->pc_offset = -1;
    info->frame_size = 0;

    ip = (void *)msk_isa16_mode((ulong)info->func);
    if (!ip)
        goto err;

    ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

    while (ip < ip_end) {
        ip = (void *)ip + last_insn_size;

        if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
            insn.word = ip->halfword[0] << 16;
            last_insn_size = 2;
        } else if (is_mmips) {
            insn.word = ip->halfword[0] << 16 | ip->halfword[1];
            last_insn_size = 4;
        } else {
            insn.word = ip->word;
            last_insn_size = 4;
        }

        if (is_jr_ra_ins(ip)) {
            break;
        } else if (!info->frame_size) {
            is_sp_move_ins(&insn, &info->frame_size);
            continue;
        } else if (!saw_jump && is_jump_ins(ip)) {
            /*
             * If we see a jump instruction, we are finished
             * with the frame save.
             *
             * Some functions can have a shortcut return at
             * the beginning of the function, so don't start
             * looking for a jump instruction until we have
             * seen the frame setup.
             *
             * The RA save instruction can get put into the
             * delay slot of the jump instruction, so look
             * at the next instruction, too.
             */
            saw_jump = true;
            continue;
        }
        if (info->pc_offset == -1 &&
            is_ra_save_ins(&insn, &info->pc_offset))
            break;
        if (saw_jump)
            break;
    }
    if (info->frame_size && info->pc_offset >= 0) /* nested */
        return 0;
    if (info->pc_offset < 0) /* leaf */
        return 1;
    /* prologue seems bogus... */
err:
    return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
    return kallsyms_lookup_name("__schedule");
}
#else
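/*
 * Without kallsyms, fall back on a heuristic: schedule() is expected
 * to tail-jump to __schedule(), so scan its first few instructions
 * for a j and take that jump's target.
 */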
static unsigned long get___schedule_addr(void)
{
    union mips_instruction *ip = (void *)schedule;
    int max_insns = 8;
    int i;

    for (i = 0; i < max_insns; i++, ip++) {
        if (ip->j_format.opcode == j_op)
            return J_TARGET(ip, ip->j_format.target);
    }
    return 0;
}
#endif

static int __init frame_info_init(void)
{
    unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
    unsigned long ofs;
#endif
    unsigned long addr;

    addr = get___schedule_addr();
    if (!addr)
        addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
    kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
    schedule_mfi.func = (void *)addr;
    schedule_mfi.func_size = size;

    get_frame_info(&schedule_mfi);

    /*
     * Without schedule() frame info, the results given by
     * thread_saved_pc() and __get_wchan() are not reliable.
     */
    if (schedule_mfi.pc_offset < 0)
        printk("Can't analyze schedule() prologue at %p\n", schedule);

    return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
    struct thread_struct *t = &tsk->thread;

    /* Newborn processes are a special case */
    if (t->reg31 == (unsigned long) ret_from_fork)
        return t->reg31;
    if (schedule_mfi.pc_offset < 0)
        return 0;
    return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
                          unsigned long *sp,
                          unsigned long pc,
                          unsigned long *ra)
{
    unsigned long low, high, irq_stack_high;
    struct mips_frame_info info;
    unsigned long size, ofs;
    struct pt_regs *regs;
    int leaf;

    if (!stack_page)
        return 0;

    /*
     * IRQ stacks start at IRQ_STACK_START
     * task stacks at THREAD_SIZE - 32
     */
    low = stack_page;
    if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
        high = stack_page + IRQ_STACK_START;
        irq_stack_high = high;
    } else {
        high = stack_page + THREAD_SIZE - 32;
        irq_stack_high = 0;
    }

    /*
     * If we reached the top of the interrupt stack, start unwinding
     * the interrupted task stack.
     */
    if (unlikely(*sp == irq_stack_high)) {
        unsigned long task_sp = *(unsigned long *)*sp;

        /*
         * Check that the pointer saved in the IRQ stack head points to
         * something within the stack of the current task
         */
        if (!object_is_on_stack((void *)task_sp))
            return 0;

        /*
         * Follow the pointer to the task's kernel stack frame where
         * the interrupted state was saved.
         */
        regs = (struct pt_regs *)task_sp;
        pc = regs->cp0_epc;
        if (!user_mode(regs) && __kernel_text_address(pc)) {
            *sp = regs->regs[29];
            *ra = regs->regs[31];
            return pc;
        }
        return 0;
    }
    if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
        return 0;
    /*
     * Return ra if an exception occurred at the first instruction
     */
    if (unlikely(ofs == 0)) {
        pc = *ra;
        *ra = 0;
        return pc;
    }

    info.func = (void *)(pc - ofs);
    info.func_size = ofs;   /* analyze from start to ofs */
    leaf = get_frame_info(&info);
    if (leaf < 0)
        return 0;

    if (*sp < low || *sp + info.frame_size > high)
        return 0;

    if (leaf)
        /*
         * In some extreme cases, get_frame_info() can wrongly
         * consider a nested function to be a leaf one. In those
         * cases, avoid always returning the same value.
         */
        pc = pc != *ra ? *ra : 0;
    else
        pc = ((unsigned long *)(*sp))[info.pc_offset];

    *sp += info.frame_size;
    *ra = 0;
    return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
               unsigned long pc, unsigned long *ra)
{
    unsigned long stack_page = 0;
    int cpu;

    for_each_possible_cpu(cpu) {
        if (on_irq_stack(cpu, *sp)) {
            stack_page = (unsigned long)irq_stack[cpu];
            break;
        }
    }

    if (!stack_page)
        stack_page = (unsigned long)task_stack_page(task);

    return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long __get_wchan(struct task_struct *task)
{
    unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
    unsigned long sp;
    unsigned long ra = 0;
#endif

    if (!task_stack_page(task))
        goto out;

    pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
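    /*
     * thread.reg29 holds the kernel sp saved at the last context
     * switch inside __schedule(); stepping over __schedule()'s frame
     * should yield the caller's stack pointer for the unwind below.
     */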
    sp = task->thread.reg29 + schedule_mfi.frame_size;

    while (in_sched_functions(pc))
        pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
    return pc;
}

unsigned long mips_stack_top(void)
{
    unsigned long top = TASK_SIZE & PAGE_MASK;

    if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
        /* One page for branch delay slot "emulation" */
        top -= PAGE_SIZE;
    }

    /* Space for the VDSO, data page & GIC user page */
    top -= PAGE_ALIGN(current->thread.abi->vdso->size);
    top -= PAGE_SIZE;
    top -= mips_gic_present() ? PAGE_SIZE : 0;

    /* Space for cache colour alignment */
    if (cpu_has_dc_aliases)
        top -= shm_align_mask + 1;

    /* Space to randomize the VDSO base */
    if (current->flags & PF_RANDOMIZE)
        top -= VDSO_RANDOMIZE_SIZE;

    return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
    if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
        sp -= get_random_int() & ~PAGE_MASK;

    return sp & ALMASK;
}

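/*
 * Backtrace requests are sent to other CPUs as asynchronous SMP calls.
 * The busy mask below ensures a CPU's call_single_data_t is never
 * reused before that CPU has handled the previous request.
 */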
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
    nmi_cpu_backtrace(get_irq_regs());
    cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
    CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
    call_single_data_t *csd;
    int cpu;

    for_each_cpu(cpu, mask) {
        /*
         * If we previously sent an IPI to the target CPU & it hasn't
         * cleared its bit in the busy cpumask then it didn't handle
         * our previous IPI & it's not safe for us to reuse the
         * call_single_data_t.
         */
        if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
            pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
                cpu);
            continue;
        }

        csd = &per_cpu(backtrace_csd, cpu);
        smp_call_function_single_async(cpu, csd);
    }
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
    nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

int mips_get_process_fp_mode(struct task_struct *task)
{
    int value = 0;

    if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
        value |= PR_FP_MODE_FR;
    if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
        value |= PR_FP_MODE_FRE;

    return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
    /*
     * This is icky, but we use this to simply ensure that all CPUs have
     * context switched, regardless of whether they were previously running
     * kernel or user code. This ensures that no CPU that a mode-switching
     * program may execute on keeps its FPU enabled (& in the old mode)
     * throughout the mode switch.
     */
    return 0;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
    const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
    struct task_struct *t;
    struct cpumask process_cpus;
    int cpu;

    /* If nothing to change, return right away, successfully.  */
    if (value == mips_get_process_fp_mode(task))
        return 0;

    /* Only accept a mode change if 64-bit FP enabled for o32.  */
    if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
        return -EOPNOTSUPP;

    /* And only for o32 tasks.  */
    if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
        return -EOPNOTSUPP;

    /* Check the value is valid */
    if (value & ~known_bits)
        return -EOPNOTSUPP;

    /* Setting FRE without FR is not supported.  */
    if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
        return -EOPNOTSUPP;

    /* Avoid inadvertently triggering emulation */
    if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
        !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
        return -EOPNOTSUPP;
    if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
        return -EOPNOTSUPP;

    /* FR = 0 not supported in MIPS R6 */
    if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
        return -EOPNOTSUPP;

    /* Indicate the new FP mode in each thread */
    for_each_thread(task, t) {
        /* Update desired FP register width */
        if (value & PR_FP_MODE_FR) {
            clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
        } else {
            set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
            clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
        }

        /* Update desired FP single layout */
        if (value & PR_FP_MODE_FRE)
            set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
        else
            clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
    }

    /*
     * We need to ensure that all threads in the process have switched mode
     * before returning, in order to allow userland to not worry about
     * races. We can do this by forcing all CPUs that any thread in the
     * process may be running on to schedule something else - in this case
     * prepare_for_fp_mode_switch().
     *
     * We begin by generating a mask of all CPUs that any thread in the
     * process may be running on.
     */
    cpumask_clear(&process_cpus);
    for_each_thread(task, t)
        cpumask_set_cpu(task_cpu(t), &process_cpus);

    /*
     * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
     *
     * The CPUs may have rescheduled already since we switched mode or
     * generated the cpumask, but that doesn't matter. If the task in this
     * process is scheduled out then our scheduling
     * prepare_for_fp_mode_switch() will simply be redundant. If it's
     * scheduled in then it will already have picked up the new FP mode
     * whilst doing so.
     */
    cpus_read_lock();
    for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
        work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
    cpus_read_unlock();

    return 0;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
    unsigned int i;

    for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
        /* k0/k1 are copied as zero. */
        if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
            uregs[i] = 0;
        else
            uregs[i] = regs->regs[i - MIPS32_EF_R0];
    }

    uregs[MIPS32_EF_LO] = regs->lo;
    uregs[MIPS32_EF_HI] = regs->hi;
    uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
    uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
    uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
    uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
    unsigned int i;

    for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
        /* k0/k1 are copied as zero. */
        if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
            uregs[i] = 0;
        else
            uregs[i] = regs->regs[i - MIPS64_EF_R0];
    }

    uregs[MIPS64_EF_LO] = regs->lo;
    uregs[MIPS64_EF_HI] = regs->hi;
    uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
    uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
    uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
    uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */