/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/mc146818rtc.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>

#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/fpu/sched.h>
#include <asm/desc.h>

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
#include <asm/resctrl.h>
#include <asm/proto.h>

#include "process.h"

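/*
 * Dump the register state for oops/debug output.  With SHOW_REGS_ALL,
 * the control and debug registers are printed as well.
 */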
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
		 const char *log_lvl)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned short gs;

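	/* Read the live %gs selector; the other segments are taken from regs. */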
	savesegment(gs, gs);

	show_ip(regs, log_lvl);

	printk("%sEAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		log_lvl, regs->ax, regs->bx, regs->cx, regs->dx);
	printk("%sESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		log_lvl, regs->si, regs->di, regs->bp, regs->sp);
	printk("%sDS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
	       log_lvl, (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, regs->ss, regs->flags);

	if (mode != SHOW_REGS_ALL)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();
	printk("%sCR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
		log_lvl, cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk("%sDR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
		log_lvl, d0, d1, d2, d3);
	printk("%sDR6: %08lx DR7: %08lx\n",
		log_lvl, d6, d7);
}

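/*
 * Called when a dead task is being reaped, after its mm has already been
 * dropped; releases any vm86 interrupt lines the task still held.
 */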
void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

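/*
 * Set up the user-mode register state for a freshly exec'ed task: flat
 * user code/data segments, the new ip/sp, and interrupts enabled.
 */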
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(gs, 0);
	regs->fs		= 0;
	regs->ds		= __USER_DS;
	regs->es		= __USER_DS;
	regs->ss		= __USER_DS;
	regs->cs		= __USER_CS;
	regs->ip		= new_ip;
	regs->sp		= new_sp;
	regs->flags		= X86_EFLAGS_IF;
}
EXPORT_SYMBOL_GPL(start_thread);


/*
 *  switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents hardware task switching as slow
 * is something of a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually become a valid point.
 * More important, however, is the fact that this gives us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after the
 * task switch, and shows up in ret_from_fork in entry.S, for
 * example.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	int cpu = smp_processor_id();

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

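	/*
	 * If prev's FPU state is still live in the registers (i.e.
	 * TIF_NEED_FPU_LOAD is clear), save it into prev's fpstate
	 * before switching away.
	 */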
	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs.  This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

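	/*
	 * Switch the less common per-task state (e.g. I/O bitmap, debug
	 * block-step, speculation controls) that only some tasks carry.
	 */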
	switch_to_extra(prev_p, next_p);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Reload esp0 and cpu_current_top_of_stack.  This changes
	 * current_thread_info().  Refresh the SYSENTER configuration in
	 * case prev or next is vm86.
	 */
	update_task_stack(next_p);
	refresh_sysenter_cs(next);
	this_cpu_write(cpu_current_top_of_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE);

	/*
	 * Restore %gs if needed (which is common)
	 */
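	/*
	 * Bitwise OR on purpose: %gs must be reloaded if either the old
	 * or the new selector is non-zero, and '|' avoids an extra
	 * conditional branch.
	 */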
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

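	/* Make next_p this CPU's notion of 'current'. */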
	this_cpu_write(current_task, next_p);

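	/*
	 * Flag that next's FPU registers still need to be restored; the
	 * actual load is deferred until the task returns to user space.
	 */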
	switch_fpu_finish();

	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}

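/*
 * arch_prctl(2) entry point: on 32-bit, only the option set shared with
 * 64-bit is available, handled by do_arch_prctl_common().
 */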
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(option, arg2);
}