/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/slab.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/dsemul.h>
#include <asm/ginvt.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

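/*
 * Program the hardware page table walker's base register (CP0 PWBase)
 * with the given PGD, if the CPU implements an HTW. The back-to-back
 * CP0 hazard barrier makes the write visible before the walker can be
 * used again.
 */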
#define htw_set_pwbase(pgd)                     \
do {                                    \
    if (cpu_has_htw) {                      \
        write_c0_pwbase(pgd);                   \
        back_to_back_c0_hazard();               \
    }                               \
} while (0)

extern void tlbmiss_handler_setup_pgd(unsigned long);
extern char tlbmiss_handler_setup_pgd_end[];

/* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
#define TLBMISS_HANDLER_SETUP_PGD(pgd)                  \
do {                                    \
    tlbmiss_handler_setup_pgd((unsigned long)(pgd));        \
    htw_set_pwbase((unsigned long)pgd);             \
} while (0)

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

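/*
 * With CONFIG_MIPS_PGD_C0_CONTEXT the fast TLB refill handlers fetch the
 * current PGD from a CP0 register set up by tlbmiss_handler_setup_pgd(),
 * so all that needs restoring here is the CPU id in the upper bits of
 * XContext.
 */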
#define TLBMISS_HANDLER_RESTORE()                   \
    write_c0_xcontext((unsigned long) smp_processor_id() <<     \
              SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP()                     \
    do {                                \
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);      \
        TLBMISS_HANDLER_RESTORE();              \
    } while (0)

#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor. Also, the proc. id is stuffed
 * into the context register.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_RESTORE()                   \
    write_c0_context((unsigned long) smp_processor_id() <<      \
             SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP()                     \
    TLBMISS_HANDLER_RESTORE();                  \
    back_to_back_c0_hazard();                   \
    TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The ginvt instruction will invalidate wired entries when its type field
 * targets anything other than the entire TLB. That means that if we were to
 * allow the kernel to create wired entries with the MMID of current->active_mm
 * then those wired entries could be invalidated when we later use ginvt to
 * invalidate TLB entries with that MMID.
 *
 * In order to prevent ginvt from trashing wired entries, we reserve one MMID
 * for use by the kernel when creating wired entries. This MMID will never be
 * assigned to a struct mm, and we'll never target it with a ginvt instruction.
 */
#define MMID_KERNEL_WIRED   0

/*
 * All upper bits left unused by the hardware are treated as a software
 * ASID extension (the "version").
 */
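/*
 * For example, with an 8-bit hardware ASID (cpu_asid_mask() == 0xff),
 * asid_version_mask() is ~0xffULL and asid_first_version() is 0x100:
 * the version counter lives in bits 8 and up, and bumping a context by
 * asid_first_version() steps straight over the hardware ASID field.
 */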
static inline u64 asid_version_mask(unsigned int cpu)
{
    unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);

    return ~(u64)(asid_mask | (asid_mask - 1));
}

static inline u64 asid_first_version(unsigned int cpu)
{
    return ~asid_version_mask(cpu) + 1;
}

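/*
 * cpu_context()/set_cpu_context() read and write the context value for
 * @mm: the global MMID when the CPU supports MemoryMapIDs, otherwise the
 * per-CPU ASID (including its software version in the upper bits).
 */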
static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
{
    if (cpu_has_mmid)
        return atomic64_read(&mm->context.mmid);

    return mm->context.asid[cpu];
}

static inline void set_cpu_context(unsigned int cpu,
                   struct mm_struct *mm, u64 ctx)
{
    if (cpu_has_mmid)
        atomic64_set(&mm->context.mmid, ctx);
    else
        mm->context.asid[cpu] = ctx;
}

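/*
 * asid_cache is the most recently allocated context value on a CPU;
 * cpu_asid() extracts just the hardware ASID/MMID bits of @mm's context
 * on @cpu, suitable for writing to EntryHi or MemoryMapID.
 */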
#define asid_cache(cpu)     (cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm) \
    (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))

extern void get_new_mmu_context(struct mm_struct *mm);
extern void check_mmu_context(struct mm_struct *mm);
extern void check_switch_mmu_context(struct mm_struct *mm);

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
    int i;

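    /*
     * MMIDs are global, so a single context value covers every CPU;
     * classic ASIDs are per-CPU and start out invalid (0) everywhere.
     */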
    if (cpu_has_mmid) {
        set_cpu_context(0, mm, 0);
    } else {
        for_each_possible_cpu(i)
            set_cpu_context(i, mm, 0);
    }

    mm->context.bd_emupage_allocmap = NULL;
    spin_lock_init(&mm->context.bd_emupage_lock);
    init_waitqueue_head(&mm->context.bd_emupage_queue);

    return 0;
}

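/*
 * Activate @next on this CPU: make sure it has a valid ASID/MMID, point
 * the TLB refill handlers at its page tables, and update the mm_cpumasks
 * so IPI-based TLB flushes target the right CPUs. Runs with the HTW
 * stopped and local interrupts disabled.
 */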
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 struct task_struct *tsk)
{
    unsigned int cpu = smp_processor_id();
    unsigned long flags;

    local_irq_save(flags);

    htw_stop();
    check_switch_mmu_context(next);

    /*
     * Mark current->active_mm as not "active" anymore.
     * We don't want to mislead possible IPI tlb flush routines.
     */
    cpumask_clear_cpu(cpu, mm_cpumask(prev));
    cpumask_set_cpu(cpu, mm_cpumask(next));
    htw_start();

    local_irq_restore(flags);
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
    dsemul_mm_cleanup(mm);
}

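/*
 * Invalidate any TLB contents associated with @mm. On MMID-capable CPUs
 * this is done globally with ginvt; otherwise the local CPU either moves
 * the mm to a fresh ASID (if it is active here) or marks it for lazy
 * reallocation.
 */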
static inline void
drop_mmu_context(struct mm_struct *mm)
{
    unsigned long flags;
    unsigned int cpu;
    u32 old_mmid;
    u64 ctx;

    local_irq_save(flags);

    cpu = smp_processor_id();
    ctx = cpu_context(cpu, mm);

    if (!ctx) {
        /* no-op */
    } else if (cpu_has_mmid) {
        /*
         * Globally invalidating TLB entries associated with the MMID
         * is pretty cheap using the GINVT instruction, so we'll do
         * that rather than incur the overhead of allocating a new
         * MMID. The latter would be especially difficult since MMIDs
         * are global & other CPUs may be actively using ctx.
         */
        htw_stop();
        old_mmid = read_c0_memorymapid();
        write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
        mtc0_tlbw_hazard();
        ginvt_mmid();
        sync_ginv();
        write_c0_memorymapid(old_mmid);
        instruction_hazard();
        htw_start();
    } else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
        /*
         * mm is currently active, so we can't really drop it.
         * Instead we bump the ASID.
         */
        htw_stop();
        get_new_mmu_context(mm);
        write_c0_entryhi(cpu_asid(cpu, mm));
        htw_start();
    } else {
        /* will get a new context next time */
        set_cpu_context(cpu, mm, 0);
    }

    local_irq_restore(flags);
}

#include <asm-generic/mmu_context.h>

#endif /* _ASM_MMU_CONTEXT_H */