/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/sched.h>

#include <asm/spitfire.h>
#include <asm/adi_64.h>
#include <asm-generic/mm_hooks.h>
#include <asm/percpu.h>

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
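
/* These symbols back the context-number allocator (the definitions live in
 * arch/sparc/mm/init_64.c): ctx_alloc_lock serializes allocation,
 * tlb_context_cache caches the most recently assigned context number with
 * the allocation generation in its upper bits, and mmu_context_bmap marks
 * which hardware context numbers are in use.  per_cpu_secondary_mm records
 * the mm currently loaded into each CPU's secondary context, and
 * get_new_mmu_context() hands an mm a fresh context number, starting a new
 * generation when the number space is exhausted.
 */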

#define init_new_context init_new_context
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
void destroy_context(struct mm_struct *mm);

void __tsb_context_switch(unsigned long pgd_pa,
              struct tsb_config *tsb_base,
              struct tsb_config *tsb_huge,
              unsigned long tsb_descr_pa,
              unsigned long secondary_ctx);

static inline void tsb_context_switch_ctx(struct mm_struct *mm,
                      unsigned long ctx)
{
    __tsb_context_switch(__pa(mm->pgd),
                 &mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
                 (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
                  &mm->context.tsb_block[MM_TSB_HUGE] :
                  NULL)
#else
                 NULL
#endif
                 , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
                 ctx);
}

#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
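
/* tsb_context_switch_ctx() hands the low-level assembly helper
 * __tsb_context_switch() the physical address of the page-table root, the
 * base TSB configuration, the huge-page TSB configuration when one has
 * been allocated (NULL otherwise), the physical address of the TSB
 * descriptor block (which sun4v passes to the hypervisor), and the
 * secondary context value.  The tsb_context_switch() shorthand invokes it
 * with a context argument of zero.
 */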

void tsb_grow(struct mm_struct *mm,
          unsigned long tsb_index,
          unsigned long mm_rss);
#ifdef CONFIG_SMP
void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif
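
/* tsb_grow() resizes the selected TSB once an address space's resident set
 * outgrows it.  On SMP, smp_tsb_sync() cross-calls the other CPUs running
 * this mm so they pick up the new TSB; on UP it compiles away.
 */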

/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm) \
    __asm__ __volatile__( \
    "\n661: stxa        %0, [%1] %2\n" \
    "   .section    .sun4v_1insn_patch, \"ax\"\n" \
    "   .word       661b\n" \
    "   stxa        %0, [%1] %3\n" \
    "   .previous\n" \
    "   flush       %%g6\n" \
    : /* No outputs */ \
    : "r" (CTX_HWBITS((__mm)->context)), \
      "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
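
/* The 661: label plus the .sun4v_1insn_patch entry is the usual sparc64
 * boot-time patching scheme: on hypervisor (sun4v) machines the recorded
 * stxa to ASI_DMMU is overwritten with the ASI_MMU form, so the same macro
 * writes the secondary context register on both sun4u and sun4v.  The
 * trailing flush is the synchronizing instruction that makes the
 * MMU-register write take effect.
 */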

void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
    unsigned long ctx_valid, flags;
    int cpu = smp_processor_id();

    per_cpu(per_cpu_secondary_mm, cpu) = mm;
    if (unlikely(mm == &init_mm))
        return;

    spin_lock_irqsave(&mm->context.lock, flags);
    ctx_valid = CTX_VALID(mm->context);
    if (!ctx_valid)
        get_new_mmu_context(mm);

    /* We have to be extremely careful here or else we will miss
     * a TSB grow if we switch back and forth between a kernel
     * thread and an address space which has its TSB size increased
     * on another processor.
     *
     * It is possible to play some games in order to optimize the
     * switch, but the safest thing to do is to unconditionally
     * perform the secondary context load and the TSB context switch.
     *
     * For reference the bad case is, for address space "A":
     *
     *      CPU 0           CPU 1
     *  run address space A
     *  set cpu0's bits in cpu_vm_mask
     *  switch to kernel thread, borrow
     *  address space A via entry_lazy_tlb
     *                  run address space A
     *                  set cpu1's bit in cpu_vm_mask
     *                  flush_tlb_pending()
     *                  reset cpu_vm_mask to just cpu1
     *                  TSB grow
     *  run address space A
     *  context was valid, so skip
     *  TSB context switch
     *
     * At that point cpu0 continues to use a stale TSB, the one from
     * before the TSB grow performed on cpu1.  cpu1 did not cross-call
     * cpu0 to update its TSB because at that point the cpu_vm_mask
     * only had cpu1 set in it.
     */
    tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));

    /* Any time a processor runs a context on an address space
     * for the first time, we must flush that context out of the
     * local TLB.
     */
    if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        __flush_tlb_mm(CTX_HWBITS(mm->context),
                   SECONDARY_CONTEXT);
    }
    spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
    /* Save the current state of MCDPER register for the process
     * we are switching from
     */
    if (adi_capable()) {
        register unsigned long tmp_mcdper;

        __asm__ __volatile__(
            ".word 0x83438000\n\t"  /* rd  %mcdper, %g1 */
            "mov %%g1, %0\n\t"
            : "=r" (tmp_mcdper)
            :
            : "g1");
        if (tmp_mcdper)
            set_tsk_thread_flag(prev, TIF_MCDPER);
        else
            clear_tsk_thread_flag(prev, TIF_MCDPER);
    }
}

#define finish_arch_post_lock_switch    finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
    /* Restore the state of MCDPER register for the new process
     * just switched to.
     */
    if (adi_capable()) {
        register unsigned long tmp_mcdper;

        tmp_mcdper = test_thread_flag(TIF_MCDPER);
        __asm__ __volatile__(
            "mov %0, %%g1\n\t"
            ".word 0x9d800001\n\t"  /* wr %g0, %g1, %mcdper */
            ".word 0xaf902001\n\t"  /* wrpr %g0, 1, %pmcdper */
            :
            : "ir" (tmp_mcdper)
            : "g1");
        if (current && current->mm && current->mm->context.adi) {
            struct pt_regs *regs;

            regs = task_pt_regs(current);
            regs->tstate |= TSTATE_MCDE;
        }
    }
}
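
/* Together, arch_start_context_switch() and finish_arch_post_lock_switch()
 * context-switch the ADI (Application Data Integrity) state on M7 and
 * newer parts: the outgoing task's MCDPER value is latched into its
 * TIF_MCDPER flag, the incoming task's value is written back, and
 * TSTATE_MCDE is raised for tasks that actually use ADI.  The raw .word
 * encodings stand in for the mcdper/pmcdper register accesses annotated
 * beside them, presumably because not all assemblers accept those register
 * names.
 */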

#include <asm-generic/mmu_context.h>

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */