/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
        unsigned long ua, unsigned long entries,
        struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
        unsigned long entries, unsigned long dev_hpa,
        struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
        struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
        unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
        unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
        unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
        unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
        unsigned int pageshift, unsigned long *size)
{
    return false;
}
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
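
/*
 * Illustrative sketch (not part of the original header): a consumer of
 * the preregistration API above, such as a VFIO-style TCE driver, might
 * preregister a region of userspace memory once, then translate and pin
 * it per DMA mapping. The exact flow is an assumption and error
 * handling is elided:
 *
 *     mm_iommu_new(mm, ua, entries, &mem);          // preregister region
 *     mem = mm_iommu_lookup(mm, ua, size);          // find it per mapping
 *     mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa); // userspace -> host phys
 *     mm_iommu_mapped_inc(mem);                     // hold while mapped
 *     ...
 *     mm_iommu_mapped_dec(mem);                     // release on unmap
 *     mm_iommu_put(mm, mem);                        // drop on teardown
 */
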
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
                      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
                      struct mm_struct *next,
                      struct task_struct *tsk)
{
    if (radix_enabled())
        return radix__switch_mmu_context(prev, next);
    return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
void __init hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int alloc_extended_context(struct mm_struct *mm,
                     unsigned long ea)
{
    int context_id;

    int index = ea >> MAX_EA_BITS_PER_CONTEXT;

    context_id = hash__alloc_context_id();
    if (context_id < 0)
        return context_id;

    VM_WARN_ON(mm->context.extended_id[index]);
    mm->context.extended_id[index] = context_id;
    return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
    int context_id;

    context_id = get_user_context(&mm->context, ea);
    if (!context_id)
        return true;
    return false;
}
#endif
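
/*
 * Illustrative sketch (not part of the original header): a caller that
 * hands a hash-MMU process an effective address beyond what its primary
 * context ID covers might allocate the extra context on demand:
 *
 *     if (need_extra_context(mm, ea)) {
 *         if (alloc_extended_context(mm, ea) < 0)
 *             return -ENOMEM;
 *     }
 */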

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
                   struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
                     unsigned long ea)
{
    /* non-Book3S-64 platforms should never call this */
    WARN_ON(1);
    return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
    return false;
}
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
    atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
    atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
    /*
     * If any copro is in use, increment the active CPU count
     * in order to force TLB invalidations to be global so as to
     * propagate to the Nest MMU.
     */
    if (atomic_inc_return(&mm->context.copros) == 1)
        inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
    int c;

    /*
     * When removing the last copro, we need to broadcast a global
     * flush of the full mm, as the next TLBI may be local and the
     * nMMU and/or PSL need to be cleaned up.
     *
     * Both the 'copros' and 'active_cpus' counts are looked at in
     * flush_all_mm() to determine the scope (local/global) of the
     * TLBIs, so we need to flush first before decrementing
     * 'copros'. If this API is used by several callers for the
     * same context, it can lead to over-flushing. It's hopefully
     * not common enough to be a problem.
     *
     * Skip on hash, as we don't know how to do the proper flush
     * for the time being. Invalidations will remain global if
     * used on hash. Note that we can't drop 'copros' either, as
     * it could make some invalidations local with no flush
     * in-between.
     */
    if (radix_enabled()) {
        flush_all_mm(mm);

        c = atomic_dec_if_positive(&mm->context.copros);
        /* Detect imbalance between add and remove */
        WARN_ON(c < 0);

        if (c == 0)
            dec_mm_active_cpus(mm);
    }
}
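
/*
 * Illustrative sketch (not part of the original header): a coprocessor
 * driver would bracket the window during which its unit may hold
 * translations for the mm, so that invalidations stay global in between:
 *
 *     mm_context_add_copro(mm);
 *     // ... attach mm's context to the coprocessor, do work ...
 *     // ... quiesce and detach the coprocessor ...
 *     mm_context_remove_copro(mm);
 */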

/*
 * The vas_windows counter shows the number of open windows in the mm
 * context. During context switch, use this counter to clear the
 * foreign real address mapping (CP_ABORT) for the thread / process
 * that intends to use COPY/PASTE. When a process closes all windows,
 * disable CP_ABORT, which is expensive to run.
 *
 * For user context, register a copro so that TLBIs are seen by the
 * nest MMU. mm_context_add/remove_vas_window() are used only for user
 * space windows.
 */
static inline void mm_context_add_vas_window(struct mm_struct *mm)
{
    atomic_inc(&mm->context.vas_windows);
    mm_context_add_copro(mm);
}

static inline void mm_context_remove_vas_window(struct mm_struct *mm)
{
    int v;

    mm_context_remove_copro(mm);
    v = atomic_dec_if_positive(&mm->context.vas_windows);

    /* Detect imbalance between add and remove */
    WARN_ON(v < 0);
}
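
/*
 * Illustrative sketch (not part of the original header): a VAS window
 * driver would pair these calls with a user window's lifetime:
 *
 *     mm_context_add_vas_window(current->mm);    // on window open
 *     // ... userspace uses COPY/PASTE against the window ...
 *     mm_context_remove_vas_window(current->mm); // on window close
 */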
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
                 unsigned long type, unsigned long pg_sizes,
                 unsigned long start, unsigned long end);
#else
static inline void do_h_rpt_invalidate_prt(unsigned long pid,
                       unsigned long lpid,
                       unsigned long type,
                       unsigned long pg_sizes,
                       unsigned long start,
                       unsigned long end) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                   struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 struct task_struct *tsk)
{
    unsigned long flags;

    local_irq_save(flags);
    switch_mm_irqs_off(prev, next, tsk);
    local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
    switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
#ifdef CONFIG_PPC_BOOK3E_64
#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
                  struct task_struct *tsk)
{
    /* 64-bit Book3E keeps track of current PGD in the PACA */
    get_paca()->pgd = NULL;
}
#endif

extern void arch_exit_mmap(struct mm_struct *mm);

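/*
 * If an unmapped range covers the vDSO, forget its base address so
 * that later users of mm->context.vdso (for example signal trampoline
 * setup) do not reference a stale mapping.
 */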
static inline void arch_unmap(struct mm_struct *mm,
                  unsigned long start, unsigned long end)
{
    unsigned long vdso_base = (unsigned long)mm->context.vdso;

    if (start <= vdso_base && vdso_base < end)
        mm->context.vdso = NULL;
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
                   bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
        bool write, bool execute, bool foreign)
{
    /* by default, allow everything */
    return true;
}

#define pkey_mm_init(mm)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
{
    return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
                struct mm_struct *mm)
{
    arch_dup_pkeys(oldmm, mm);
    return 0;
}

#include <asm-generic/mmu_context.h>

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */