/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
    asm volatile("ptlb" : : : "memory");
}
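
/*
 * Illustrative sketch, not part of the original header: __tlb_flush_local()
 * only affects the CPU it executes on, so flushing every CPU this way would
 * need an IPI broadcast, e.g. on_each_cpu() from <linux/smp.h> (not included
 * here).  Both helpers below are hypothetical; the header itself uses
 * csp/idte for global flushes instead.
 */
static inline void example_tlb_flush_local_ipi(void *info)
{
    __tlb_flush_local();        /* runs on each CPU in IPI context */
}

static inline void example_tlb_flush_all_cpus(void)
{
    on_each_cpu(example_tlb_flush_local_ipi, NULL, 1);
}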

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
    unsigned long opt;

    opt = IDTE_PTOA;
    if (MACHINE_HAS_TLB_GUEST)
        opt |= IDTE_GUEST_ASCE;
    /* Global TLB flush for the mm */
    asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
    unsigned int dummy = 0;

    csp(&dummy, 0, 0);
}
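
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * idte is only usable when the facility is installed, so a caller that wants
 * to flush a single ASCE typically falls back to a full flush otherwise,
 * mirroring the pattern used by __tlb_flush_mm() and __tlb_flush_kernel()
 * below.
 */
static inline void example_tlb_flush_asce(unsigned long asce)
{
    if (MACHINE_HAS_IDTE)
        __tlb_flush_idte(asce);
    else
        __tlb_flush_global();
}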

/*
 * Flush TLB entries for a specific mm on all CPUs (if a gmap is in use
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
    unsigned long gmap_asce;

    /*
     * If the machine has IDTE we prefer to do a per mm flush
     * on all cpus instead of doing a local flush if the mm
     * only ran on the local cpu.
     */
    preempt_disable();
    atomic_inc(&mm->context.flush_count);
    /* Reset TLB flush mask */
    cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
    barrier();
    gmap_asce = READ_ONCE(mm->context.gmap_asce);
    if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
        if (gmap_asce)
            __tlb_flush_idte(gmap_asce);
        __tlb_flush_idte(mm->context.asce);
    } else {
        /* Global TLB flush */
        __tlb_flush_global();
    }
    atomic_dec(&mm->context.flush_count);
    preempt_enable();
}
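
/*
 * Illustrative only (hypothetical helper): a direct, unconditional flush of
 * the current task's address space could look like this.  Callers in this
 * header go through the lazy variant below instead; kernel threads have no
 * mm, hence the NULL check.
 */
static inline void example_tlb_flush_current_mm(void)
{
    if (current->mm)
        __tlb_flush_mm(current->mm);
}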

static inline void __tlb_flush_kernel(void)
{
    if (MACHINE_HAS_IDTE)
        __tlb_flush_idte(init_mm.context.asce);
    else
        __tlb_flush_global();
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
    spin_lock(&mm->context.lock);
    if (mm->context.flush_mm) {
        mm->context.flush_mm = 0;
        __tlb_flush_mm(mm);
    }
    spin_unlock(&mm->context.lock);
}
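
/*
 * Illustrative sketch of the deferred-flush convention (hypothetical helper):
 * a page-table update path that skips the immediate TLB flush records that
 * fact in mm->context.flush_mm, and a later flush_tlb_mm()/flush_tlb_range()
 * call performs the flush via __tlb_flush_mm_lazy() above.
 */
static inline void example_defer_mm_flush(struct mm_struct *mm)
{
    mm->context.flush_mm = 1;   /* picked up by __tlb_flush_mm_lazy() */
}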

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()                 do { } while (0)
#define flush_tlb_all()             do { } while (0)
#define flush_tlb_page(vma, addr)   do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
    __tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                   unsigned long start, unsigned long end)
{
    __tlb_flush_mm_lazy(vma->vm_mm);
}
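
/*
 * Illustrative usage (hypothetical helper): after a batch of page-table
 * updates that deferred their flushes, a single flush_tlb_range() call is
 * enough; on s390 the start/end arguments are ignored and the whole mm is
 * flushed lazily.
 */
static inline void example_flush_after_batch(struct vm_area_struct *vma,
                   unsigned long start, unsigned long end)
{
    /* ... deferred pte updates for [start, end) would happen here ... */
    flush_tlb_range(vma, start, end);
}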

static inline void flush_tlb_kernel_range(unsigned long start,
                      unsigned long end)
{
    __tlb_flush_kernel();
}
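
/*
 * Illustrative only (hypothetical helper): callers such as the vmalloc/vmap
 * teardown paths flush kernel page-table changes like this; on s390 the
 * range arguments are ignored and the whole kernel ASCE is flushed.
 */
static inline void example_flush_kernel_mapping(unsigned long addr,
                      unsigned long size)
{
    flush_tlb_kernel_range(addr, addr + size);
}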

#endif /* _S390_TLBFLUSH_H */