0001
0002 #ifndef _S390_TLBFLUSH_H
0003 #define _S390_TLBFLUSH_H
0004
0005 #include <linux/mm.h>
0006 #include <linux/sched.h>
0007 #include <asm/processor.h>
0008
0009
0010
0011
/*
 * Flush all TLB entries on the local CPU only.
 *
 * PTLB purges the translation-lookaside buffer of the issuing CPU;
 * the "memory" clobber keeps the compiler from moving memory
 * accesses across the flush.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}
0016
0017
0018
0019
/*
 * Flush TLB entries of a single address space on all CPUs using the
 * IDTE instruction (global clearing operation).
 *
 * @asce: address-space-control element selecting the address space
 *        whose TLB entries are to be invalidated.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	/* When running with TLB guest support, also purge guest entries. */
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the address space identified by @asce. */
	asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}
0030
0031
0032
0033
/*
 * Flush all TLB entries on all CPUs.
 *
 * A compare-and-swap-and-purge (csp) on a dummy word purges the
 * TLBs of every CPU in the configuration.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	csp(&dummy, 0, 0);
}
0040
0041
0042
0043
0044
/*
 * Flush TLB entries belonging to the address space of @mm on all CPUs.
 *
 * Uses IDTE per address space (including an attached gmap ASCE, if
 * any) when the machine supports it; otherwise falls back to a full
 * global flush.  The flush_count increment and the preempt-disabled
 * section let other code detect an in-progress flush.
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset the TLB flush mask before flushing. */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	/* barrier() orders the cpumask reset against the flush below. */
	barrier();
	/*
	 * gmap_asce == -1UL means IDTE must not be used for this mm;
	 * READ_ONCE guards against a torn/refetched read of the field.
	 */
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* No IDTE available/allowed: global TLB flush. */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}
0071
0072 static inline void __tlb_flush_kernel(void)
0073 {
0074 if (MACHINE_HAS_IDTE)
0075 __tlb_flush_idte(init_mm.context.asce);
0076 else
0077 __tlb_flush_global();
0078 }
0079
/*
 * Conditionally flush the TLB for @mm: only if a flush was deferred
 * (context.flush_mm set).  The context lock serializes concurrent
 * lazy flushers so the flag is tested and cleared atomically with
 * respect to the flush itself.
 */
static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		/* Clear the flag before flushing, inside the lock. */
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
/*
 * No-op TLB flush primitives: s390 has no cheap single-page or
 * whole-machine flush at these granularities; all real flushing
 * is done per address space via the mm-based functions below.
 */
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)
0111
/* Flush the TLB for @mm, deferred-flush aware (lazy variant). */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}
0116
/*
 * Flush TLB entries for a virtual address range.  s390 cannot flush
 * sub-ranges, so the whole address space of the vma's mm is flushed;
 * @start and @end are intentionally unused.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}
0122
/*
 * Flush TLB entries for a kernel virtual address range.  As with
 * flush_tlb_range(), ranges cannot be flushed selectively, so the
 * entire kernel address space is flushed; @start/@end are unused.
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
0128
0129 #endif