0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020 #include <linux/kernel.h>
0021 #include <linux/mm.h>
0022 #include <linux/percpu.h>
0023 #include <linux/hardirq.h>
0024 #include <asm/tlbflush.h>
0025 #include <asm/tlb.h>
0026 #include <asm/bug.h>
0027 #include <asm/pte-walk.h>
0028
0029
0030 #include <trace/events/thp.h>
0031
/* Per-CPU batch of pending hash-page invalidations, filled by
 * hpte_need_flush() and drained by __flush_tlb_pending(). */
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
0033
0034
0035
0036
0037
0038
0039
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately (when no batch is active on this CPU) or queue the
 * entry in the current CPU's ppc64_tlb_batch.
 *
 * @mm:   address space the PTE belongs to
 * @addr: virtual address covered by the PTE
 * @ptep: pointer to the linux PTE
 * @pte:  the (old) PTE value to flush the hash entry for
 * @huge: non-zero when @ptep maps a hugepage
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	/* get_cpu_var() disables preemption; every exit path below must
	 * put_cpu_var() before returning. */
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i, offset;

	i = batch->index;

	/*
	 * Work out the page size and the offset used by __real_pte() to
	 * locate the second half of the PTE, then mask the address down
	 * to the start of the page of that size.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		/* Hugepage size comes from the slice map for this address. */
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
		if (unlikely(psize == MMU_PAGE_16G))
			offset = PTRS_PER_PUD;
		else
			offset = PTRS_PER_PMD;
#else
		/* Should never be called with huge set without hugetlb
		 * support; the assignment below only silences the
		 * uninitialized-variable warning. */
		BUG();
		psize = pte_pagesize_index(mm, addr, pte);
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size. */
		addr &= PAGE_MASK;
		offset = PTRS_PER_PTE;
	}

	/* Build the VPN (virtual page number) for the hash lookup,
	 * using the appropriate segment size and VSID for user vs
	 * kernel addresses. */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep, offset);

	/*
	 * No active batch on this CPU: flush the single entry now and
	 * return (remembering to re-enable preemption).
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * A batch can only hold entries for a single mm, page size and
	 * segment size. If the pending batch doesn't match, flush it
	 * first and start a new one (this can happen e.g. when memory
	 * reclaim interrupts an ongoing batch and flushes pages from a
	 * different context).
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	/* Drain the batch as soon as it is full. */
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
0136
0137
0138
0139
0140
0141
0142
0143
0144 void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
0145 {
0146 int i, local;
0147
0148 i = batch->index;
0149 local = mm_is_thread_local(batch->mm);
0150 if (i == 1)
0151 flush_hash_page(batch->vpn[0], batch->pte[0],
0152 batch->psize, batch->ssize, local);
0153 else
0154 flush_hash_range(i, local);
0155 batch->index = 0;
0156 }
0157
0158 void hash__tlb_flush(struct mmu_gather *tlb)
0159 {
0160 struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);
0161
0162
0163
0164
0165
0166
0167 if (tlbbatch->index)
0168 __flush_tlb_pending(tlbbatch);
0169
0170 put_cpu_var(ppc64_tlb_batch);
0171 }
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
/*
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            of the kernel (init_mm) mapping, leaving
 *                            the linux PTEs themselves intact.
 *
 * @start: starting address (rounded down to a page boundary)
 * @end:   ending address (rounded up; not included in the flush)
 */
void __flush_hash_table_range(unsigned long start, unsigned long end)
{
	int hugepage_shift;
	unsigned long flags;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * Note: normally a batch should only be used within a PTE-locked
	 * section. Running with only IRQs disabled works here because we
	 * never modify the PTEs, we only flush the hash entries while
	 * leaving the PTEs (including their H_PAGE_HASHPTE bit) intact.
	 * Disabling interrupts also keeps us on one CPU for the per-CPU
	 * batch, and lazy MMU mode lets hpte_need_flush() batch the
	 * invalidations.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
		unsigned long pte;

		/* Unmapped address: nothing to flush. */
		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		/* Only PTEs that have a hash entry need flushing. */
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		/* hugepage_shift doubles as the "huge" flag: it is
		 * non-zero only when find_init_mm_pte() found a
		 * hugepage mapping. */
		hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
0223
0224 void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
0225 {
0226 pte_t *pte;
0227 pte_t *start_pte;
0228 unsigned long flags;
0229
0230 addr = ALIGN_DOWN(addr, PMD_SIZE);
0231
0232
0233
0234
0235
0236
0237
0238
0239 local_irq_save(flags);
0240 arch_enter_lazy_mmu_mode();
0241 start_pte = pte_offset_map(pmd, addr);
0242 for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
0243 unsigned long pteval = pte_val(*pte);
0244 if (pteval & H_PAGE_HASHPTE)
0245 hpte_need_flush(mm, addr, pte, pteval, 0);
0246 addr += PAGE_SIZE;
0247 }
0248 arch_leave_lazy_mmu_mode();
0249 local_irq_restore(flags);
0250 }