// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/pgtable.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

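/*
 * The HPTE lock bit lives in one of the software-use bits of the HPTE's
 * first doubleword. HPTEs are stored big-endian, but the generic bitops
 * used below operate on a native-endian unsigned long, so on little-endian
 * kernels the bit index must be offset by 56 to address the same byte.
 */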
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

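/*
 * Build the VA argument for a global TLB invalidate (tlbie) from the
 * virtual page number and issue the instruction. Returns the encoded
 * RB value so callers can trace it.
 */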
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
				     int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already, and the
	 * top two bits are ignored because we can only accommodate 76
	 * bits in a 64 bit vpn with a VPN_SHIFT of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * Clear the top 16 bits of the 64 bit va (non-SLS segment).
	 * Older versions of the architecture (2.02 and earlier) require
	 * the masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L = 1 (large page) */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	return va;
}

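/*
 * POWER9 parts with the TLBIE errata (see the CPU_FTR_P9_TLBIE_* feature
 * bits) need extra invalidations after the real one: a radix-format flush
 * to scrub the ERAT, and/or a repeated tlbie to work around a store-queue
 * ordering issue.
 */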
static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
				   int apsize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Radix flush for a hash guest */

		unsigned long rb, rs, prs, r, ric;

		rb = PPC_BIT(52);	/* IS = 2 */
		rs = 0;			/* lpid = 0 */
		prs = 0;		/* partition scoped */
		r = 1;			/* radix format */
		ric = 0;		/* RIC_FLUSH_TLB */

		/*
		 * Need the extra ptesync to make sure we don't
		 * re-order the tlbie
		 */
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
			     : : "r"(rb), "i"(r), "i"(prs),
			       "i"(ric), "r"(rs) : "memory");
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		/* Need the extra ptesync to ensure we don't reorder tlbie */
		asm volatile("ptesync": : :"memory");
		___tlbie(vpn, psize, apsize, ssize);
	}
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * Clear the top 16 bits of the 64 bit va (non-SLS segment).
	 * Older versions of the architecture (2.02 and earlier) require
	 * the masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L = 1 (large page) */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	trace_tlbie(0, 1, va, 0, 0, 0, 0);
}

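/*
 * Invalidate a TLB entry, using a CPU-local tlbiel when the caller says
 * the translation was only used on this CPU and the hardware supports it,
 * and a broadcast tlbie otherwise. On processors that cannot handle
 * concurrent tlbies, the broadcast path is serialized by
 * native_tlbie_lock.
 */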
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		ppc_after_tlbiel_barrier();
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		fixup_tlbie_vpn(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

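/*
 * Per-HPTE locking: take the software lock bit in the HPTE's first
 * doubleword with acquire semantics, spinning (with low-priority hints
 * on SMT processors) until it is free.
 */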
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		spin_begin();
		while (test_bit(HPTE_LOCK_BIT, word))
			spin_cpu_relax();
		spin_end();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

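/*
 * Insert a translation into the given HPTE group. Returns the slot
 * number within the group (with bit 3 set if the entry was placed via
 * the secondary hash), or -1 if the group is full.
 */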
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

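/*
 * Evict a random non-bolted entry from an HPTE group to make room for a
 * new insertion. Note this only clears the HPTE; it deliberately does
 * not issue a tlbie, so the old translation may stay in the TLB until
 * the evicted page is next flushed.
 */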
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = hpte_get_old_v(hptep);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

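/*
 * Scan one HPTE group for a valid entry matching want_v. Returns the
 * global slot number on a hit, -1 otherwise.
 */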
static long __native_hpte_find(unsigned long want_v, unsigned long slot)
{
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long i;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		hptep = htab_address + slot;
		hpte_v = hpte_get_old_v(hptep);
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hpte_group;
	unsigned long want_v;
	unsigned long hash;
	long slot;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/*
	 * We try to keep bolted entries always in primary hash,
	 * but in some cases we can find them in the secondary too.
	 */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __native_hpte_find(want_v, hpte_group);
	if (slot < 0) {
		/* Try in secondary */
		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot = __native_hpte_find(want_v, hpte_group);
		if (slot < 0)
			return -1;
	}

	return slot;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. Bolted entries' base and
	 * actual page size will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	hptep = htab_address + slot;

	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

	/* Invalidate the hpte */
	hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, psize, ssize, 0);
	return 0;
}

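/*
 * Invalidate one HPTE and flush the corresponding TLB entry. Interrupts
 * are disabled around the lock/clear/tlbie sequence.
 */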
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	hpte_v = hpte_get_old_v(hptep);

	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		else
			native_unlock_hpte(hptep);
	}
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

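/*
 * Invalidate all the HPTEs backing one transparent hugepage PMD. The
 * hpte_slot_array records, for each sub-page of the PMD, whether an
 * HPTE exists and in which hash group and slot it was inserted.
 */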
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		hpte_v = hpte_get_old_v(hptep);

		/* Even if we miss, we need to invalidate the TLB */
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* recheck with locks held */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
				/*
				 * Invalidate the hpte. NOTE: this also unlocks it
				 */
				hptep->v = 0;
			} else
				native_unlock_hpte(hptep);
		}
		/*
		 * We need to do a tlb invalidate for each address; the tlbie
		 * instruction compares the entry's VA in the TLB with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

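/*
 * Reconstruct the VPN and page/segment sizes from a raw HPTE and its
 * slot number: the inverse of the encoding done at insert time. Used by
 * native_hpte_clear(), which walks the hash table without knowing what
 * each entry maps.
 */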
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize = size;
	*apsize = a_size;
}

/*
 * Clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER4 that will have a performance penalty,
 * but we can't take the lock because it may be held by another CPU, and a
 * deadlock will happen. If we'll be running with the lock held, it'll
 * be better to kexec with a dirty TLB rather than deadlocking.
 */
static notrace void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * We could lock the pte here, but we are the only cpu
		 * running, right? And for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call ___tlbie() here rather than tlbie() since we
		 * can't take the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			___tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush: we batch the tlbie's to avoid taking and
 * releasing the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn = 0;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				continue;
			/* lock and try again */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;

		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		ppc_after_tlbiel_barrier();
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		/*
		 * Run the tlbie workarounds once, with the last vpn
		 * flushed, to cover the whole batch.
		 */
		fixup_tlbie_vpn(vpn, psize, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

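/*
 * Register the bare-metal ("native") implementations of the hash MMU
 * operations in mmu_hash_ops.
 */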
void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate	= native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	= native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted	= native_hpte_removebolted;
	mmu_hash_ops.hpte_insert	= native_hpte_insert;
	mmu_hash_ops.hpte_remove	= native_hpte_remove;
	mmu_hash_ops.hpte_clear_all	= native_hpte_clear;
	mmu_hash_ops.flush_hash_range	= native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
}