/*
 * PPC64 THP (transparent hugepage) support for hash based MMUs.
 */
#include <linux/mm.h>
#include <asm/machdep.h>
#include <asm/mmu.h>

int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                    pmd_t *pmdp, unsigned long trap, unsigned long flags,
                    int ssize, unsigned int psize)
{
        unsigned int index, valid;
        unsigned char *hpte_slot_array;
        unsigned long rflags, pa, hidx;
        unsigned long old_pmd, new_pmd;
        int ret, lpsize = MMU_PAGE_16M;
        unsigned long vpn, hash, shift, slot;
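
        /*
         * lpsize is the actual page size of the hugepage mapping: with a 64K
         * base page size, a PMD on book3s64 hash maps a 16MB huge page.
         */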

        /*
         * atomically mark the linux large page PMD busy and dirty
         */
        do {
                pmd_t pmd = READ_ONCE(*pmdp);

                old_pmd = pmd_val(pmd);
                /* If PMD busy, retry the access */
                if (unlikely(old_pmd & H_PAGE_BUSY))
                        return 0;
                /* If PMD permissions don't match, take page fault */
                if (unlikely(!check_pte_access(access, old_pmd)))
                        return 1;
                /*
                 * Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access
                 */
                new_pmd = old_pmd | H_PAGE_BUSY | _PAGE_ACCESSED;
                if (access & _PAGE_WRITE)
                        new_pmd |= _PAGE_DIRTY;
        } while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));
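        /*
         * H_PAGE_BUSY is now set, so concurrent hash faults on this PMD see
         * it and back off above; it serves as a per-PMD lock until the final
         * store below clears it.
         */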

        /*
         * Make sure this is thp or devmap entry
         */
        if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
                return 0;

        rflags = htab_convert_pte_flags(new_pmd, flags);

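        /*
         * rflags now carries the HPTE protection and attribute bits derived
         * from the Linux PMD flags above.
         */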
#if 0
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

                /*
                 * No CPU has hugepages but lacks no execute, so we
                 * don't need to worry about that case
                 */
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
        }
#endif

        /*
         * Find the slot index details for this ea, using base page size.
         */
        shift = mmu_psize_defs[psize].shift;
        index = (ea & ~HPAGE_PMD_MASK) >> shift;
        BUG_ON(index >= PTE_FRAG_SIZE);
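        /* index is the base-page-size slice of the huge page that faulted */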

        vpn = hpt_vpn(ea, vsid, ssize);
        hpte_slot_array = get_hpte_slot_array(pmdp);
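        /*
         * hpte_slot_array lives in the page table deposited in the second
         * half of the PMD page table: one byte per base-page slice, holding
         * a valid bit plus the hash slot (secondary bit and group index)
         * where that slice's HPTE was inserted.
         */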
        if (psize == MMU_PAGE_4K) {
                /*
                 * invalidate the old hpte entry if we have that mapped via 64K
                 * base page size. This is because demote_segment won't flush
                 * hash page table entries.
                 */
                if ((old_pmd & H_PAGE_HASHPTE) && !(old_pmd & H_PAGE_COMBO)) {
                        flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
                                            ssize, flags);
                        /*
                         * With THP, we also clear the slot information with
                         * respect to all the 64K hash pte mapping the 16MB
                         * page. They are all invalid now. This makes sure we
                         * don't find the slot valid when we fault with 4k
                         * base page size.
                         */
                        memset(hpte_slot_array, 0, PTE_FRAG_SIZE);
                }
        }

        valid = hpte_valid(hpte_slot_array, index);
        if (valid) {
                /* update the hpte bits */
                hash = hpt_hash(vpn, shift, ssize);
                hidx = hpte_hash_index(hpte_slot_array, index);
                /* the secondary group is hashed at the ones-complement index */
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
                                                 psize, lpsize, ssize, flags);
                /*
                 * We failed to update, try to insert a new entry.
                 */
                if (ret == -1) {
                        /*
                         * large pte is marked busy, so we can be sure
                         * nobody is looking at hpte_slot_array. hence we can
                         * safely update this here.
                         */
                        valid = 0;
                        hpte_slot_array[index] = 0;
                }
        }
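
        /* No usable HPTE for this slice: insert a fresh entry below. */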
        if (!valid) {
                unsigned long hpte_group;

                hash = hpt_hash(vpn, shift, ssize);
                /* insert new entry */
                pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
                new_pmd |= H_PAGE_HASHPTE;

repeat:
                hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;

                /* Insert into the hash table, primary slot */
                slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
                                                psize, lpsize, ssize);
                /*
                 * Primary is full, try the secondary
                 */
                if (unlikely(slot == -1)) {
                        hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
                        slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
                                                        rflags,
                                                        HPTE_V_SECONDARY,
                                                        psize, lpsize, ssize);
                        if (slot == -1) {
                                /*
                                 * Both groups are full: evict from a group
                                 * picked pseudo-randomly by the timebase low
                                 * bit, then retry the insert.
                                 */
                                if (mftb() & 0x1)
                                        hpte_group = (hash & htab_hash_mask) *
                                                        HPTES_PER_GROUP;

                                mmu_hash_ops.hpte_remove(hpte_group);
                                goto repeat;
                        }
                }

                /*
                 * Hypervisor failure. Restore old pmd and return -1
                 * similar to __hash_page_*.
                 */
                if (unlikely(slot == -2)) {
                        *pmdp = __pmd(old_pmd);
                        hash_failure_debug(ea, access, vsid, trap, ssize,
                                           psize, lpsize, old_pmd);
                        return -1;
                }
                /*
                 * large pte is marked busy, so we can be sure
                 * nobody is looking at hpte_slot_array. hence we can
                 * safely update this here.
                 */
                mark_hpte_slot_valid(hpte_slot_array, index, slot);
        }
        /*
         * Mark the pte with H_PAGE_COMBO, if we are trying to hash it with
         * base page size 4k.
         */
        if (psize == MMU_PAGE_4K)
                new_pmd |= H_PAGE_COMBO;
        /*
         * The hpte valid is stored in the pgtable whose address is in the
         * second half of the PMD. Order this against clearing of the busy
         * bit in the huge pmd.
         */
        smp_wmb();
        *pmdp = __pmd(new_pmd & ~H_PAGE_BUSY);
        return 0;
}