0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/mm.h>
0012 #include <linux/hugetlb.h>
0013 #include <asm/cacheflush.h>
0014 #include <asm/machdep.h>
0015
/*
 * Shift of the default huge page size; set at boot by
 * hugetlbpage_init_defaultsize() below.
 */
unsigned int hpage_shift;
EXPORT_SYMBOL(hpage_shift);
0018
0019 #ifdef CONFIG_PPC_64S_HASH_MMU
/*
 * Insert or update the hash page table entry (HPTE) backing a hugetlb
 * page, for the hash MMU.
 *
 * Returns 0 on success (or when the fault should simply be retried),
 * 1 when the PTE permissions don't match @access (caller takes a page
 * fault), and -1 when the HPTE insert fails.
 */
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize)
{
	real_pte_t rpte;
	unsigned long vpn;
	unsigned long old_pte, new_pte;
	unsigned long rflags, pa;
	long slot, offset;

	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Compute the virtual page number for this EA/VSID */
	vpn = hpt_vpn(ea, vsid, ssize);

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */
	do {
		old_pte = pte_val(*ptep);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;

		/*
		 * Try to lock the PTE (set H_PAGE_BUSY), add ACCESSED
		 * and DIRTY if it was a write access
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/* Make sure this is a hugetlb entry, not a THP or devmap entry */
	if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
		return 0;

	rflags = htab_convert_pte_flags(new_pte, flags);
	/* 16G pages are mapped at PUD level, everything else at PMD level */
	if (unlikely(mmu_psize == MMU_PAGE_16G))
		offset = PTRS_PER_PUD;
	else
		offset = PTRS_PER_PMD;
	rpte = __real_pte(__pte(old_pte), ptep, offset);

	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/*
		 * No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	/* Check if pte already has an hpte (case 2 above) */
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long gslot;

		gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0);
		/* -1: no HPTE found — drop the stale slot info and insert */
		if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, mmu_psize,
					       mmu_psize, ssize, flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & H_PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, shift, ssize);

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;

		slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
					     mmu_psize, ssize);

		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   mmu_psize, mmu_psize, old_pte);
			return -1;
		}

		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
	}

	/*
	 * No need to use ldarx/stdcx here: the PTE is "locked" by
	 * H_PAGE_BUSY, which this plain store also clears.
	 */
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}
0126 #endif
0127
0128 pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
0129 unsigned long addr, pte_t *ptep)
0130 {
0131 unsigned long pte_val;
0132
0133
0134
0135
0136
0137 pte_val = pte_update(vma->vm_mm, addr, ptep,
0138 _PAGE_PRESENT, _PAGE_INVALID, 1);
0139
0140 return __pte(pte_val);
0141 }
0142
0143 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
0144 pte_t *ptep, pte_t old_pte, pte_t pte)
0145 {
0146
0147 if (radix_enabled())
0148 return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
0149 old_pte, pte);
0150 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
0151 }
0152
0153 void __init hugetlbpage_init_defaultsize(void)
0154 {
0155
0156
0157
0158 if (mmu_psize_defs[MMU_PAGE_16M].shift)
0159 hpage_shift = mmu_psize_defs[MMU_PAGE_16M].shift;
0160 else if (mmu_psize_defs[MMU_PAGE_1M].shift)
0161 hpage_shift = mmu_psize_defs[MMU_PAGE_1M].shift;
0162 else if (mmu_psize_defs[MMU_PAGE_2M].shift)
0163 hpage_shift = mmu_psize_defs[MMU_PAGE_2M].shift;
0164 }