0001 #ifndef _ASM_POWERPC_PTE_WALK_H
0002 #define _ASM_POWERPC_PTE_WALK_H
0003
0004 #include <linux/sched.h>
0005
0006
/*
 * Low-level lockless page-table walk: returns the PTE mapping @ea in
 * @pgdir, or NULL if none. On success, *@is_thp (if non-NULL) reports
 * whether the entry is a transparent huge page, and *@hshift (if
 * non-NULL) the page-size shift (0 for a normal page).
 * NOTE(review): defined elsewhere in the tree; callers in this header
 * wrap it with debug checks — prefer those wrappers over calling this
 * directly.
 */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);
0009
/*
 * Debug-checked wrapper around __find_linux_pte().
 *
 * The lockless walk is only safe with interrupts disabled (so the page
 * tables cannot be freed under us), hence the VM_WARN below.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
				    bool *is_thp, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);

#if defined(CONFIG_DEBUG_VM) && \
	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
	/*
	 * With both hugetlb and THP disabled we should never find a huge
	 * mapping, so a non-zero *hshift indicates a bug.
	 */
	if (hshift)
		WARN_ON(*hshift);
#endif
	return pte;
}
0028
0029 static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
0030 {
0031 pgd_t *pgdir = init_mm.pgd;
0032 return __find_linux_pte(pgdir, ea, NULL, hshift);
0033 }
0034
0035
0036
0037
0038
0039 static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
0040 {
0041 pte_t *ptep;
0042 phys_addr_t pa;
0043 int hugepage_shift;
0044
0045
0046
0047
0048
0049 ptep = find_init_mm_pte(addr, &hugepage_shift);
0050 if (WARN_ON(!ptep))
0051 return 0;
0052
0053 pa = PFN_PHYS(pte_pfn(*ptep));
0054
0055 if (!hugepage_shift)
0056 hugepage_shift = PAGE_SHIFT;
0057
0058 pa |= addr & ((1ul << hugepage_shift) - 1);
0059
0060 return pa;
0061 }
0062
0063
0064
0065
0066
/*
 * Like find_linux_pte(), but additionally checks that the walk is being
 * performed on the current task's own mm — the lockless walk of another
 * process's page tables would need different lifetime guarantees.
 */
static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
					 bool *is_thp, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	VM_WARN(pgdir != current->mm->pgd,
		"%s lock less page table lookup called on wrong mm\n", __func__);
	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);

#if defined(CONFIG_DEBUG_VM) && \
	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
	/*
	 * With both hugetlb and THP disabled we should never find a huge
	 * mapping, so a non-zero *hshift indicates a bug.
	 */
	if (hshift)
		WARN_ON(*hshift);
#endif
	return pte;
}
0087
0088 #endif