0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright (C) 2012 ARM Ltd.
0004  */
0005 #ifndef __ASM_PGTABLE_H
0006 #define __ASM_PGTABLE_H
0007 
0008 #include <asm/bug.h>
0009 #include <asm/proc-fns.h>
0010 
0011 #include <asm/memory.h>
0012 #include <asm/mte.h>
0013 #include <asm/pgtable-hwdef.h>
0014 #include <asm/pgtable-prot.h>
0015 #include <asm/tlbflush.h>
0016 
0017 /*
0018  * VMALLOC range.
0019  *
0020  * VMALLOC_START: beginning of the kernel vmalloc space
0021  * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
0022  *  and fixed mappings
0023  */
0024 #define VMALLOC_START       (MODULES_END)
0025 #define VMALLOC_END     (VMEMMAP_START - SZ_256M)
0026 
0027 #define vmemmap         ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
0028 
0029 #ifndef __ASSEMBLY__
0030 
0031 #include <asm/cmpxchg.h>
0032 #include <asm/fixmap.h>
0033 #include <linux/mmdebug.h>
0034 #include <linux/mm_types.h>
0035 #include <linux/sched.h>
0036 #include <linux/page_table_check.h>
0037 
0038 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0039 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
0040 
0041 /* Set stride and tlb_level in flush_*_tlb_range */
0042 #define flush_pmd_tlb_range(vma, addr, end) \
0043     __flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
0044 #define flush_pud_tlb_range(vma, addr, end) \
0045     __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
0046 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
0047 
0048 static inline bool arch_thp_swp_supported(void)
0049 {
0050     return !system_supports_mte();
0051 }
0052 #define arch_thp_swp_supported arch_thp_swp_supported
0053 
0054 /*
0055  * Outside of a few very special situations (e.g. hibernation), we always
0056  * use broadcast TLB invalidation instructions, so a spurious page
0057  * fault on one CPU which has been handled concurrently by another CPU
0058  * does not need to perform additional invalidation.
0059  */
0060 #define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
0061 
0062 /*
0063  * ZERO_PAGE is a global shared page that is always zero: used
0064  * for zero-mapped memory areas etc..
0065  */
0066 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
0067 #define ZERO_PAGE(vaddr)    phys_to_page(__pa_symbol(empty_zero_page))
0068 
0069 #define pte_ERROR(e)    \
0070     pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
0071 
0072 /*
0073  * Macros to convert between a physical address and its placement in a
0074  * page table entry, taking care of 52-bit addresses.
0075  */
0076 #ifdef CONFIG_ARM64_PA_BITS_52
0077 static inline phys_addr_t __pte_to_phys(pte_t pte)
0078 {
0079     return (pte_val(pte) & PTE_ADDR_LOW) |
0080         ((pte_val(pte) & PTE_ADDR_HIGH) << 36);
0081 }
0082 static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
0083 {
0084     return (phys | (phys >> 36)) & PTE_ADDR_MASK;
0085 }
0086 #else
0087 #define __pte_to_phys(pte)  (pte_val(pte) & PTE_ADDR_MASK)
0088 #define __phys_to_pte_val(phys) (phys)
0089 #endif
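
/*
 * Editor's note: illustrative sketch, not part of the original header. It
 * shows that __phys_to_pte_val() and __pte_to_phys() are inverses for any
 * page-aligned physical address within the configured range (52 bits with
 * CONFIG_ARM64_PA_BITS_52, 48 bits otherwise).
 */
#if 0	/* example only */
static inline bool __pte_phys_roundtrip_ok(phys_addr_t phys)
{
	/* Fold the physical address into a pte, then extract it again. */
	pte_t pte = __pte(__phys_to_pte_val(phys));

	return __pte_to_phys(pte) == phys;
}
#endif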
0090 
0091 #define pte_pfn(pte)        (__pte_to_phys(pte) >> PAGE_SHIFT)
0092 #define pfn_pte(pfn,prot)   \
0093     __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
0094 
0095 #define pte_none(pte)       (!pte_val(pte))
0096 #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
0097 #define pte_page(pte)       (pfn_to_page(pte_pfn(pte)))
0098 
0099 /*
0100  * The following only work if pte_present(). Undefined behaviour otherwise.
0101  */
0102 #define pte_present(pte)    (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
0103 #define pte_young(pte)      (!!(pte_val(pte) & PTE_AF))
0104 #define pte_special(pte)    (!!(pte_val(pte) & PTE_SPECIAL))
0105 #define pte_write(pte)      (!!(pte_val(pte) & PTE_WRITE))
0106 #define pte_user(pte)       (!!(pte_val(pte) & PTE_USER))
0107 #define pte_user_exec(pte)  (!(pte_val(pte) & PTE_UXN))
0108 #define pte_cont(pte)       (!!(pte_val(pte) & PTE_CONT))
0109 #define pte_devmap(pte)     (!!(pte_val(pte) & PTE_DEVMAP))
0110 #define pte_tagged(pte)     ((pte_val(pte) & PTE_ATTRINDX_MASK) == \
0111                  PTE_ATTRINDX(MT_NORMAL_TAGGED))
0112 
0113 #define pte_cont_addr_end(addr, end)                        \
0114 ({  unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;    \
0115     (__boundary - 1 < (end) - 1) ? __boundary : (end);          \
0116 })
0117 
0118 #define pmd_cont_addr_end(addr, end)                        \
0119 ({  unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;    \
0120     (__boundary - 1 < (end) - 1) ? __boundary : (end);          \
0121 })
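
/*
 * Editor's note: illustrative sketch, not part of the original header. It
 * shows the intended use of pte_cont_addr_end(): stepping through [addr, end)
 * so that no step crosses a contiguous-PTE boundary (CONT_PTE_SIZE bytes,
 * e.g. 16 x 4K pages with a 4K granule).
 */
#if 0	/* example only */
static inline void __walk_cont_pte_range(unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		/* next is clamped to min(next CONT_PTE boundary, end) */
		next = pte_cont_addr_end(addr, end);
		/* ... operate on [addr, next) here ... */
	} while (addr = next, addr != end);
}
#endif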
0122 
0123 #define pte_hw_dirty(pte)   (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
0124 #define pte_sw_dirty(pte)   (!!(pte_val(pte) & PTE_DIRTY))
0125 #define pte_dirty(pte)      (pte_sw_dirty(pte) || pte_hw_dirty(pte))
0126 
0127 #define pte_valid(pte)      (!!(pte_val(pte) & PTE_VALID))
0128 /*
0129  * Execute-only user mappings do not have the PTE_USER bit set. All valid
0130  * kernel mappings have the PTE_UXN bit set.
0131  */
0132 #define pte_valid_not_user(pte) \
0133     ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
0134 /*
0135  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
0136  * so that we don't erroneously return false for pages that have been
0137  * remapped as PROT_NONE but are yet to be flushed from the TLB.
0138  * Note that we can't make any assumptions based on the state of the access
0139  * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
0140  * TLB.
0141  */
0142 #define pte_accessible(mm, pte) \
0143     (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
0144 
0145 /*
0146  * p??_access_permitted() is true for valid user mappings (PTE_USER
0147  * bit set, subject to the write permission check). Execute-only
0148  * mappings, e.g. PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
0149  * clear), must return false. PROT_NONE mappings do not have the
0150  * PTE_VALID bit set.
0151  */
0152 #define pte_access_permitted(pte, write) \
0153     (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
0154 #define pmd_access_permitted(pmd, write) \
0155     (pte_access_permitted(pmd_pte(pmd), (write)))
0156 #define pud_access_permitted(pud, write) \
0157     (pte_access_permitted(pud_pte(pud), (write)))
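
/*
 * Editor's note: illustrative sketch, not part of the original header. It
 * spells out the rule above for execute-only mappings: a pte that is valid
 * and user-executable but has PTE_USER clear permits no data access, so
 * pte_access_permitted() is false even for reads.
 */
#if 0	/* example only */
static inline bool __exec_only_has_no_data_access(pte_t pte)
{
	if (pte_valid(pte) && pte_user_exec(pte) && !pte_user(pte))
		return !pte_access_permitted(pte, false);
	return true;
}
#endif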
0158 
0159 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
0160 {
0161     pte_val(pte) &= ~pgprot_val(prot);
0162     return pte;
0163 }
0164 
0165 static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
0166 {
0167     pte_val(pte) |= pgprot_val(prot);
0168     return pte;
0169 }
0170 
0171 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
0172 {
0173     pmd_val(pmd) &= ~pgprot_val(prot);
0174     return pmd;
0175 }
0176 
0177 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
0178 {
0179     pmd_val(pmd) |= pgprot_val(prot);
0180     return pmd;
0181 }
0182 
0183 static inline pte_t pte_mkwrite(pte_t pte)
0184 {
0185     pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
0186     pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
0187     return pte;
0188 }
0189 
0190 static inline pte_t pte_mkclean(pte_t pte)
0191 {
0192     pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
0193     pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
0194 
0195     return pte;
0196 }
0197 
0198 static inline pte_t pte_mkdirty(pte_t pte)
0199 {
0200     pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
0201 
0202     if (pte_write(pte))
0203         pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
0204 
0205     return pte;
0206 }
0207 
0208 static inline pte_t pte_wrprotect(pte_t pte)
0209 {
0210     /*
0211      * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
0212      * clear), set the PTE_DIRTY bit.
0213      */
0214     if (pte_hw_dirty(pte))
0215         pte = pte_mkdirty(pte);
0216 
0217     pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
0218     pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
0219     return pte;
0220 }
0221 
0222 static inline pte_t pte_mkold(pte_t pte)
0223 {
0224     return clear_pte_bit(pte, __pgprot(PTE_AF));
0225 }
0226 
0227 static inline pte_t pte_mkyoung(pte_t pte)
0228 {
0229     return set_pte_bit(pte, __pgprot(PTE_AF));
0230 }
0231 
0232 static inline pte_t pte_mkspecial(pte_t pte)
0233 {
0234     return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
0235 }
0236 
0237 static inline pte_t pte_mkcont(pte_t pte)
0238 {
0239     pte = set_pte_bit(pte, __pgprot(PTE_CONT));
0240     return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
0241 }
0242 
0243 static inline pte_t pte_mknoncont(pte_t pte)
0244 {
0245     return clear_pte_bit(pte, __pgprot(PTE_CONT));
0246 }
0247 
0248 static inline pte_t pte_mkpresent(pte_t pte)
0249 {
0250     return set_pte_bit(pte, __pgprot(PTE_VALID));
0251 }
0252 
0253 static inline pmd_t pmd_mkcont(pmd_t pmd)
0254 {
0255     return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
0256 }
0257 
0258 static inline pte_t pte_mkdevmap(pte_t pte)
0259 {
0260     return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
0261 }
0262 
0263 static inline void set_pte(pte_t *ptep, pte_t pte)
0264 {
0265     WRITE_ONCE(*ptep, pte);
0266 
0267     /*
0268      * Only synchronise if the new pte is a valid kernel mapping; otherwise
0269      * TLB maintenance or update_mmu_cache() provide the necessary barriers.
0270      */
0271     if (pte_valid_not_user(pte)) {
0272         dsb(ishst);
0273         isb();
0274     }
0275 }
0276 
0277 extern void __sync_icache_dcache(pte_t pteval);
0278 
0279 /*
0280  * PTE bits configuration in the presence of hardware Dirty Bit Management
0281  * (PTE_WRITE == PTE_DBM):
0282  *
0283  * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
0284  *   0      0      |   1           0          0
0285  *   0      1      |   1           1          0
0286  *   1      0      |   1           0          1
0287  *   1      1      |   0           1          x
0288  *
0289  * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
0290  * the page fault mechanism. Checking the dirty status of a pte becomes:
0291  *
0292  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
0293  */
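
/*
 * Editor's note: illustrative sketch, not part of the original header. It is
 * a direct code rendering of the dirty check documented above and evaluates
 * to the same result as pte_dirty().
 */
#if 0	/* example only */
static inline bool __pte_dirty_as_documented(pte_t pte)
{
	/* PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) */
	return (pte_val(pte) & PTE_DIRTY) ||
	       ((pte_val(pte) & PTE_WRITE) && !(pte_val(pte) & PTE_RDONLY));
}
#endif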
0294 
0295 static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
0296                        pte_t pte)
0297 {
0298     pte_t old_pte;
0299 
0300     if (!IS_ENABLED(CONFIG_DEBUG_VM))
0301         return;
0302 
0303     old_pte = READ_ONCE(*ptep);
0304 
0305     if (!pte_valid(old_pte) || !pte_valid(pte))
0306         return;
0307     if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
0308         return;
0309 
0310     /*
0311      * Check for potential race with hardware updates of the pte
0312      * (ptep_set_access_flags safely changes valid ptes without going
0313      * through an invalid entry).
0314      */
0315     VM_WARN_ONCE(!pte_young(pte),
0316              "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
0317              __func__, pte_val(old_pte), pte_val(pte));
0318     VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
0319              "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
0320              __func__, pte_val(old_pte), pte_val(pte));
0321 }
0322 
0323 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
0324                 pte_t *ptep, pte_t pte)
0325 {
0326     if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
0327         __sync_icache_dcache(pte);
0328 
0329     /*
0330      * If the PTE would provide user space access to the tags associated
0331      * with it then ensure that the MTE tags are synchronised.  Although
0332      * pte_access_permitted() returns false for exec only mappings, they
0333      * don't expose tags (instruction fetches don't check tags).
0334      */
0335     if (system_supports_mte() && pte_access_permitted(pte, false) &&
0336         !pte_special(pte)) {
0337         pte_t old_pte = READ_ONCE(*ptep);
0338         /*
0339          * We only need to synchronise if the new PTE has tags enabled
0340          * or if swapping in (in which case another mapping may have
0341          * set tags in the past even if this PTE isn't tagged).
0342          * (!pte_none() && !pte_present()) is an open-coded version of
0343          * is_swap_pte().
0344          */
0345         if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
0346             mte_sync_tags(old_pte, pte);
0347     }
0348 
0349     __check_racy_pte_update(mm, ptep, pte);
0350 
0351     set_pte(ptep, pte);
0352 }
0353 
0354 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
0355                   pte_t *ptep, pte_t pte)
0356 {
0357     page_table_check_pte_set(mm, addr, ptep, pte);
0358     return __set_pte_at(mm, addr, ptep, pte);
0359 }
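
/*
 * Editor's note: illustrative sketch, not part of the original header. It
 * shows the typical caller pattern for set_pte_at(): build a pte from a pfn
 * and protection bits, set the software attributes, then install it so that
 * the I-cache/MTE synchronisation and barriers above are applied. Real fault
 * paths additionally hold the page table lock and handle TLB maintenance.
 */
#if 0	/* example only */
static inline void __install_pte_sketch(struct mm_struct *mm,
					unsigned long addr, pte_t *ptep,
					unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pte = pte_mkyoung(pte_mkdirty(pte));
	set_pte_at(mm, addr, ptep, pte);
}
#endif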
0360 
0361 /*
0362  * Huge pte definitions.
0363  */
0364 #define pte_mkhuge(pte)     (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
0365 
0366 /*
0367  * Hugetlb definitions.
0368  */
0369 #define HUGE_MAX_HSTATE     4
0370 #define HPAGE_SHIFT     PMD_SHIFT
0371 #define HPAGE_SIZE      (_AC(1, UL) << HPAGE_SHIFT)
0372 #define HPAGE_MASK      (~(HPAGE_SIZE - 1))
0373 #define HUGETLB_PAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)
0374 
0375 static inline pte_t pgd_pte(pgd_t pgd)
0376 {
0377     return __pte(pgd_val(pgd));
0378 }
0379 
0380 static inline pte_t p4d_pte(p4d_t p4d)
0381 {
0382     return __pte(p4d_val(p4d));
0383 }
0384 
0385 static inline pte_t pud_pte(pud_t pud)
0386 {
0387     return __pte(pud_val(pud));
0388 }
0389 
0390 static inline pud_t pte_pud(pte_t pte)
0391 {
0392     return __pud(pte_val(pte));
0393 }
0394 
0395 static inline pmd_t pud_pmd(pud_t pud)
0396 {
0397     return __pmd(pud_val(pud));
0398 }
0399 
0400 static inline pte_t pmd_pte(pmd_t pmd)
0401 {
0402     return __pte(pmd_val(pmd));
0403 }
0404 
0405 static inline pmd_t pte_pmd(pte_t pte)
0406 {
0407     return __pmd(pte_val(pte));
0408 }
0409 
0410 static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
0411 {
0412     return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
0413 }
0414 
0415 static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
0416 {
0417     return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
0418 }
0419 
0420 #define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
0421 static inline pte_t pte_swp_mkexclusive(pte_t pte)
0422 {
0423     return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
0424 }
0425 
0426 static inline int pte_swp_exclusive(pte_t pte)
0427 {
0428     return pte_val(pte) & PTE_SWP_EXCLUSIVE;
0429 }
0430 
0431 static inline pte_t pte_swp_clear_exclusive(pte_t pte)
0432 {
0433     return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
0434 }
0435 
0436 /*
0437  * Select all bits except the pfn
0438  */
0439 static inline pgprot_t pte_pgprot(pte_t pte)
0440 {
0441     unsigned long pfn = pte_pfn(pte);
0442 
0443     return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
0444 }
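
/*
 * Editor's note: illustrative sketch, not part of the original header. It
 * demonstrates that pte_pfn()/pte_pgprot() split a pte losslessly: combining
 * the two with pfn_pte() reconstructs the original entry.
 */
#if 0	/* example only */
static inline bool __pte_split_is_lossless(pte_t pte)
{
	return pte_val(pfn_pte(pte_pfn(pte), pte_pgprot(pte))) == pte_val(pte);
}
#endif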
0445 
0446 #ifdef CONFIG_NUMA_BALANCING
0447 /*
0448  * See the comment in include/linux/pgtable.h
0449  */
0450 static inline int pte_protnone(pte_t pte)
0451 {
0452     return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
0453 }
0454 
0455 static inline int pmd_protnone(pmd_t pmd)
0456 {
0457     return pte_protnone(pmd_pte(pmd));
0458 }
0459 #endif
0460 
0461 #define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))
0462 
0463 static inline int pmd_present(pmd_t pmd)
0464 {
0465     return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
0466 }
0467 
0468 /*
0469  * THP definitions.
0470  */
0471 
0472 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0473 static inline int pmd_trans_huge(pmd_t pmd)
0474 {
0475     return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
0476 }
0477 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
0478 
0479 #define pmd_dirty(pmd)      pte_dirty(pmd_pte(pmd))
0480 #define pmd_young(pmd)      pte_young(pmd_pte(pmd))
0481 #define pmd_valid(pmd)      pte_valid(pmd_pte(pmd))
0482 #define pmd_user(pmd)       pte_user(pmd_pte(pmd))
0483 #define pmd_user_exec(pmd)  pte_user_exec(pmd_pte(pmd))
0484 #define pmd_cont(pmd)       pte_cont(pmd_pte(pmd))
0485 #define pmd_wrprotect(pmd)  pte_pmd(pte_wrprotect(pmd_pte(pmd)))
0486 #define pmd_mkold(pmd)      pte_pmd(pte_mkold(pmd_pte(pmd)))
0487 #define pmd_mkwrite(pmd)    pte_pmd(pte_mkwrite(pmd_pte(pmd)))
0488 #define pmd_mkclean(pmd)    pte_pmd(pte_mkclean(pmd_pte(pmd)))
0489 #define pmd_mkdirty(pmd)    pte_pmd(pte_mkdirty(pmd_pte(pmd)))
0490 #define pmd_mkyoung(pmd)    pte_pmd(pte_mkyoung(pmd_pte(pmd)))
0491 
0492 static inline pmd_t pmd_mkinvalid(pmd_t pmd)
0493 {
0494     pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
0495     pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));
0496 
0497     return pmd;
0498 }
0499 
0500 #define pmd_thp_or_huge(pmd)    (pmd_huge(pmd) || pmd_trans_huge(pmd))
0501 
0502 #define pmd_write(pmd)      pte_write(pmd_pte(pmd))
0503 
0504 #define pmd_mkhuge(pmd)     (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
0505 
0506 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0507 #define pmd_devmap(pmd)     pte_devmap(pmd_pte(pmd))
0508 #endif
0509 static inline pmd_t pmd_mkdevmap(pmd_t pmd)
0510 {
0511     return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
0512 }
0513 
0514 #define __pmd_to_phys(pmd)  __pte_to_phys(pmd_pte(pmd))
0515 #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
0516 #define pmd_pfn(pmd)        ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
0517 #define pfn_pmd(pfn,prot)   __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
0518 #define mk_pmd(page,prot)   pfn_pmd(page_to_pfn(page),prot)
0519 
0520 #define pud_young(pud)      pte_young(pud_pte(pud))
0521 #define pud_mkyoung(pud)    pte_pud(pte_mkyoung(pud_pte(pud)))
0522 #define pud_write(pud)      pte_write(pud_pte(pud))
0523 
0524 #define pud_mkhuge(pud)     (__pud(pud_val(pud) & ~PUD_TABLE_BIT))
0525 
0526 #define __pud_to_phys(pud)  __pte_to_phys(pud_pte(pud))
0527 #define __phys_to_pud_val(phys) __phys_to_pte_val(phys)
0528 #define pud_pfn(pud)        ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
0529 #define pfn_pud(pfn,prot)   __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
0530 
0531 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
0532                   pmd_t *pmdp, pmd_t pmd)
0533 {
0534     page_table_check_pmd_set(mm, addr, pmdp, pmd);
0535     return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
0536 }
0537 
0538 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
0539                   pud_t *pudp, pud_t pud)
0540 {
0541     page_table_check_pud_set(mm, addr, pudp, pud);
0542     return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
0543 }
0544 
0545 #define __p4d_to_phys(p4d)  __pte_to_phys(p4d_pte(p4d))
0546 #define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)
0547 
0548 #define __pgd_to_phys(pgd)  __pte_to_phys(pgd_pte(pgd))
0549 #define __phys_to_pgd_val(phys) __phys_to_pte_val(phys)
0550 
0551 #define __pgprot_modify(prot,mask,bits) \
0552     __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
0553 
0554 #define pgprot_nx(prot) \
0555     __pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
0556 
0557 /*
0558  * Mark the prot value as uncacheable and unbufferable.
0559  */
0560 #define pgprot_noncached(prot) \
0561     __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
0562 #define pgprot_writecombine(prot) \
0563     __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
0564 #define pgprot_device(prot) \
0565     __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
0566 #define pgprot_tagged(prot) \
0567     __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
0568 #define pgprot_mhp  pgprot_tagged
0569 /*
0570  * DMA allocations for non-coherent devices use what the Arm architecture calls
0571  * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
0572  * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
0573  * is intended for MMIO and thus forbids speculation, preserves access size,
0574  * requires strict alignment and can also force write responses to come from the
0575  * endpoint.
0576  */
0577 #define pgprot_dmacoherent(prot) \
0578     __pgprot_modify(prot, PTE_ATTRINDX_MASK, \
0579             PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
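
/*
 * Editor's note: illustrative sketch, not part of the original header, of how
 * a driver's mmap() handler would apply one of the modifiers above before
 * mapping device memory. It assumes <linux/mm.h> for remap_pfn_range(), which
 * this header does not include.
 */
#if 0	/* example only */
static int __example_mmap_writecombine(struct vm_area_struct *vma,
				       unsigned long pfn)
{
	/* Normal-NC: speculation and write merging allowed, no caching */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
#endif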
0580 
0581 #define __HAVE_PHYS_MEM_ACCESS_PROT
0582 struct file;
0583 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
0584                      unsigned long size, pgprot_t vma_prot);
0585 
0586 #define pmd_none(pmd)       (!pmd_val(pmd))
0587 
0588 #define pmd_table(pmd)      ((pmd_val(pmd) & PMD_TYPE_MASK) == \
0589                  PMD_TYPE_TABLE)
0590 #define pmd_sect(pmd)       ((pmd_val(pmd) & PMD_TYPE_MASK) == \
0591                  PMD_TYPE_SECT)
0592 #define pmd_leaf(pmd)       (pmd_present(pmd) && !pmd_table(pmd))
0593 #define pmd_bad(pmd)        (!pmd_table(pmd))
0594 
0595 #define pmd_leaf_size(pmd)  (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
0596 #define pte_leaf_size(pte)  (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
0597 
0598 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
0599 static inline bool pud_sect(pud_t pud) { return false; }
0600 static inline bool pud_table(pud_t pud) { return true; }
0601 #else
0602 #define pud_sect(pud)       ((pud_val(pud) & PUD_TYPE_MASK) == \
0603                  PUD_TYPE_SECT)
0604 #define pud_table(pud)      ((pud_val(pud) & PUD_TYPE_MASK) == \
0605                  PUD_TYPE_TABLE)
0606 #endif
0607 
0608 extern pgd_t init_pg_dir[PTRS_PER_PGD];
0609 extern pgd_t init_pg_end[];
0610 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
0611 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
0612 extern pgd_t idmap_pg_end[];
0613 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
0614 extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
0615 
0616 extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
0617 
0618 static inline bool in_swapper_pgdir(void *addr)
0619 {
0620     return ((unsigned long)addr & PAGE_MASK) ==
0621             ((unsigned long)swapper_pg_dir & PAGE_MASK);
0622 }
0623 
0624 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
0625 {
0626 #ifdef __PAGETABLE_PMD_FOLDED
0627     if (in_swapper_pgdir(pmdp)) {
0628         set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
0629         return;
0630     }
0631 #endif /* __PAGETABLE_PMD_FOLDED */
0632 
0633     WRITE_ONCE(*pmdp, pmd);
0634 
0635     if (pmd_valid(pmd)) {
0636         dsb(ishst);
0637         isb();
0638     }
0639 }
0640 
0641 static inline void pmd_clear(pmd_t *pmdp)
0642 {
0643     set_pmd(pmdp, __pmd(0));
0644 }
0645 
0646 static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
0647 {
0648     return __pmd_to_phys(pmd);
0649 }
0650 
0651 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
0652 {
0653     return (unsigned long)__va(pmd_page_paddr(pmd));
0654 }
0655 
0656 /* Find an entry in the third-level page table. */
0657 #define pte_offset_phys(dir,addr)   (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
0658 
0659 #define pte_set_fixmap(addr)        ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
0660 #define pte_set_fixmap_offset(pmd, addr)    pte_set_fixmap(pte_offset_phys(pmd, addr))
0661 #define pte_clear_fixmap()      clear_fixmap(FIX_PTE)
0662 
0663 #define pmd_page(pmd)           phys_to_page(__pmd_to_phys(pmd))
0664 
0665 /* use ONLY for statically allocated translation tables */
0666 #define pte_offset_kimg(dir,addr)   ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
0667 
0668 /*
0669  * Conversion functions: convert a page and protection to a page entry,
0670  * and a page entry and page directory to the page they refer to.
0671  */
0672 #define mk_pte(page,prot)   pfn_pte(page_to_pfn(page),prot)
0673 
0674 #if CONFIG_PGTABLE_LEVELS > 2
0675 
0676 #define pmd_ERROR(e)    \
0677     pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
0678 
0679 #define pud_none(pud)       (!pud_val(pud))
0680 #define pud_bad(pud)        (!pud_table(pud))
0681 #define pud_present(pud)    pte_present(pud_pte(pud))
0682 #define pud_leaf(pud)       (pud_present(pud) && !pud_table(pud))
0683 #define pud_valid(pud)      pte_valid(pud_pte(pud))
0684 #define pud_user(pud)       pte_user(pud_pte(pud))
0685 
0686 
0687 static inline void set_pud(pud_t *pudp, pud_t pud)
0688 {
0689 #ifdef __PAGETABLE_PUD_FOLDED
0690     if (in_swapper_pgdir(pudp)) {
0691         set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
0692         return;
0693     }
0694 #endif /* __PAGETABLE_PUD_FOLDED */
0695 
0696     WRITE_ONCE(*pudp, pud);
0697 
0698     if (pud_valid(pud)) {
0699         dsb(ishst);
0700         isb();
0701     }
0702 }
0703 
0704 static inline void pud_clear(pud_t *pudp)
0705 {
0706     set_pud(pudp, __pud(0));
0707 }
0708 
0709 static inline phys_addr_t pud_page_paddr(pud_t pud)
0710 {
0711     return __pud_to_phys(pud);
0712 }
0713 
0714 static inline pmd_t *pud_pgtable(pud_t pud)
0715 {
0716     return (pmd_t *)__va(pud_page_paddr(pud));
0717 }
0718 
0719 /* Find an entry in the second-level page table. */
0720 #define pmd_offset_phys(dir, addr)  (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
0721 
0722 #define pmd_set_fixmap(addr)        ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
0723 #define pmd_set_fixmap_offset(pud, addr)    pmd_set_fixmap(pmd_offset_phys(pud, addr))
0724 #define pmd_clear_fixmap()      clear_fixmap(FIX_PMD)
0725 
0726 #define pud_page(pud)           phys_to_page(__pud_to_phys(pud))
0727 
0728 /* use ONLY for statically allocated translation tables */
0729 #define pmd_offset_kimg(dir,addr)   ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
0730 
0731 #else
0732 
0733 #define pud_page_paddr(pud) ({ BUILD_BUG(); 0; })
0734 
0735 /* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
0736 #define pmd_set_fixmap(addr)        NULL
0737 #define pmd_set_fixmap_offset(pudp, addr)   ((pmd_t *)pudp)
0738 #define pmd_clear_fixmap()
0739 
0740 #define pmd_offset_kimg(dir,addr)   ((pmd_t *)dir)
0741 
0742 #endif  /* CONFIG_PGTABLE_LEVELS > 2 */
0743 
0744 #if CONFIG_PGTABLE_LEVELS > 3
0745 
0746 #define pud_ERROR(e)    \
0747     pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
0748 
0749 #define p4d_none(p4d)       (!p4d_val(p4d))
0750 #define p4d_bad(p4d)        (!(p4d_val(p4d) & 2))
0751 #define p4d_present(p4d)    (p4d_val(p4d))
0752 
0753 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
0754 {
0755     if (in_swapper_pgdir(p4dp)) {
0756         set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
0757         return;
0758     }
0759 
0760     WRITE_ONCE(*p4dp, p4d);
0761     dsb(ishst);
0762     isb();
0763 }
0764 
0765 static inline void p4d_clear(p4d_t *p4dp)
0766 {
0767     set_p4d(p4dp, __p4d(0));
0768 }
0769 
0770 static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
0771 {
0772     return __p4d_to_phys(p4d);
0773 }
0774 
0775 static inline pud_t *p4d_pgtable(p4d_t p4d)
0776 {
0777     return (pud_t *)__va(p4d_page_paddr(p4d));
0778 }
0779 
0780 /* Find an entry in the first-level page table. */
0781 #define pud_offset_phys(dir, addr)  (p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
0782 
0783 #define pud_set_fixmap(addr)        ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
0784 #define pud_set_fixmap_offset(p4d, addr)    pud_set_fixmap(pud_offset_phys(p4d, addr))
0785 #define pud_clear_fixmap()      clear_fixmap(FIX_PUD)
0786 
0787 #define p4d_page(p4d)       pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
0788 
0789 /* use ONLY for statically allocated translation tables */
0790 #define pud_offset_kimg(dir,addr)   ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
0791 
0792 #else
0793 
0794 #define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0; })
0795 #define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0; })
0796 
0797 /* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
0798 #define pud_set_fixmap(addr)        NULL
0799 #define pud_set_fixmap_offset(pgdp, addr)   ((pud_t *)pgdp)
0800 #define pud_clear_fixmap()
0801 
0802 #define pud_offset_kimg(dir,addr)   ((pud_t *)dir)
0803 
0804 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
0805 
0806 #define pgd_ERROR(e)    \
0807     pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
0808 
0809 #define pgd_set_fixmap(addr)    ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
0810 #define pgd_clear_fixmap()  clear_fixmap(FIX_PGD)
0811 
0812 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
0813 {
0814     /*
0815      * Normal and Normal-Tagged are two different memory types and indices
0816      * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
0817      */
0818     const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
0819                   PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
0820                   PTE_ATTRINDX_MASK;
0821     /* preserve the hardware dirty information */
0822     if (pte_hw_dirty(pte))
0823         pte = pte_mkdirty(pte);
0824     pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
0825     return pte;
0826 }
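
/*
 * Editor's note: illustrative sketch, not part of the original header. It
 * shows pte_modify() used the way mprotect() would: bits outside `mask' above
 * are preserved from the old pte, the protection bits and memory type
 * (PTE_ATTRINDX) come from the new protection, and any hardware dirty state
 * is folded into PTE_DIRTY first so it is not lost.
 */
#if 0	/* example only */
static inline pte_t __make_pte_readonly(pte_t pte)
{
	return pte_modify(pte, PAGE_READONLY);
}
#endif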
0827 
0828 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
0829 {
0830     return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
0831 }
0832 
0833 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
0834 extern int ptep_set_access_flags(struct vm_area_struct *vma,
0835                  unsigned long address, pte_t *ptep,
0836                  pte_t entry, int dirty);
0837 
0838 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0839 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
0840 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
0841                     unsigned long address, pmd_t *pmdp,
0842                     pmd_t entry, int dirty)
0843 {
0844     return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
0845 }
0846 
0847 static inline int pud_devmap(pud_t pud)
0848 {
0849     return 0;
0850 }
0851 
0852 static inline int pgd_devmap(pgd_t pgd)
0853 {
0854     return 0;
0855 }
0856 #endif
0857 
0858 #ifdef CONFIG_PAGE_TABLE_CHECK
0859 static inline bool pte_user_accessible_page(pte_t pte)
0860 {
0861     return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
0862 }
0863 
0864 static inline bool pmd_user_accessible_page(pmd_t pmd)
0865 {
0866     return pmd_present(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
0867 }
0868 
0869 static inline bool pud_user_accessible_page(pud_t pud)
0870 {
0871     return pud_present(pud) && pud_user(pud);
0872 }
0873 #endif
0874 
0875 /*
0876  * Atomic pte/pmd modifications.
0877  */
0878 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
0879 static inline int __ptep_test_and_clear_young(pte_t *ptep)
0880 {
0881     pte_t old_pte, pte;
0882 
0883     pte = READ_ONCE(*ptep);
0884     do {
0885         old_pte = pte;
0886         pte = pte_mkold(pte);
0887         pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
0888                            pte_val(old_pte), pte_val(pte));
0889     } while (pte_val(pte) != pte_val(old_pte));
0890 
0891     return pte_young(pte);
0892 }
0893 
0894 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
0895                         unsigned long address,
0896                         pte_t *ptep)
0897 {
0898     return __ptep_test_and_clear_young(ptep);
0899 }
0900 
0901 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
0902 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
0903                      unsigned long address, pte_t *ptep)
0904 {
0905     int young = ptep_test_and_clear_young(vma, address, ptep);
0906 
0907     if (young) {
0908         /*
0909          * We can elide the trailing DSB here since the worst that can
0910          * happen is that a CPU continues to use the young entry in its
0911          * TLB and we mistakenly reclaim the associated page. The
0912          * window for such an event is bounded by the next
0913          * context-switch, which provides a DSB to complete the TLB
0914          * invalidation.
0915          */
0916         flush_tlb_page_nosync(vma, address);
0917     }
0918 
0919     return young;
0920 }
0921 
0922 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0923 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
0924 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
0925                         unsigned long address,
0926                         pmd_t *pmdp)
0927 {
0928     return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
0929 }
0930 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
0931 
0932 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
0933 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
0934                        unsigned long address, pte_t *ptep)
0935 {
0936     pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
0937 
0938     page_table_check_pte_clear(mm, address, pte);
0939 
0940     return pte;
0941 }
0942 
0943 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0944 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
0945 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
0946                         unsigned long address, pmd_t *pmdp)
0947 {
0948     pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
0949 
0950     page_table_check_pmd_clear(mm, address, pmd);
0951 
0952     return pmd;
0953 }
0954 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
0955 
0956 /*
0957  * ptep_set_wrprotect - mark read-only while transferring potential hardware
0958  * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
0959  */
0960 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
0961 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
0962 {
0963     pte_t old_pte, pte;
0964 
0965     pte = READ_ONCE(*ptep);
0966     do {
0967         old_pte = pte;
0968         pte = pte_wrprotect(pte);
0969         pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
0970                            pte_val(old_pte), pte_val(pte));
0971     } while (pte_val(pte) != pte_val(old_pte));
0972 }
0973 
0974 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0975 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
0976 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
0977                       unsigned long address, pmd_t *pmdp)
0978 {
0979     ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
0980 }
0981 
0982 #define pmdp_establish pmdp_establish
0983 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
0984         unsigned long address, pmd_t *pmdp, pmd_t pmd)
0985 {
0986     page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
0987     return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
0988 }
0989 #endif
0990 
0991 /*
0992  * Encode and decode a swap entry:
0993  *  bits 0-1:   present (must be zero)
0994  *  bit  2:     remember PG_anon_exclusive
0995  *  bits 3-7:   swap type
0996  *  bits 8-57:  swap offset
0997  *  bit  58:    PTE_PROT_NONE (must be zero)
0998  */
0999 #define __SWP_TYPE_SHIFT    3
1000 #define __SWP_TYPE_BITS     5
1001 #define __SWP_OFFSET_BITS   50
1002 #define __SWP_TYPE_MASK     ((1 << __SWP_TYPE_BITS) - 1)
1003 #define __SWP_OFFSET_SHIFT  (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
1004 #define __SWP_OFFSET_MASK   ((1UL << __SWP_OFFSET_BITS) - 1)
1005 
1006 #define __swp_type(x)       (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
1007 #define __swp_offset(x)     (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
1008 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
1009 
1010 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1011 #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
1012 
1013 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1014 #define __pmd_to_swp_entry(pmd)     ((swp_entry_t) { pmd_val(pmd) })
1015 #define __swp_entry_to_pmd(swp)     __pmd((swp).val)
1016 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
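
/*
 * Editor's note: illustrative sketch, not part of the original header. It
 * checks that the encoding above round-trips: a (type, offset) pair packed
 * with __swp_entry() is recovered intact by __swp_type()/__swp_offset(),
 * provided type fits in 5 bits and offset in 50 bits.
 */
#if 0	/* example only */
static inline bool __swp_encoding_roundtrips(unsigned long type, pgoff_t offset)
{
	swp_entry_t entry = __swp_entry(type, offset);

	return __swp_type(entry) == type && __swp_offset(entry) == offset;
}
#endif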
1017 
1018 /*
1019  * Ensure that there are not more swap files than can be encoded in the kernel
1020  * PTEs.
1021  */
1022 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
1023 
1024 extern int kern_addr_valid(unsigned long addr);
1025 
1026 #ifdef CONFIG_ARM64_MTE
1027 
1028 #define __HAVE_ARCH_PREPARE_TO_SWAP
1029 static inline int arch_prepare_to_swap(struct page *page)
1030 {
1031     if (system_supports_mte())
1032         return mte_save_tags(page);
1033     return 0;
1034 }
1035 
1036 #define __HAVE_ARCH_SWAP_INVALIDATE
1037 static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
1038 {
1039     if (system_supports_mte())
1040         mte_invalidate_tags(type, offset);
1041 }
1042 
1043 static inline void arch_swap_invalidate_area(int type)
1044 {
1045     if (system_supports_mte())
1046         mte_invalidate_tags_area(type);
1047 }
1048 
1049 #define __HAVE_ARCH_SWAP_RESTORE
1050 static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
1051 {
1052     if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
1053         set_bit(PG_mte_tagged, &folio->flags);
1054 }
1055 
1056 #endif /* CONFIG_ARM64_MTE */
1057 
1058 /*
1059  * On AArch64, the cache coherency is handled via the set_pte_at() function.
1060  */
1061 static inline void update_mmu_cache(struct vm_area_struct *vma,
1062                     unsigned long addr, pte_t *ptep)
1063 {
1064     /*
1065      * We don't do anything here, so there's a very small chance of
1066      * us retaking a user fault which we just fixed up. The alternative
1067      * is doing a dsb(ishst), but that penalises the fastpath.
1068      */
1069 }
1070 
1071 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
1072 
1073 #ifdef CONFIG_ARM64_PA_BITS_52
1074 #define phys_to_ttbr(addr)  (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
1075 #else
1076 #define phys_to_ttbr(addr)  (addr)
1077 #endif
1078 
1079 /*
1080  * On arm64 without hardware Access Flag, copying from user will fail because
1081  * the pte is old and cannot be marked young. So we always end up with zeroed
1082  * the pte is old and cannot be marked young. So we always end up with a zeroed
1083  * hardware-managed access flag on arm64.
1084  */
1085 static inline bool arch_faults_on_old_pte(void)
1086 {
1087     /* The register read below requires a stable CPU to make any sense */
1088     cant_migrate();
1089 
1090     return !cpu_has_hw_af();
1091 }
1092 #define arch_faults_on_old_pte      arch_faults_on_old_pte
1093 
1094 /*
1095  * Experimentally, it's cheap to set the access flag in hardware and we
1096  * benefit from prefaulting mappings as 'old' to start with.
1097  */
1098 static inline bool arch_wants_old_prefaulted_pte(void)
1099 {
1100     return !arch_faults_on_old_pte();
1101 }
1102 #define arch_wants_old_prefaulted_pte   arch_wants_old_prefaulted_pte
1103 
1104 static inline bool pud_sect_supported(void)
1105 {
1106     return PAGE_SIZE == SZ_4K;
1107 }
1108 
1109 
1110 #endif /* !__ASSEMBLY__ */
1111 
1112 #endif /* __ASM_PGTABLE_H */