/* (LXR viewer navigation chrome removed — not part of the header.) */
0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * pgtable.h: SpitFire page table operations.
0004  *
0005  * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
0006  * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
0007  */
0008 
0009 #ifndef _SPARC64_PGTABLE_H
0010 #define _SPARC64_PGTABLE_H
0011 
0012 /* This file contains the functions and defines necessary to modify and use
0013  * the SpitFire page tables.
0014  */
0015 
0016 #include <asm-generic/pgtable-nop4d.h>
0017 #include <linux/compiler.h>
0018 #include <linux/const.h>
0019 #include <asm/types.h>
0020 #include <asm/spitfire.h>
0021 #include <asm/asi.h>
0022 #include <asm/adi.h>
0023 #include <asm/page.h>
0024 #include <asm/processor.h>
0025 
0026 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
0027  * The page copy blockops can use 0x6000000 to 0x8000000.
0028  * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
0029  * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
0030  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
0031  * The vmalloc area spans 0x100000000 to 0x200000000.
0032  * Since modules need to be in the lowest 32-bits of the address space,
0033  * we place them right before the OBP area from 0x10000000 to 0xf0000000.
0034  * There is a single static kernel PMD which maps from 0x0 to address
0035  * 0x400000000.
0036  */
/* Fixed kernel virtual address layout; see the map in the comment above. */
0037 #define TLBTEMP_BASE        _AC(0x0000000006000000,UL)
0038 #define TSBMAP_8K_BASE      _AC(0x0000000008000000,UL)
0039 #define TSBMAP_4M_BASE      _AC(0x0000000008400000,UL)
0040 #define MODULES_VADDR       _AC(0x0000000010000000,UL)
/* MODULES_LEN == MODULES_END - MODULES_VADDR. */
0041 #define MODULES_LEN     _AC(0x00000000e0000000,UL)
0042 #define MODULES_END     _AC(0x00000000f0000000,UL)
0043 #define LOW_OBP_ADDRESS     _AC(0x00000000f0000000,UL)
0044 #define HI_OBP_ADDRESS      _AC(0x0000000100000000,UL)
0045 #define VMALLOC_START       _AC(0x0000000100000000,UL)
/* VMALLOC_END is a runtime variable (declared below); vmemmap follows it. */
0046 #define VMEMMAP_BASE        VMALLOC_END
0047 
0048 /* PMD_SHIFT determines the size of the area a second-level page
0049  * table can map
0050  */
/* Each table is one page of 8-byte entries, hence PAGE_SHIFT - 3 bits per
 * level.
 */
0051 #define PMD_SHIFT   (PAGE_SHIFT + (PAGE_SHIFT-3))
0052 #define PMD_SIZE    (_AC(1,UL) << PMD_SHIFT)
0053 #define PMD_MASK    (~(PMD_SIZE-1))
0054 #define PMD_BITS    (PAGE_SHIFT - 3)
0055 
0056 /* PUD_SHIFT determines the size of the area a third-level page
0057  * table can map
0058  */
0059 #define PUD_SHIFT   (PMD_SHIFT + PMD_BITS)
0060 #define PUD_SIZE    (_AC(1,UL) << PUD_SHIFT)
0061 #define PUD_MASK    (~(PUD_SIZE-1))
0062 #define PUD_BITS    (PAGE_SHIFT - 3)
0063 
0064 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */
0065 #define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)
0066 #define PGDIR_SIZE  (_AC(1,UL) << PGDIR_SHIFT)
0067 #define PGDIR_MASK  (~(PGDIR_SIZE-1))
0068 #define PGDIR_BITS  (PAGE_SHIFT - 3)
0069 
/* Compile-time sanity checks on the 4-level layout. */
0070 #if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
0071 #error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
0072 #endif
0073 
0074 #if (PGDIR_SHIFT + PGDIR_BITS) != 53
0075 #error Page table parameters do not cover virtual address space properly.
0076 #endif
0077 
0078 #if (PMD_SHIFT != HPAGE_SHIFT)
0079 #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
0080 #endif
0081 
0082 #ifndef __ASSEMBLY__
0083 
0084 extern unsigned long VMALLOC_END;
0085 
0086 #define vmemmap         ((struct page *)VMEMMAP_BASE)
0087 
0088 #include <linux/sched.h>
0089 
0090 bool kern_addr_valid(unsigned long addr);
0091 
0092 /* Entries per page directory level. */
0093 #define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
0094 #define PTRS_PER_PMD    (1UL << PMD_BITS)
0095 #define PTRS_PER_PUD    (1UL << PUD_BITS)
0096 #define PTRS_PER_PGD    (1UL << PGDIR_BITS)
0097 
/* Report a corrupt table entry, including the caller's symbol (%pS). */
0098 #define pmd_ERROR(e)                            \
0099     pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",     \
0100            __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
0101 #define pud_ERROR(e)                            \
0102     pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",     \
0103            __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
0104 #define pgd_ERROR(e)                            \
0105     pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",     \
0106            __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
0107 
0108 #endif /* !(__ASSEMBLY__) */
0109 
0110 /* PTE bits which are the same in SUN4U and SUN4V format.  */
0111 #define _PAGE_VALID   _AC(0x8000000000000000,UL) /* Valid TTE            */
/* NOTE: _PAGE_VALID and _PAGE_R intentionally share bit 63. */
0112 #define _PAGE_R       _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
0113 #define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
0114 #define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
0115 #define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE
0116 
0117 /* SUN4U pte bits... */
0118 #define _PAGE_SZ4MB_4U    _AC(0x6000000000000000,UL) /* 4MB Page             */
0119 #define _PAGE_SZ512K_4U   _AC(0x4000000000000000,UL) /* 512K Page            */
0120 #define _PAGE_SZ64K_4U    _AC(0x2000000000000000,UL) /* 64K Page             */
0121 #define _PAGE_SZ8K_4U     _AC(0x0000000000000000,UL) /* 8K Page              */
0122 #define _PAGE_NFO_4U      _AC(0x1000000000000000,UL) /* No Fault Only        */
0123 #define _PAGE_IE_4U   _AC(0x0800000000000000,UL) /* Invert Endianness    */
0124 #define _PAGE_SOFT2_4U    _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
0125 #define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
0126 #define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
0127 #define _PAGE_RES1_4U     _AC(0x0002000000000000,UL) /* Reserved             */
/* Panther's extra page sizes combine bit 48 with the SZ field above. */
0128 #define _PAGE_SZ32MB_4U   _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
0129 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
0130 #define _PAGE_SZALL_4U    _AC(0x6001000000000000,UL) /* All pgsz bits        */
0131 #define _PAGE_SN_4U   _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
0132 #define _PAGE_RES2_4U     _AC(0x0000780000000000,UL) /* Reserved             */
0133 #define _PAGE_PADDR_4U    _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
0134 #define _PAGE_SOFT_4U     _AC(0x0000000000001F80,UL) /* Software bits:       */
0135 #define _PAGE_EXEC_4U     _AC(0x0000000000001000,UL) /* Executable SW bit    */
0136 #define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
0137 #define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
0138 #define _PAGE_READ_4U     _AC(0x0000000000000200,UL) /* Readable SW Bit      */
0139 #define _PAGE_WRITE_4U    _AC(0x0000000000000100,UL) /* Writable SW Bit      */
0140 #define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
0141 #define _PAGE_L_4U    _AC(0x0000000000000040,UL) /* Locked TTE           */
0142 #define _PAGE_CP_4U   _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
0143 #define _PAGE_CV_4U   _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
0144 #define _PAGE_E_4U    _AC(0x0000000000000008,UL) /* side-Effect          */
0145 #define _PAGE_P_4U    _AC(0x0000000000000004,UL) /* Privileged Page      */
0146 #define _PAGE_W_4U    _AC(0x0000000000000002,UL) /* Writable             */
0147 
0148 /* SUN4V pte bits... */
0149 #define _PAGE_NFO_4V      _AC(0x4000000000000000,UL) /* No Fault Only        */
0150 #define _PAGE_SOFT2_4V    _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
0151 #define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
0152 #define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
0153 #define _PAGE_READ_4V     _AC(0x0800000000000000,UL) /* Readable SW Bit      */
0154 #define _PAGE_WRITE_4V    _AC(0x0400000000000000,UL) /* Writable SW Bit      */
0155 #define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
0156 #define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
0157 #define _PAGE_PADDR_4V    _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
0158 #define _PAGE_IE_4V   _AC(0x0000000000001000,UL) /* Invert Endianness    */
0159 #define _PAGE_E_4V    _AC(0x0000000000000800,UL) /* side-Effect          */
0160 #define _PAGE_CP_4V   _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
0161 #define _PAGE_CV_4V   _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
0162 /* Bit 9 is used to enable MCD corruption detection instead on M7 */
0163 #define _PAGE_MCD_4V      _AC(0x0000000000000200,UL) /* Memory Corruption    */
0164 #define _PAGE_P_4V    _AC(0x0000000000000100,UL) /* Privileged Page      */
0165 #define _PAGE_EXEC_4V     _AC(0x0000000000000080,UL) /* Executable Page      */
0166 #define _PAGE_W_4V    _AC(0x0000000000000040,UL) /* Writable             */
0167 #define _PAGE_SOFT_4V     _AC(0x0000000000000030,UL) /* Software bits        */
0168 #define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
0169 #define _PAGE_RESV_4V     _AC(0x0000000000000008,UL) /* Reserved             */
0170 #define _PAGE_SZ16GB_4V   _AC(0x0000000000000007,UL) /* 16GB Page            */
0171 #define _PAGE_SZ2GB_4V    _AC(0x0000000000000006,UL) /* 2GB Page             */
0172 #define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
0173 #define _PAGE_SZ32MB_4V   _AC(0x0000000000000004,UL) /* 32MB Page            */
0174 #define _PAGE_SZ4MB_4V    _AC(0x0000000000000003,UL) /* 4MB Page             */
0175 #define _PAGE_SZ512K_4V   _AC(0x0000000000000002,UL) /* 512K Page            */
0176 #define _PAGE_SZ64K_4V    _AC(0x0000000000000001,UL) /* 64K Page             */
0177 #define _PAGE_SZ8K_4V     _AC(0x0000000000000000,UL) /* 8K Page              */
0178 #define _PAGE_SZALL_4V    _AC(0x0000000000000007,UL) /* All pgsz bits        */
0179 
/* The size field for the default 8K page is all zeroes in both formats;
 * pfn_pte() relies on this (see its BUILD_BUG_ON).
 */
0180 #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
0181 #define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
0182 
0183 #if REAL_HPAGE_SHIFT != 22
0184 #error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
0185 #endif
0186 
/* Default huge page is the hardware 4MB (REAL_HPAGE_SHIFT == 22) size. */
0187 #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
0188 #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
0189 
0190 #ifndef __ASSEMBLY__
0191 
0192 pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
0193 
0194 unsigned long pte_sz_bits(unsigned long size);
0195 
0196 extern pgprot_t PAGE_KERNEL;
0197 extern pgprot_t PAGE_KERNEL_LOCKED;
0198 extern pgprot_t PAGE_COPY;
0199 extern pgprot_t PAGE_SHARED;
0200 
0201 /* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
0202 extern unsigned long _PAGE_IE;
0203 extern unsigned long _PAGE_E;
0204 extern unsigned long _PAGE_CACHE;
0205 
0206 extern unsigned long pg_iobits;
0207 extern unsigned long _PAGE_ALL_SZ_BITS;
0208 
0209 extern struct page *mem_map_zero;
0210 #define ZERO_PAGE(vaddr)    (mem_map_zero)
0211 
0212 /* PFNs are real physical page numbers.  However, mem_map only begins to record
0213  * per-page information starting at pfn_base.  This is to handle systems where
0214  * the first physical page in the machine is at some huge physical address,
0215  * such as 4GB.   This is common on a partitioned E10000, for example.
0216  */
0217 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
0218 {
0219     unsigned long paddr = pfn << PAGE_SHIFT;
0220 
0221     BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
0222     return __pte(paddr | pgprot_val(prot));
0223 }
0224 #define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
0225 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Build a huge-PMD entry for frame @page_nr; a huge PMD carries the
 * same bit layout as a pte.
 */
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(pte_val(pfn_pte(page_nr, pgprot)));
}
#define mk_pmd(page, pgprot)    pfn_pmd(page_to_pfn(page), (pgprot))
#endif
0235 
0236 /* This one can be done with two shifts.  */
/* Extract the pfn: shift left to drop everything above the physical
 * address field (21 bits for sun4u pa[42:13], 8 for sun4v paddr[55:13]),
 * then shift right by the same amount plus PAGE_SHIFT.  The
 * .sun4v_2insn_patch section rewrites the two instructions at 661 with
 * the sun4v variants at boot.
 */
0237 static inline unsigned long pte_pfn(pte_t pte)
0238 {
0239     unsigned long ret;
0240 
0241     __asm__ __volatile__(
0242     "\n661: sllx        %1, %2, %0\n"
0243     "   srlx        %0, %3, %0\n"
0244     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0245     "   .word       661b\n"
0246     "   sllx        %1, %4, %0\n"
0247     "   srlx        %0, %5, %0\n"
0248     "   .previous\n"
0249     : "=r" (ret)
0250     : "r" (pte_val(pte)),
0251       "i" (21), "i" (21 + PAGE_SHIFT),
0252       "i" (8), "i" (8 + PAGE_SHIFT));
0253 
0254     return ret;
0255 }
0256 #define pte_page(x) pfn_to_page(pte_pfn(x))
0257 
/* Apply new protections @prot while preserving everything pte_modify
 * must keep: physical address, dirty/accessed, cacheability, side-effect,
 * special, huge and page-size bits (see the mask operands below).  The
 * three patch sites (661/662/663) swap the sun4u mask-building sequence
 * for the sun4v one at boot; the M7 variant's mask omits _PAGE_CV_4V
 * because bit 9 doubles as _PAGE_MCD_4V there.
 */
0258 static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
0259 {
0260     unsigned long mask, tmp;
0261 
0262     /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
0263      * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
0264      *
0265      * Even if we use negation tricks the result is still a 6
0266      * instruction sequence, so don't try to play fancy and just
0267      * do the most straightforward implementation.
0268      *
0269      * Note: We encode this into 3 sun4v 2-insn patch sequences.
0270      */
0271 
0272     BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
0273     __asm__ __volatile__(
0274     "\n661: sethi       %%uhi(%2), %1\n"
0275     "   sethi       %%hi(%2), %0\n"
0276     "\n662: or      %1, %%ulo(%2), %1\n"
0277     "   or      %0, %%lo(%2), %0\n"
0278     "\n663: sllx        %1, 32, %1\n"
0279     "   or      %0, %1, %0\n"
0280     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0281     "   .word       661b\n"
0282     "   sethi       %%uhi(%3), %1\n"
0283     "   sethi       %%hi(%3), %0\n"
0284     "   .word       662b\n"
0285     "   or      %1, %%ulo(%3), %1\n"
0286     "   or      %0, %%lo(%3), %0\n"
0287     "   .word       663b\n"
0288     "   sllx        %1, 32, %1\n"
0289     "   or      %0, %1, %0\n"
0290     "   .previous\n"
0291     "   .section    .sun_m7_2insn_patch, \"ax\"\n"
0292     "   .word       661b\n"
0293     "   sethi       %%uhi(%4), %1\n"
0294     "   sethi       %%hi(%4), %0\n"
0295     "   .word       662b\n"
0296     "   or      %1, %%ulo(%4), %1\n"
0297     "   or      %0, %%lo(%4), %0\n"
0298     "   .word       663b\n"
0299     "   sllx        %1, 32, %1\n"
0300     "   or      %0, %1, %0\n"
0301     "   .previous\n"
0302     : "=r" (mask), "=r" (tmp)
0303     : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
0304            _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
0305            _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
0306       "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
0307            _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
0308            _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
0309       "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
0310            _PAGE_CP_4V | _PAGE_E_4V |
0311            _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
0312 
0313     return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
0314 }
0315 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Apply new protections to a huge PMD by reusing the pte path. */
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return __pmd(pte_val(pte_modify(__pte(pmd_val(pmd)), newprot)));
}
#endif
0326 
/* Make a mapping non-cached: clear the cacheability bits and set the
 * side-effect (E) bit.  The M7 variant clears only _PAGE_CP_4V since
 * bit 9 doubles as _PAGE_MCD_4V there (see the pte bit definitions).
 */
0327 static inline pgprot_t pgprot_noncached(pgprot_t prot)
0328 {
0329     unsigned long val = pgprot_val(prot);
0330 
0331     __asm__ __volatile__(
0332     "\n661: andn        %0, %2, %0\n"
0333     "   or      %0, %3, %0\n"
0334     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0335     "   .word       661b\n"
0336     "   andn        %0, %4, %0\n"
0337     "   or      %0, %5, %0\n"
0338     "   .previous\n"
0339     "   .section    .sun_m7_2insn_patch, \"ax\"\n"
0340     "   .word       661b\n"
0341     "   andn        %0, %6, %0\n"
0342     "   or      %0, %5, %0\n"
0343     "   .previous\n"
0344     : "=r" (val)
0345     : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
0346                  "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
0347                  "i" (_PAGE_CP_4V));
0348 
0349     return __pgprot(val);
0350 }
0351 /* Various pieces of code check for platform support by ifdef testing
0352  * on "pgprot_noncached".  That's broken and should be fixed, but for
0353  * now...
0354  */
0355 #define pgprot_noncached pgprot_noncached
0356 
0357 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
0358 pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
0359 #define arch_make_huge_pte arch_make_huge_pte
/* Page-size bits for the default huge page: _PAGE_SZHUGE_4U on sun4u,
 * patched at boot to _PAGE_SZHUGE_4V on sun4v.
 */
0360 static inline unsigned long __pte_default_huge_mask(void)
0361 {
0362     unsigned long mask;
0363 
0364     __asm__ __volatile__(
0365     "\n661: sethi       %%uhi(%1), %0\n"
0366     "   sllx        %0, 32, %0\n"
0367     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0368     "   .word       661b\n"
0369     "   mov     %2, %0\n"
0370     "   nop\n"
0371     "   .previous\n"
0372     : "=r" (mask)
0373     : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
0374 
0375     return mask;
0376 }
0377 
0378 static inline pte_t pte_mkhuge(pte_t pte)
0379 {
0380     return __pte(pte_val(pte) | __pte_default_huge_mask());
0381 }
0382 
0383 static inline bool is_default_hugetlb_pte(pte_t pte)
0384 {
0385     unsigned long mask = __pte_default_huge_mask();
0386 
0387     return (pte_val(pte) & mask) == mask;
0388 }
0389 
0390 static inline bool is_hugetlb_pmd(pmd_t pmd)
0391 {
0392     return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
0393 }
0394 
0395 static inline bool is_hugetlb_pud(pud_t pud)
0396 {
0397     return !!(pud_val(pud) & _PAGE_PUD_HUGE);
0398 }
0399 
0400 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0401 static inline pmd_t pmd_mkhuge(pmd_t pmd)
0402 {
0403     pte_t pte = __pte(pmd_val(pmd));
0404 
0405     pte = pte_mkhuge(pte);
0406     pte_val(pte) |= _PAGE_PMD_HUGE;
0407 
0408     return __pmd(pte_val(pte));
0409 }
0410 #endif
0411 #else
0412 static inline bool is_hugetlb_pte(pte_t pte)
0413 {
0414     return false;
0415 }
0416 #endif
0417 
/* Mark dirty: sets MODIFIED (sw) and W (hw writable); the sun4v patch
 * builds the equivalent 4V mask in %1 at boot.
 */
0418 static inline pte_t pte_mkdirty(pte_t pte)
0419 {
0420     unsigned long val = pte_val(pte), tmp;
0421 
0422     __asm__ __volatile__(
0423     "\n661: or      %0, %3, %0\n"
0424     "   nop\n"
0425     "\n662: nop\n"
0426     "   nop\n"
0427     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0428     "   .word       661b\n"
0429     "   sethi       %%uhi(%4), %1\n"
0430     "   sllx        %1, 32, %1\n"
0431     "   .word       662b\n"
0432     "   or      %1, %%lo(%4), %1\n"
0433     "   or      %0, %1, %0\n"
0434     "   .previous\n"
0435     : "=r" (val), "=r" (tmp)
0436     : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
0437       "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
0438 
0439     return __pte(val);
0440 }
0441 
/* Mark clean: clears the same MODIFIED | W bits pte_mkdirty() sets. */
0442 static inline pte_t pte_mkclean(pte_t pte)
0443 {
0444     unsigned long val = pte_val(pte), tmp;
0445 
0446     __asm__ __volatile__(
0447     "\n661: andn        %0, %3, %0\n"
0448     "   nop\n"
0449     "\n662: nop\n"
0450     "   nop\n"
0451     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0452     "   .word       661b\n"
0453     "   sethi       %%uhi(%4), %1\n"
0454     "   sllx        %1, 32, %1\n"
0455     "   .word       662b\n"
0456     "   or      %1, %%lo(%4), %1\n"
0457     "   andn        %0, %1, %0\n"
0458     "   .previous\n"
0459     : "=r" (val), "=r" (tmp)
0460     : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
0461       "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
0462 
0463     return __pte(val);
0464 }
0465 
/* Grant write permission: sets only the software WRITE bit; the hardware
 * W bit is set later via pte_mkdirty()/fault handling.
 */
0466 static inline pte_t pte_mkwrite(pte_t pte)
0467 {
0468     unsigned long val = pte_val(pte), mask;
0469 
0470     __asm__ __volatile__(
0471     "\n661: mov     %1, %0\n"
0472     "   nop\n"
0473     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0474     "   .word       661b\n"
0475     "   sethi       %%uhi(%2), %0\n"
0476     "   sllx        %0, 32, %0\n"
0477     "   .previous\n"
0478     : "=r" (mask)
0479     : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
0480 
0481     return __pte(val | mask);
0482 }
0483 
/* Revoke write permission: clears both the software WRITE bit and the
 * hardware W bit.
 */
0484 static inline pte_t pte_wrprotect(pte_t pte)
0485 {
0486     unsigned long val = pte_val(pte), tmp;
0487 
0488     __asm__ __volatile__(
0489     "\n661: andn        %0, %3, %0\n"
0490     "   nop\n"
0491     "\n662: nop\n"
0492     "   nop\n"
0493     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0494     "   .word       661b\n"
0495     "   sethi       %%uhi(%4), %1\n"
0496     "   sllx        %1, 32, %1\n"
0497     "   .word       662b\n"
0498     "   or      %1, %%lo(%4), %1\n"
0499     "   andn        %0, %1, %0\n"
0500     "   .previous\n"
0501     : "=r" (val), "=r" (tmp)
0502     : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
0503       "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
0504 
0505     return __pte(val);
0506 }
0507 
/* Clear the accessed state: drops the per-cpu-type ACCESSED software bit
 * and the shared _PAGE_R hardware referenced bit.
 */
0508 static inline pte_t pte_mkold(pte_t pte)
0509 {
0510     unsigned long mask;
0511 
0512     __asm__ __volatile__(
0513     "\n661: mov     %1, %0\n"
0514     "   nop\n"
0515     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0516     "   .word       661b\n"
0517     "   sethi       %%uhi(%2), %0\n"
0518     "   sllx        %0, 32, %0\n"
0519     "   .previous\n"
0520     : "=r" (mask)
0521     : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
0522 
0523     mask |= _PAGE_R;
0524 
0525     return __pte(pte_val(pte) & ~mask);
0526 }
0527 
/* Set the accessed state: same mask as pte_mkold(), OR'ed in instead. */
0528 static inline pte_t pte_mkyoung(pte_t pte)
0529 {
0530     unsigned long mask;
0531 
0532     __asm__ __volatile__(
0533     "\n661: mov     %1, %0\n"
0534     "   nop\n"
0535     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0536     "   .word       661b\n"
0537     "   sethi       %%uhi(%2), %0\n"
0538     "   sllx        %0, 32, %0\n"
0539     "   .previous\n"
0540     : "=r" (mask)
0541     : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
0542 
0543     mask |= _PAGE_R;
0544 
0545     return __pte(pte_val(pte) | mask);
0546 }
0547 
0548 static inline pte_t pte_mkspecial(pte_t pte)
0549 {
0550     pte_val(pte) |= _PAGE_SPECIAL;
0551     return pte;
0552 }
0553 
0554 static inline pte_t pte_mkmcd(pte_t pte)
0555 {
0556     pte_val(pte) |= _PAGE_MCD_4V;
0557     return pte;
0558 }
0559 
0560 static inline pte_t pte_mknotmcd(pte_t pte)
0561 {
0562     pte_val(pte) &= ~_PAGE_MCD_4V;
0563     return pte;
0564 }
0565 
/* Test the accessed software bit (per cpu type, patched at boot). */
0566 static inline unsigned long pte_young(pte_t pte)
0567 {
0568     unsigned long mask;
0569 
0570     __asm__ __volatile__(
0571     "\n661: mov     %1, %0\n"
0572     "   nop\n"
0573     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0574     "   .word       661b\n"
0575     "   sethi       %%uhi(%2), %0\n"
0576     "   sllx        %0, 32, %0\n"
0577     "   .previous\n"
0578     : "=r" (mask)
0579     : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
0580 
0581     return (pte_val(pte) & mask);
0582 }
0583 
/* Test the modified (dirty) software bit. */
0584 static inline unsigned long pte_dirty(pte_t pte)
0585 {
0586     unsigned long mask;
0587 
0588     __asm__ __volatile__(
0589     "\n661: mov     %1, %0\n"
0590     "   nop\n"
0591     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0592     "   .word       661b\n"
0593     "   sethi       %%uhi(%2), %0\n"
0594     "   sllx        %0, 32, %0\n"
0595     "   .previous\n"
0596     : "=r" (mask)
0597     : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
0598 
0599     return (pte_val(pte) & mask);
0600 }
0601 
/* Test the writable software bit. */
0602 static inline unsigned long pte_write(pte_t pte)
0603 {
0604     unsigned long mask;
0605 
0606     __asm__ __volatile__(
0607     "\n661: mov     %1, %0\n"
0608     "   nop\n"
0609     "   .section    .sun4v_2insn_patch, \"ax\"\n"
0610     "   .word       661b\n"
0611     "   sethi       %%uhi(%2), %0\n"
0612     "   sllx        %0, 32, %0\n"
0613     "   .previous\n"
0614     : "=r" (mask)
0615     : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
0616 
0617     return (pte_val(pte) & mask);
0618 }
0619 
/* Test the executable bit; both masks fit a single instruction here. */
0620 static inline unsigned long pte_exec(pte_t pte)
0621 {
0622     unsigned long mask;
0623 
0624     __asm__ __volatile__(
0625     "\n661: sethi       %%hi(%1), %0\n"
0626     "   .section    .sun4v_1insn_patch, \"ax\"\n"
0627     "   .word       661b\n"
0628     "   mov     %2, %0\n"
0629     "   .previous\n"
0630     : "=r" (mask)
0631     : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
0632 
0633     return (pte_val(pte) & mask);
0634 }
0635 
/* Test the present bit (different bit position per cpu type). */
0636 static inline unsigned long pte_present(pte_t pte)
0637 {
0638     unsigned long val = pte_val(pte);
0639 
0640     __asm__ __volatile__(
0641     "\n661: and     %0, %2, %0\n"
0642     "   .section    .sun4v_1insn_patch, \"ax\"\n"
0643     "   .word       661b\n"
0644     "   and     %0, %3, %0\n"
0645     "   .previous\n"
0646     : "=r" (val)
0647     : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
0648 
0649     return val;
0650 }
0651 
0652 #define pte_accessible pte_accessible
/* A pte may be cached in the TLB iff its VALID bit is set (bit 63 in
 * both the sun4u and sun4v layouts); @mm is unused here.
 */
0653 static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
0654 {
0655     return pte_val(a) & _PAGE_VALID;
0656 }
0657 
/* Test the special-page bit (same bit in both layouts). */
0658 static inline unsigned long pte_special(pte_t pte)
0659 {
0660     return pte_val(pte) & _PAGE_SPECIAL;
0661 }
0662 
0663 #define pmd_leaf    pmd_large
0664 static inline unsigned long pmd_large(pmd_t pmd)
0665 {
0666     pte_t pte = __pte(pmd_val(pmd));
0667 
0668     return pte_val(pte) & _PAGE_PMD_HUGE;
0669 }
0670 
0671 static inline unsigned long pmd_pfn(pmd_t pmd)
0672 {
0673     pte_t pte = __pte(pmd_val(pmd));
0674 
0675     return pte_pfn(pte);
0676 }
0677 
0678 #define pmd_write pmd_write
0679 static inline unsigned long pmd_write(pmd_t pmd)
0680 {
0681     pte_t pte = __pte(pmd_val(pmd));
0682 
0683     return pte_write(pte);
0684 }
0685 
0686 #define pud_write(pud)  pte_write(__pte(pud_val(pud)))
0687 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* The huge-PMD helpers below all delegate to the matching pte
 * operation: a huge PMD carries the same bit layout as a pte.
 */
static inline unsigned long pmd_dirty(pmd_t pmd)
{
	return pte_dirty(__pte(pmd_val(pmd)));
}

static inline unsigned long pmd_young(pmd_t pmd)
{
	return pte_young(__pte(pmd_val(pmd)));
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PMD_HUGE;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return __pmd(pte_val(pte_mkold(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return __pmd(pte_val(pte_wrprotect(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return __pmd(pte_val(pte_mkdirty(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return __pmd(pte_val(pte_mkclean(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return __pmd(pte_val(pte_mkyoung(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return __pmd(pte_val(pte_mkwrite(__pte(pmd_val(pmd)))));
}

/* Protection bits of a huge PMD, returned as a pgprot_t. */
static inline pgprot_t pmd_pgprot(pmd_t entry)
{
	return __pgprot(pmd_val(entry));
}
#endif
0771 
/* Any non-zero PMD counts as present: non-leaf entries hold the physical
 * address of a pte table, leaves carry _PAGE_PMD_HUGE.
 */
0772 static inline int pmd_present(pmd_t pmd)
0773 {
0774     return pmd_val(pmd) != 0UL;
0775 }
0776 
0777 #define pmd_none(pmd)           (!pmd_val(pmd))
0778 
0779 /* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
0780  * very simple, it's just the physical address.  PTE tables are of
0781  * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
0782  * the top bits outside of the range of any physical address size we
0783  * support are clear as well.  We also validate the physical itself.
0784  */
0785 #define pmd_bad(pmd)            (pmd_val(pmd) & ~PAGE_MASK)
0786 
0787 #define pud_none(pud)           (!pud_val(pud))
0788 
/* Same table-pointer encoding at the PUD and P4D levels. */
0789 #define pud_bad(pud)            (pud_val(pud) & ~PAGE_MASK)
0790 
0791 #define p4d_none(p4d)           (!p4d_val(p4d))
0792 
0793 #define p4d_bad(p4d)            (p4d_val(p4d) & ~PAGE_MASK)
0794 
0795 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE(review): out-of-line under THP — presumably so huge-PMD updates
 * can do extra bookkeeping; confirm against the C implementation.
 */
0796 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
0797         pmd_t *pmdp, pmd_t pmd);
0798 #else
/* Without THP a PMD store needs no extra work. */
0799 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
0800                   pmd_t *pmdp, pmd_t pmd)
0801 {
0802     *pmdp = pmd;
0803 }
0804 #endif
0805 
0806 static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
0807 {
0808     unsigned long val = __pa((unsigned long) (ptep));
0809 
0810     pmd_val(*pmdp) = val;
0811 }
0812 
0813 #define pud_set(pudp, pmdp) \
0814     (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
0815 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
0816 {
0817     pte_t pte = __pte(pmd_val(pmd));
0818     unsigned long pfn;
0819 
0820     pfn = pte_pfn(pte);
0821 
0822     return ((unsigned long) __va(pfn << PAGE_SHIFT));
0823 }
0824 
0825 static inline pmd_t *pud_pgtable(pud_t pud)
0826 {
0827     pte_t pte = __pte(pud_val(pud));
0828     unsigned long pfn;
0829 
0830     pfn = pte_pfn(pte);
0831 
0832     return ((pmd_t *) __va(pfn << PAGE_SHIFT));
0833 }
0834 
0835 #define pmd_page(pmd)           virt_to_page((void *)pmd_page_vaddr(pmd))
0836 #define pud_page(pud)           virt_to_page((void *)pud_pgtable(pud))
0837 #define pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0UL)
0838 #define pud_present(pud)        (pud_val(pud) != 0U)
0839 #define pud_clear(pudp)         (pud_val(*(pudp)) = 0UL)
/* P4D entries store the physical address of the pud table directly. */
0840 #define p4d_pgtable(p4d)        \
0841     ((pud_t *) __va(p4d_val(p4d)))
0842 #define p4d_present(p4d)        (p4d_val(p4d) != 0U)
0843 #define p4d_clear(p4dp)         (p4d_val(*(p4dp)) = 0UL)
0844 
0845 /* only used by the stubbed out hugetlb gup code, should never be called */
0846 #define p4d_page(p4d)           NULL
0847 
0848 #define pud_leaf    pud_large
0849 static inline unsigned long pud_large(pud_t pud)
0850 {
0851     pte_t pte = __pte(pud_val(pud));
0852 
0853     return pte_val(pte) & _PAGE_PMD_HUGE;
0854 }
0855 
0856 static inline unsigned long pud_pfn(pud_t pud)
0857 {
0858     pte_t pte = __pte(pud_val(pud));
0859 
0860     return pte_pfn(pte);
0861 }
0862 
0863 /* Same in both SUN4V and SUN4U.  */
0864 #define pte_none(pte)           (!pte_val(pte))
0865 
/* Point a P4D at the pud table (stored as a physical address). */
0866 #define p4d_set(p4dp, pudp) \
0867     (p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))
0868 
0869 /* We cannot include <linux/mm_types.h> at this point yet: */
0870 extern struct mm_struct init_mm;
0871 
0872 /* Actual page table PTE updates.  */
0873 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
0874            pte_t *ptep, pte_t orig, int fullmm,
0875            unsigned int hugepage_shift);
0876 
0877 static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
0878                 pte_t *ptep, pte_t orig, int fullmm,
0879                 unsigned int hugepage_shift)
0880 {
0881     /* It is more efficient to let flush_tlb_kernel_range()
0882      * handle init_mm tlb flushes.
0883      *
0884      * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
0885      *             and SUN4V pte layout, so this inline test is fine.
0886      */
0887     if (likely(mm != &init_mm) && pte_accessible(mm, orig))
0888         tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
0889 }
0890 
0891 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
0892 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
0893                         unsigned long addr,
0894                         pmd_t *pmdp)
0895 {
0896     pmd_t pmd = *pmdp;
0897     set_pmd_at(mm, addr, pmdp, __pmd(0UL));
0898     return pmd;
0899 }
0900 
/* Install @pte at @ptep, snapshotting the old value first so a TLB
 * flush can be queued for it when needed.
 */
0901 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
0902                  pte_t *ptep, pte_t pte, int fullmm)
0903 {
0904     pte_t orig = *ptep;
0905 
0906     *ptep = pte;
0907     maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
0908 }
0909 
/* Install a pte, batching any needed TLB flush (non-fullmm case). */
#define set_pte_at(mm,addr,ptep,pte)    \
    __set_pte_at((mm), (addr), (ptep), (pte), 0)

/* Clearing a pte is simply storing the zero (none) entry. */
#define pte_clear(mm,addr,ptep)     \
    set_pte_at((mm), (addr), (ptep), __pte(0UL))

/* Full-mm variant: passes fullmm through so the flush batching can
 * take the whole-address-space teardown path.
 */
#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm) \
    __set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))
0919 
#ifdef DCACHE_ALIASING_POSSIBLE
/* When a pte is moved to a new virtual address (mremap), flush the
 * D-cache for the page if bit 13 of the address changes — presumably
 * the cache-colour/alias bit for 8K pages; confirm against the
 * D-cache geometry.  The flush is skipped entirely when
 * tlb_type == hypervisor (SUN4V).
 */
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)             \
({                                  \
    pte_t newpte = (pte);                       \
    if (tlb_type != hypervisor && pte_present(pte)) {       \
        unsigned long this_pfn = pte_pfn(pte);          \
                                    \
        if (pfn_valid(this_pfn) &&              \
            (((old_addr) ^ (new_addr)) & (1 << 13)))        \
            flush_dcache_page_all(current->mm,      \
                          pfn_to_page(this_pfn));   \
    }                               \
    newpte;                             \
})
#endif
0936 
/* The kernel's master page directory. */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
              pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmdp);

/* Deposit/withdraw of the pre-allocated pte page used when a huge PMD
 * is split back into normal ptes.
 */
#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
0962 
/* Encode and de-code a swap entry.
 *
 * Layout within the pte value (from the shifts/masks below):
 *   bits [PAGE_SHIFT+7 : PAGE_SHIFT]  swap type (8 bits)
 *   bits [63 : PAGE_SHIFT+8]          swap offset
 * The low PAGE_SHIFT bits are left zero by the encoding.
 */
#define __swp_type(entry)   (((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)   \
    ( (swp_entry_t) \
      { \
        (((long)(type) << PAGE_SHIFT) | \
                 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
      } )
#define __pte_to_swp_entry(pte)     ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)       ((pte_t) { (x).val })
0974 
int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)  (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)        (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)            (pfn & 0x0fffffffffffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
            unsigned long, pgprot_t);

/* ADI tag save/restore helpers (see <asm/adi.h>), used by the
 * arch_do_swap_page()/arch_unmap_one() hooks below.
 */
void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
              unsigned long addr, pte_t pte);

int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
          unsigned long addr, pte_t oldpte);
0993 
0994 #define __HAVE_ARCH_DO_SWAP_PAGE
0995 static inline void arch_do_swap_page(struct mm_struct *mm,
0996                      struct vm_area_struct *vma,
0997                      unsigned long addr,
0998                      pte_t pte, pte_t oldpte)
0999 {
1000     /* If this is a new page being mapped in, there can be no
1001      * ADI tags stored away for this page. Skip looking for
1002      * stored tags
1003      */
1004     if (pte_none(oldpte))
1005         return;
1006 
1007     if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
1008         adi_restore_tags(mm, vma, addr, pte);
1009 }
1010 
1011 #define __HAVE_ARCH_UNMAP_ONE
1012 static inline int arch_unmap_one(struct mm_struct *mm,
1013                  struct vm_area_struct *vma,
1014                  unsigned long addr, pte_t oldpte)
1015 {
1016     if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
1017         return adi_save_tags(mm, vma, addr, oldpte);
1018     return 0;
1019 }
1020 
1021 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
1022                      unsigned long from, unsigned long pfn,
1023                      unsigned long size, pgprot_t prot)
1024 {
1025     unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
1026     int space = GET_IOSPACE(pfn);
1027     unsigned long phys_base;
1028 
1029     phys_base = offset | (((unsigned long) space) << 32UL);
1030 
1031     return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
1032 }
1033 #define io_remap_pfn_range io_remap_pfn_range
1034 
/* Strip ADI version tag bits from a user address by sign-extending
 * from the last non-tag bit; identity on non-ADI hardware.
 */
static inline unsigned long __untagged_addr(unsigned long start)
{
    if (adi_capable()) {
        long addr = start;

        /* If userspace has passed a versioned address, kernel
         * will not find it in the VMAs since it does not store
         * the version tags in the list of VMAs. Storing version
         * tags in list of VMAs is impractical since they can be
         * changed any time from userspace without dropping into
         * kernel. Any address search in VMAs will be done with
         * non-versioned addresses. Ensure the ADI version bits
         * are dropped here by sign extending the last bit before
         * ADI bits. IOMMU does not implement version tags.
         */
        /* NOTE(review): relies on '>>' of a negative long being an
         * arithmetic (sign-extending) shift — implementation-defined
         * in C, but what the kernel's supported compilers produce.
         */
        return (addr << (long)adi_nbits()) >> (long)adi_nbits();
    }

    return start;
}
#define untagged_addr(addr) \
    ((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
1057 
1058 static inline bool pte_access_permitted(pte_t pte, bool write)
1059 {
1060     u64 prot;
1061 
1062     if (tlb_type == hypervisor) {
1063         prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
1064         if (write)
1065             prot |= _PAGE_WRITE_4V;
1066     } else {
1067         prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
1068         if (write)
1069             prot |= _PAGE_WRITE_4U;
1070     }
1071 
1072     return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
1073 }
1074 #define pte_access_permitted pte_access_permitted
1075 
1076 #include <asm/tlbflush.h>
1077 
/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
                   unsigned long, unsigned long,
                   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

/* Boot-time MMU/fault setup hooks for SUN4V and Cheetah cpus. */
void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

/* "mem=" command line limit, parsed elsewhere. */
extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

/* A PMD's pgtable is simply the virtual address of the pte page it maps. */
#define pmd_pgtable(PMD)    ((pte_t *)pmd_page_vaddr(PMD))
1102 
#ifdef CONFIG_HUGETLB_PAGE

/* Report the mapping size of a leaf entry at each page-table level.
 * The self-referential #define is the usual marker that lets generic
 * code detect that the architecture overrides these.
 */
#define pud_leaf_size pud_leaf_size
extern unsigned long pud_leaf_size(pud_t pud);

#define pmd_leaf_size pmd_leaf_size
extern unsigned long pmd_leaf_size(pmd_t pmd);

#define pte_leaf_size pte_leaf_size
extern unsigned long pte_leaf_size(pte_t pte);

#endif /* CONFIG_HUGETLB_PAGE */
1115 
1116 #endif /* !(__ASSEMBLY__) */
1117 
1118 #endif /* !(_SPARC64_PGTABLE_H) */