Back to home page

LXR

 
 

    


0001 /*
0002  * pgtable.h: SpitFire page table operations.
0003  *
0004  * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
0005  * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
0006  */
0007 
0008 #ifndef _SPARC64_PGTABLE_H
0009 #define _SPARC64_PGTABLE_H
0010 
0011 /* This file contains the functions and defines necessary to modify and use
0012  * the SpitFire page tables.
0013  */
0014 
0015 #include <linux/compiler.h>
0016 #include <linux/const.h>
0017 #include <asm/types.h>
0018 #include <asm/spitfire.h>
0019 #include <asm/asi.h>
0020 #include <asm/page.h>
0021 #include <asm/processor.h>
0022 
0023 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
0024  * The page copy blockops can use 0x6000000 to 0x8000000.
0025  * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
0026  * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
0027  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
0028  * The vmalloc area spans 0x100000000 to 0x200000000.
0029  * Since modules need to be in the lowest 32-bits of the address space,
0030  * we place them right before the OBP area from 0x10000000 to 0xf0000000.
0031  * There is a single static kernel PMD which maps from 0x0 to address
0032  * 0x400000000.
0033  */
0034 #define TLBTEMP_BASE        _AC(0x0000000006000000,UL)
0035 #define TSBMAP_8K_BASE      _AC(0x0000000008000000,UL)
0036 #define TSBMAP_4M_BASE      _AC(0x0000000008400000,UL)
0037 #define MODULES_VADDR       _AC(0x0000000010000000,UL)
0038 #define MODULES_LEN     _AC(0x00000000e0000000,UL)
0039 #define MODULES_END     _AC(0x00000000f0000000,UL)
0040 #define LOW_OBP_ADDRESS     _AC(0x00000000f0000000,UL)
0041 #define HI_OBP_ADDRESS      _AC(0x0000000100000000,UL)
0042 #define VMALLOC_START       _AC(0x0000000100000000,UL)
0043 #define VMEMMAP_BASE        VMALLOC_END
0044 
0045 /* PMD_SHIFT determines the size of the area a second-level page
0046  * table can map
0047  */
0048 #define PMD_SHIFT   (PAGE_SHIFT + (PAGE_SHIFT-3))
0049 #define PMD_SIZE    (_AC(1,UL) << PMD_SHIFT)
0050 #define PMD_MASK    (~(PMD_SIZE-1))
0051 #define PMD_BITS    (PAGE_SHIFT - 3)
0052 
0053 /* PUD_SHIFT determines the size of the area a third-level page
0054  * table can map
0055  */
0056 #define PUD_SHIFT   (PMD_SHIFT + PMD_BITS)
0057 #define PUD_SIZE    (_AC(1,UL) << PUD_SHIFT)
0058 #define PUD_MASK    (~(PUD_SIZE-1))
0059 #define PUD_BITS    (PAGE_SHIFT - 3)
0060 
0061 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */
0062 #define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)
0063 #define PGDIR_SIZE  (_AC(1,UL) << PGDIR_SHIFT)
0064 #define PGDIR_MASK  (~(PGDIR_SIZE-1))
0065 #define PGDIR_BITS  (PAGE_SHIFT - 3)
0066 
0067 #if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
0068 #error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
0069 #endif
0070 
0071 #if (PGDIR_SHIFT + PGDIR_BITS) != 53
0072 #error Page table parameters do not cover virtual address space properly.
0073 #endif
0074 
0075 #if (PMD_SHIFT != HPAGE_SHIFT)
0076 #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
0077 #endif
0078 
0079 #ifndef __ASSEMBLY__
0080 
0081 extern unsigned long VMALLOC_END;
0082 
0083 #define vmemmap         ((struct page *)VMEMMAP_BASE)
0084 
0085 #include <linux/sched.h>
0086 
0087 bool kern_addr_valid(unsigned long addr);
0088 
0089 /* Entries per page directory level. */
0090 #define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
0091 #define PTRS_PER_PMD    (1UL << PMD_BITS)
0092 #define PTRS_PER_PUD    (1UL << PUD_BITS)
0093 #define PTRS_PER_PGD    (1UL << PGDIR_BITS)
0094 
0095 /* Kernel has a separate 44bit address space. */
0096 #define FIRST_USER_ADDRESS  0UL
0097 
0098 #define pmd_ERROR(e)                            \
0099     pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",     \
0100            __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
0101 #define pud_ERROR(e)                            \
0102     pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",     \
0103            __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
0104 #define pgd_ERROR(e)                            \
0105     pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",     \
0106            __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
0107 
0108 #endif /* !(__ASSEMBLY__) */
0109 
0110 /* PTE bits which are the same in SUN4U and SUN4V format.  */
0111 #define _PAGE_VALID   _AC(0x8000000000000000,UL) /* Valid TTE            */
0112 #define _PAGE_R       _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
0113 #define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
0114 #define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
0115 #define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE
0116 
0117 /* Advertise support for _PAGE_SPECIAL */
0118 #define __HAVE_ARCH_PTE_SPECIAL
0119 
0120 /* SUN4U pte bits... */
0121 #define _PAGE_SZ4MB_4U    _AC(0x6000000000000000,UL) /* 4MB Page             */
0122 #define _PAGE_SZ512K_4U   _AC(0x4000000000000000,UL) /* 512K Page            */
0123 #define _PAGE_SZ64K_4U    _AC(0x2000000000000000,UL) /* 64K Page             */
0124 #define _PAGE_SZ8K_4U     _AC(0x0000000000000000,UL) /* 8K Page              */
0125 #define _PAGE_NFO_4U      _AC(0x1000000000000000,UL) /* No Fault Only        */
0126 #define _PAGE_IE_4U   _AC(0x0800000000000000,UL) /* Invert Endianness    */
0127 #define _PAGE_SOFT2_4U    _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
0128 #define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
0129 #define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
0130 #define _PAGE_RES1_4U     _AC(0x0002000000000000,UL) /* Reserved             */
0131 #define _PAGE_SZ32MB_4U   _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
0132 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
0133 #define _PAGE_SZALL_4U    _AC(0x6001000000000000,UL) /* All pgsz bits        */
0134 #define _PAGE_SN_4U   _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
0135 #define _PAGE_RES2_4U     _AC(0x0000780000000000,UL) /* Reserved             */
0136 #define _PAGE_PADDR_4U    _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
0137 #define _PAGE_SOFT_4U     _AC(0x0000000000001F80,UL) /* Software bits:       */
0138 #define _PAGE_EXEC_4U     _AC(0x0000000000001000,UL) /* Executable SW bit    */
0139 #define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
0140 #define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
0141 #define _PAGE_READ_4U     _AC(0x0000000000000200,UL) /* Readable SW Bit      */
0142 #define _PAGE_WRITE_4U    _AC(0x0000000000000100,UL) /* Writable SW Bit      */
0143 #define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
0144 #define _PAGE_L_4U    _AC(0x0000000000000040,UL) /* Locked TTE           */
0145 #define _PAGE_CP_4U   _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
0146 #define _PAGE_CV_4U   _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
0147 #define _PAGE_E_4U    _AC(0x0000000000000008,UL) /* side-Effect          */
0148 #define _PAGE_P_4U    _AC(0x0000000000000004,UL) /* Privileged Page      */
0149 #define _PAGE_W_4U    _AC(0x0000000000000002,UL) /* Writable             */
0150 
0151 /* SUN4V pte bits... */
0152 #define _PAGE_NFO_4V      _AC(0x4000000000000000,UL) /* No Fault Only        */
0153 #define _PAGE_SOFT2_4V    _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
0154 #define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
0155 #define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
0156 #define _PAGE_READ_4V     _AC(0x0800000000000000,UL) /* Readable SW Bit      */
0157 #define _PAGE_WRITE_4V    _AC(0x0400000000000000,UL) /* Writable SW Bit      */
0158 #define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
0159 #define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
0160 #define _PAGE_PADDR_4V    _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
0161 #define _PAGE_IE_4V   _AC(0x0000000000001000,UL) /* Invert Endianness    */
0162 #define _PAGE_E_4V    _AC(0x0000000000000800,UL) /* side-Effect          */
0163 #define _PAGE_CP_4V   _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
0164 #define _PAGE_CV_4V   _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
0165 #define _PAGE_P_4V    _AC(0x0000000000000100,UL) /* Privileged Page      */
0166 #define _PAGE_EXEC_4V     _AC(0x0000000000000080,UL) /* Executable Page      */
0167 #define _PAGE_W_4V    _AC(0x0000000000000040,UL) /* Writable             */
0168 #define _PAGE_SOFT_4V     _AC(0x0000000000000030,UL) /* Software bits        */
0169 #define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
0170 #define _PAGE_RESV_4V     _AC(0x0000000000000008,UL) /* Reserved             */
0171 #define _PAGE_SZ16GB_4V   _AC(0x0000000000000007,UL) /* 16GB Page            */
0172 #define _PAGE_SZ2GB_4V    _AC(0x0000000000000006,UL) /* 2GB Page             */
0173 #define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
0174 #define _PAGE_SZ32MB_4V   _AC(0x0000000000000004,UL) /* 32MB Page            */
0175 #define _PAGE_SZ4MB_4V    _AC(0x0000000000000003,UL) /* 4MB Page             */
0176 #define _PAGE_SZ512K_4V   _AC(0x0000000000000002,UL) /* 512K Page            */
0177 #define _PAGE_SZ64K_4V    _AC(0x0000000000000001,UL) /* 64K Page             */
0178 #define _PAGE_SZ8K_4V     _AC(0x0000000000000000,UL) /* 8K Page              */
0179 #define _PAGE_SZALL_4V    _AC(0x0000000000000007,UL) /* All pgsz bits        */
0180 
0181 #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
0182 #define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
0183 
0184 #if REAL_HPAGE_SHIFT != 22
0185 #error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
0186 #endif
0187 
0188 #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
0189 #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
0190 
0191 /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
0192 #define __P000  __pgprot(0)
0193 #define __P001  __pgprot(0)
0194 #define __P010  __pgprot(0)
0195 #define __P011  __pgprot(0)
0196 #define __P100  __pgprot(0)
0197 #define __P101  __pgprot(0)
0198 #define __P110  __pgprot(0)
0199 #define __P111  __pgprot(0)
0200 
0201 #define __S000  __pgprot(0)
0202 #define __S001  __pgprot(0)
0203 #define __S010  __pgprot(0)
0204 #define __S011  __pgprot(0)
0205 #define __S100  __pgprot(0)
0206 #define __S101  __pgprot(0)
0207 #define __S110  __pgprot(0)
0208 #define __S111  __pgprot(0)
0209 
0210 #ifndef __ASSEMBLY__
0211 
0212 pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
0213 
0214 unsigned long pte_sz_bits(unsigned long size);
0215 
0216 extern pgprot_t PAGE_KERNEL;
0217 extern pgprot_t PAGE_KERNEL_LOCKED;
0218 extern pgprot_t PAGE_COPY;
0219 extern pgprot_t PAGE_SHARED;
0220 
0221 /* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
0222 extern unsigned long _PAGE_IE;
0223 extern unsigned long _PAGE_E;
0224 extern unsigned long _PAGE_CACHE;
0225 
0226 extern unsigned long pg_iobits;
0227 extern unsigned long _PAGE_ALL_SZ_BITS;
0228 
0229 extern struct page *mem_map_zero;
0230 #define ZERO_PAGE(vaddr)    (mem_map_zero)
0231 
0232 /* PFNs are real physical page numbers.  However, mem_map only begins to record
0233  * per-page information starting at pfn_base.  This is to handle systems where
0234  * the first physical page in the machine is at some huge physical address,
0235  * such as 4GB.   This is common on a partitioned E10000, for example.
0236  */
0237 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
0238 {
0239     unsigned long paddr = pfn << PAGE_SHIFT;
0240 
0241     BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
0242     return __pte(paddr | pgprot_val(prot));
0243 }
0244 #define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
0245 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Build a huge-page pmd from a pfn; pmds share the pte bit layout. */
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
    return __pmd(pte_val(pfn_pte(page_nr, pgprot)));
}
#define mk_pmd(page, pgprot)    pfn_pmd(page_to_pfn(page), (pgprot))
#endif
0255 
/* This one can be done with two shifts.  */
/* Extract the page frame number from a pte.
 *
 * Shifting the pte left and then right masks off everything outside
 * the physical-address field.  The default (sun4u) pair keeps
 * pa[42:13] (shift by 21); the .sun4v_2insn_patch section carries the
 * sun4v replacement pair keeping pa[55:13] (shift by 8), which the
 * boot-time patcher copies over the instructions at label 661 when
 * running under the sun4v hypervisor.
 */
static inline unsigned long pte_pfn(pte_t pte)
{
    unsigned long ret;

    __asm__ __volatile__(
    "\n661: sllx        %1, %2, %0\n"
    "   srlx        %0, %3, %0\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sllx        %1, %4, %0\n"
    "   srlx        %0, %5, %0\n"
    "   .previous\n"
    : "=r" (ret)
    : "r" (pte_val(pte)),
      "i" (21), "i" (21 + PAGE_SHIFT),
      "i" (8), "i" (8 + PAGE_SHIFT));

    return ret;
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
0277 
/* Replace the protection bits of a pte while preserving its identity
 * bits: physical address, dirty/accessed state, cacheability,
 * side-effect, special, huge and page-size fields.
 *
 * The inline asm only materializes the "preserve" mask constant:
 * %2 is the sun4u mask, %3 the sun4v mask (substituted at boot via
 * .sun4v_2insn_patch) and %4 the M7 variant, which drops
 * _PAGE_CV_4V (substituted via .sun_m7_2insn_patch).
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
    unsigned long mask, tmp;

    /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
     * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
     *
     * Even if we use negation tricks the result is still a 6
     * instruction sequence, so don't try to play fancy and just
     * do the most straightforward implementation.
     *
     * Note: We encode this into 3 sun4v 2-insn patch sequences.
     */

    BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
    __asm__ __volatile__(
    "\n661: sethi       %%uhi(%2), %1\n"
    "   sethi       %%hi(%2), %0\n"
    "\n662: or      %1, %%ulo(%2), %1\n"
    "   or      %0, %%lo(%2), %0\n"
    "\n663: sllx        %1, 32, %1\n"
    "   or      %0, %1, %0\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%3), %1\n"
    "   sethi       %%hi(%3), %0\n"
    "   .word       662b\n"
    "   or      %1, %%ulo(%3), %1\n"
    "   or      %0, %%lo(%3), %0\n"
    "   .word       663b\n"
    "   sllx        %1, 32, %1\n"
    "   or      %0, %1, %0\n"
    "   .previous\n"
    "   .section    .sun_m7_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%4), %1\n"
    "   sethi       %%hi(%4), %0\n"
    "   .word       662b\n"
    "   or      %1, %%ulo(%4), %1\n"
    "   or      %0, %%lo(%4), %0\n"
    "   .word       663b\n"
    "   sllx        %1, 32, %1\n"
    "   or      %0, %1, %0\n"
    "   .previous\n"
    : "=r" (mask), "=r" (tmp)
    : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
           _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
           _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
      "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
           _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
           _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
      "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
           _PAGE_CP_4V | _PAGE_E_4V |
           _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

    /* Keep the masked identity bits, take everything else from prot. */
    return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}
0335 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* pmds share the pte bit layout, so reuse pte_modify() wholesale. */
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
    return __pmd(pte_val(pte_modify(__pte(pmd_val(pmd)), newprot)));
}
#endif
0346 
/* Strip the cacheability bits from a pgprot and set the side-effect
 * (E) bit, yielding protections suitable for uncached/I-O mappings.
 * The pair at label 661 is the sun4u version; boot-time patching
 * substitutes the sun4v pair (.sun4v_2insn_patch) or the M7 pair
 * (.sun_m7_2insn_patch), the latter clearing only _PAGE_CP_4V.
 */
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
    unsigned long val = pgprot_val(prot);

    __asm__ __volatile__(
    "\n661: andn        %0, %2, %0\n"
    "   or      %0, %3, %0\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   andn        %0, %4, %0\n"
    "   or      %0, %5, %0\n"
    "   .previous\n"
    "   .section    .sun_m7_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   andn        %0, %6, %0\n"
    "   or      %0, %5, %0\n"
    "   .previous\n"
    : "=r" (val)
    : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
                 "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
                 "i" (_PAGE_CP_4V));

    return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached".  That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached
0376 
0377 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
/* Return the page-size bits that mark a huge pte.  The sun4u encoding
 * (_PAGE_SZHUGE_4U) lives in the upper 32 bits and is built with
 * sethi+sllx; the sun4v encoding is a small constant moved in by the
 * boot-time .sun4v_2insn_patch replacement.
 */
static inline unsigned long __pte_huge_mask(void)
{
    unsigned long mask;

    __asm__ __volatile__(
    "\n661: sethi       %%uhi(%1), %0\n"
    "   sllx        %0, 32, %0\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   mov     %2, %0\n"
    "   nop\n"
    "   .previous\n"
    : "=r" (mask)
    : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

    return mask;
}
0395 
0396 static inline pte_t pte_mkhuge(pte_t pte)
0397 {
0398     return __pte(pte_val(pte) | _PAGE_PMD_HUGE | __pte_huge_mask());
0399 }
0400 
0401 static inline bool is_hugetlb_pte(pte_t pte)
0402 {
0403     return !!(pte_val(pte) & __pte_huge_mask());
0404 }
0405 
0406 static inline bool is_hugetlb_pmd(pmd_t pmd)
0407 {
0408     return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
0409 }
0410 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Mark a pmd as a transparent huge page.
 *
 * pte_mkhuge() already ORs in _PAGE_PMD_HUGE together with the
 * page-size bits, so the extra "pte_val(pte) |= _PAGE_PMD_HUGE"
 * the old code performed afterwards was redundant and is dropped.
 */
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
    pte_t pte = pte_mkhuge(__pte(pmd_val(pmd)));

    return __pmd(pte_val(pte));
}
#endif
#else
/* No hugetlb/THP support configured: no pte is ever a huge page. */
static inline bool is_hugetlb_pte(pte_t pte)
{
    return false;
}
#endif
0428 
/* Set the dirty state: the software MODIFIED bit plus the hardware
 * writable (W) bit.  The sun4u constants fit an OR immediate; the
 * patched sun4v replacement (.sun4v_2insn_patch over labels 661/662)
 * must first build the 64-bit constant in the scratch register %1.
 */
static inline pte_t pte_mkdirty(pte_t pte)
{
    unsigned long val = pte_val(pte), tmp;

    __asm__ __volatile__(
    "\n661: or      %0, %3, %0\n"
    "   nop\n"
    "\n662: nop\n"
    "   nop\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%4), %1\n"
    "   sllx        %1, 32, %1\n"
    "   .word       662b\n"
    "   or      %1, %%lo(%4), %1\n"
    "   or      %0, %1, %0\n"
    "   .previous\n"
    : "=r" (val), "=r" (tmp)
    : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
      "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

    return __pte(val);
}

/* Clear the dirty state (MODIFIED and W bits); same patching scheme
 * as pte_mkdirty() but with andn instead of or.
 */
static inline pte_t pte_mkclean(pte_t pte)
{
    unsigned long val = pte_val(pte), tmp;

    __asm__ __volatile__(
    "\n661: andn        %0, %3, %0\n"
    "   nop\n"
    "\n662: nop\n"
    "   nop\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%4), %1\n"
    "   sllx        %1, 32, %1\n"
    "   .word       662b\n"
    "   or      %1, %%lo(%4), %1\n"
    "   andn        %0, %1, %0\n"
    "   .previous\n"
    : "=r" (val), "=r" (tmp)
    : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
      "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

    return __pte(val);
}

/* Grant write permission: set the software WRITE bit for the
 * running cpu type (the mask is patched in for sun4v).
 */
static inline pte_t pte_mkwrite(pte_t pte)
{
    unsigned long val = pte_val(pte), mask;

    __asm__ __volatile__(
    "\n661: mov     %1, %0\n"
    "   nop\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%2), %0\n"
    "   sllx        %0, 32, %0\n"
    "   .previous\n"
    : "=r" (mask)
    : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

    return __pte(val | mask);
}

/* Revoke write permission: clear both the software WRITE bit and the
 * hardware W bit so a later store faults.
 */
static inline pte_t pte_wrprotect(pte_t pte)
{
    unsigned long val = pte_val(pte), tmp;

    __asm__ __volatile__(
    "\n661: andn        %0, %3, %0\n"
    "   nop\n"
    "\n662: nop\n"
    "   nop\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%4), %1\n"
    "   sllx        %1, 32, %1\n"
    "   .word       662b\n"
    "   or      %1, %%lo(%4), %1\n"
    "   andn        %0, %1, %0\n"
    "   .previous\n"
    : "=r" (val), "=r" (tmp)
    : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
      "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

    return __pte(val);
}
0518 
/* Clear the referenced state: the cpu-specific ACCESSED bit (mask is
 * patched for sun4v) plus the common _PAGE_R "keep ref uptodate" bit.
 */
static inline pte_t pte_mkold(pte_t pte)
{
    unsigned long mask;

    __asm__ __volatile__(
    "\n661: mov     %1, %0\n"
    "   nop\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%2), %0\n"
    "   sllx        %0, 32, %0\n"
    "   .previous\n"
    : "=r" (mask)
    : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

    mask |= _PAGE_R;

    return __pte(pte_val(pte) & ~mask);
}

/* Set the referenced state: ACCESSED plus _PAGE_R (inverse of
 * pte_mkold()).
 */
static inline pte_t pte_mkyoung(pte_t pte)
{
    unsigned long mask;

    __asm__ __volatile__(
    "\n661: mov     %1, %0\n"
    "   nop\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%2), %0\n"
    "   sllx        %0, 32, %0\n"
    "   .previous\n"
    : "=r" (mask)
    : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

    mask |= _PAGE_R;

    return __pte(pte_val(pte) | mask);
}
0558 
0559 static inline pte_t pte_mkspecial(pte_t pte)
0560 {
0561     pte_val(pte) |= _PAGE_SPECIAL;
0562     return pte;
0563 }
0564 
/* Test the referenced (ACCESSED) bit; mask is patched for sun4v.
 * Returns the raw bit, not a normalized boolean.
 */
static inline unsigned long pte_young(pte_t pte)
{
    unsigned long mask;

    __asm__ __volatile__(
    "\n661: mov     %1, %0\n"
    "   nop\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%2), %0\n"
    "   sllx        %0, 32, %0\n"
    "   .previous\n"
    : "=r" (mask)
    : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

    return (pte_val(pte) & mask);
}

/* Test the dirty (MODIFIED) bit; mask is patched for sun4v. */
static inline unsigned long pte_dirty(pte_t pte)
{
    unsigned long mask;

    __asm__ __volatile__(
    "\n661: mov     %1, %0\n"
    "   nop\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%2), %0\n"
    "   sllx        %0, 32, %0\n"
    "   .previous\n"
    : "=r" (mask)
    : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

    return (pte_val(pte) & mask);
}

/* Test the software WRITE permission bit; mask is patched for sun4v. */
static inline unsigned long pte_write(pte_t pte)
{
    unsigned long mask;

    __asm__ __volatile__(
    "\n661: mov     %1, %0\n"
    "   nop\n"
    "   .section    .sun4v_2insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   sethi       %%uhi(%2), %0\n"
    "   sllx        %0, 32, %0\n"
    "   .previous\n"
    : "=r" (mask)
    : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

    return (pte_val(pte) & mask);
}

/* Test the EXEC bit.  Both masks fit a single instruction, hence the
 * one-insn .sun4v_1insn_patch variant here.
 */
static inline unsigned long pte_exec(pte_t pte)
{
    unsigned long mask;

    __asm__ __volatile__(
    "\n661: sethi       %%hi(%1), %0\n"
    "   .section    .sun4v_1insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   mov     %2, %0\n"
    "   .previous\n"
    : "=r" (mask)
    : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

    return (pte_val(pte) & mask);
}

/* Test the PRESENT bit (single-instruction patch, like pte_exec). */
static inline unsigned long pte_present(pte_t pte)
{
    unsigned long val = pte_val(pte);

    __asm__ __volatile__(
    "\n661: and     %0, %2, %0\n"
    "   .section    .sun4v_1insn_patch, \"ax\"\n"
    "   .word       661b\n"
    "   and     %0, %3, %0\n"
    "   .previous\n"
    : "=r" (val)
    : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

    return val;
}
0650 
0651 #define pte_accessible pte_accessible
0652 static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
0653 {
0654     return pte_val(a) & _PAGE_VALID;
0655 }
0656 
0657 static inline unsigned long pte_special(pte_t pte)
0658 {
0659     return pte_val(pte) & _PAGE_SPECIAL;
0660 }
0661 
0662 static inline unsigned long pmd_large(pmd_t pmd)
0663 {
0664     pte_t pte = __pte(pmd_val(pmd));
0665 
0666     return pte_val(pte) & _PAGE_PMD_HUGE;
0667 }
0668 
0669 static inline unsigned long pmd_pfn(pmd_t pmd)
0670 {
0671     pte_t pte = __pte(pmd_val(pmd));
0672 
0673     return pte_pfn(pte);
0674 }
0675 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Transparent huge page pmds share the pte bit layout, so each query
 * below views the pmd as a pte and delegates to the matching pte
 * operation; the updates convert the result back with __pmd().
 */
static inline unsigned long pmd_dirty(pmd_t pmd)
{
    return pte_dirty(__pte(pmd_val(pmd)));
}

static inline unsigned long pmd_young(pmd_t pmd)
{
    return pte_young(__pte(pmd_val(pmd)));
}

static inline unsigned long pmd_write(pmd_t pmd)
{
    return pte_write(__pte(pmd_val(pmd)));
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
    return pmd_val(pmd) & _PAGE_PMD_HUGE;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
    return __pmd(pte_val(pte_mkold(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
    return __pmd(pte_val(pte_wrprotect(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
    return __pmd(pte_val(pte_mkdirty(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
    return __pmd(pte_val(pte_mkclean(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
    return __pmd(pte_val(pte_mkyoung(__pte(pmd_val(pmd)))));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
    return __pmd(pte_val(pte_mkwrite(__pte(pmd_val(pmd)))));
}

/* Protection view of a pmd: its raw value reinterpreted as a pgprot. */
static inline pgprot_t pmd_pgprot(pmd_t entry)
{
    return __pgprot(pmd_val(entry));
}
#endif
0766 
/* Any non-zero pmd — a pte table pointer or a huge mapping — counts
 * as present; a cleared pmd is all zeroes.
 */
static inline int pmd_present(pmd_t pmd)
{
    return pmd_val(pmd) != 0UL;
}
0771 
0772 #define pmd_none(pmd)           (!pmd_val(pmd))
0773 
0774 /* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
0775  * very simple, it's just the physical address.  PTE tables are of
0776  * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
0777  * the top bits outside of the range of any physical address size we
0778  * support are clear as well.  We also validate the physical itself.
0779  */
0780 #define pmd_bad(pmd)            (pmd_val(pmd) & ~PAGE_MASK)
0781 
0782 #define pud_none(pud)           (!pud_val(pud))
0783 
0784 #define pud_bad(pud)            (pud_val(pud) & ~PAGE_MASK)
0785 
0786 #define pgd_none(pgd)           (!pgd_val(pgd))
0787 
0788 #define pgd_bad(pgd)            (pgd_val(pgd) & ~PAGE_MASK)
0789 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* With THP a pmd store needs extra bookkeeping; implemented out of
 * line (definition elsewhere in the project).
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        pmd_t *pmdp, pmd_t pmd);
#else
/* Without THP a pmd store is a plain assignment. */
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                  pmd_t *pmdp, pmd_t pmd)
{
    *pmdp = pmd;
}
#endif
0800 
0801 static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
0802 {
0803     unsigned long val = __pa((unsigned long) (ptep));
0804 
0805     pmd_val(*pmdp) = val;
0806 }
0807 
0808 #define pud_set(pudp, pmdp) \
0809     (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
0810 static inline unsigned long __pmd_page(pmd_t pmd)
0811 {
0812     pte_t pte = __pte(pmd_val(pmd));
0813     unsigned long pfn;
0814 
0815     pfn = pte_pfn(pte);
0816 
0817     return ((unsigned long) __va(pfn << PAGE_SHIFT));
0818 }
0819 #define pmd_page(pmd)           virt_to_page((void *)__pmd_page(pmd))
0820 #define pud_page_vaddr(pud)     \
0821     ((unsigned long) __va(pud_val(pud)))
0822 #define pud_page(pud)           virt_to_page((void *)pud_page_vaddr(pud))
0823 #define pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0UL)
0824 #define pud_present(pud)        (pud_val(pud) != 0U)
0825 #define pud_clear(pudp)         (pud_val(*(pudp)) = 0UL)
0826 #define pgd_page_vaddr(pgd)     \
0827     ((unsigned long) __va(pgd_val(pgd)))
0828 #define pgd_present(pgd)        (pgd_val(pgd) != 0U)
0829 #define pgd_clear(pgdp)         (pgd_val(*(pgdp)) = 0UL)
0830 
0831 static inline unsigned long pud_large(pud_t pud)
0832 {
0833     pte_t pte = __pte(pud_val(pud));
0834 
0835     return pte_val(pte) & _PAGE_PMD_HUGE;
0836 }
0837 
0838 static inline unsigned long pud_pfn(pud_t pud)
0839 {
0840     pte_t pte = __pte(pud_val(pud));
0841 
0842     return pte_pfn(pte);
0843 }
0844 
0845 /* Same in both SUN4V and SUN4U.  */
0846 #define pte_none(pte)           (!pte_val(pte))
0847 
0848 #define pgd_set(pgdp, pudp) \
0849     (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
0850 
0851 /* to find an entry in a page-table-directory. */
0852 #define pgd_index(address)  (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
0853 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
0854 
0855 /* to find an entry in a kernel page-table-directory */
0856 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
0857 
0858 /* Find an entry in the third-level page table.. */
0859 #define pud_index(address)  (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
0860 #define pud_offset(pgdp, address)   \
0861     ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
0862 
0863 /* Find an entry in the second-level page table.. */
0864 #define pmd_offset(pudp, address)   \
0865     ((pmd_t *) pud_page_vaddr(*(pudp)) + \
0866      (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
0867 
0868 /* Find an entry in the third-level page table.. */
0869 #define pte_index(dir, address) \
0870     ((pte_t *) __pmd_page(*(dir)) + \
0871      ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
0872 #define pte_offset_kernel       pte_index
0873 #define pte_offset_map          pte_index
0874 #define pte_unmap(pte)          do { } while (0)
0875 
0876 /* Actual page table PTE updates.  */
0877 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
0878            pte_t *ptep, pte_t orig, int fullmm);
0879 
0880 static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
0881                 pte_t *ptep, pte_t orig, int fullmm)
0882 {
0883     /* It is more efficient to let flush_tlb_kernel_range()
0884      * handle init_mm tlb flushes.
0885      *
0886      * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
0887      *             and SUN4V pte layout, so this inline test is fine.
0888      */
0889     if (likely(mm != &init_mm) && pte_accessible(mm, orig))
0890         tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
0891 }
0892 
0893 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
0894 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
0895                         unsigned long addr,
0896                         pmd_t *pmdp)
0897 {
0898     pmd_t pmd = *pmdp;
0899     set_pmd_at(mm, addr, pmdp, __pmd(0UL));
0900     return pmd;
0901 }
0902 
0903 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
0904                  pte_t *ptep, pte_t pte, int fullmm)
0905 {
0906     pte_t orig = *ptep;
0907 
0908     *ptep = pte;
0909     maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
0910 }
0911 
/* Normal PTE store: __set_pte_at() with fullmm == 0. */
#define set_pte_at(mm,addr,ptep,pte)    \
    __set_pte_at((mm), (addr), (ptep), (pte), 0)

/* Clear a PTE by storing the zero (pte_none) value. */
#define pte_clear(mm,addr,ptep)     \
    set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
/* Like pte_clear(), but the caller passes its fullmm flag through so a
 * full-mm teardown reaches the TLB batching code.
 */
#define pte_clear_not_present_full(mm,addr,ptep,fullmm) \
    __set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
/* When a present page moves between virtual addresses that differ in
 * bit 13 — presumably the D-cache alias/color bit on these chips — flush
 * the page from the D-caches first.  Skipped entirely when
 * tlb_type == hypervisor (sun4v).  Returns the (unmodified) pte.
 */
#define move_pte(pte, prot, old_addr, new_addr)             \
({                                  \
    pte_t newpte = (pte);                       \
    if (tlb_type != hypervisor && pte_present(pte)) {       \
        unsigned long this_pfn = pte_pfn(pte);          \
                                    \
        if (pfn_valid(this_pfn) &&              \
            (((old_addr) ^ (new_addr)) & (1 << 13)))        \
            flush_dcache_page_all(current->mm,      \
                          pfn_to_page(this_pfn));   \
    }                               \
    newpte;                             \
})
#endif
0938 
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* the kernel's master PGD */

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
              pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

/* Encode and de-code a swap entry.  Bit layout inside the entry value:
 * the 8-bit swap type sits at bit PAGE_SHIFT, the swap offset occupies
 * the bits above it; the low PAGE_SHIFT bits are left zero.
 */
#define __swp_type(entry)   (((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)   \
    ( (swp_entry_t) \
      { \
        (((long)(type) << PAGE_SHIFT) | \
                 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
      } )
#define __pte_to_swp_entry(pte)     ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)       ((pte_t) { (x).val })

int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)  (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)        (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)            (pfn & 0x0fffffffffffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
            unsigned long, pgprot_t);
0989 
0990 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
0991                      unsigned long from, unsigned long pfn,
0992                      unsigned long size, pgprot_t prot)
0993 {
0994     unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
0995     int space = GET_IOSPACE(pfn);
0996     unsigned long phys_base;
0997 
0998     phys_base = offset | (((unsigned long) space) << 32UL);
0999 
1000     return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
1001 }
1002 #define io_remap_pfn_range io_remap_pfn_range 
1003 
#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
                   unsigned long, unsigned long,
                   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

/* MMU/page-table setup entry points, implemented in arch mm code. */
void pgtable_cache_init(void);
void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

/* NOTE(review): presumably the "mem=" command-line override — confirm
 * against the early setup code that parses it.
 */
extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */