/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H
#define _ASM_POWERPC_NOHASH_32_PTE_44x_H
#ifdef __KERNEL__

/*
 * Definitions for PPC440
 *
 * Because of the 3 word TLB entries to support 36-bit addressing,
 * the attributes are difficult to map in such a fashion that they
 * are easily loaded during exception processing.  I decided to
 * organize the entry so the ERPN is the only portion in the
 * upper word of the PTE and the attribute bits below are packed
 * in as sensibly as they can be in the area below a 4KB page size
 * oriented RPN.  This at least makes it easy to load the RPN and
 * ERPN fields in the TLB. -Matt
 *
 * This isn't entirely true anymore, at least some bits are now
 * easier to move into the TLB from the PTE. -BenH.
 *
 * Note that these bits preclude future use of a page size
 * less than 4KB.
 *
 *
 * PPC 440 core has following TLB attribute fields;
 *
 *   TLB1:
 *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 *   RPN.................................  -  -  -  -  -  - ERPN.......
 *
 *   TLB2:
 *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 *   -  -  -  -  -    - U0 U1 U2 U3 W  I  M  G  E   - UX UW UR SX SW SR
 *
 * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
 * TLB2 storage attribute fields. Those are:
 *
 *   TLB2:
 *   0...10    11   12   13   14   15   16...31
 *   no change WL1  IL1I IL1D IL2I IL2D no change
 *
 * There are some constraints and options when deciding how to map
 * software bits into a TLB entry:
 *
 *   - PRESENT *must* be in the bottom three bits because swap cache
 *     entries use the top 29 bits for TLB2.
 *
 *   - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
 *     because it doesn't support SMP. However, some later 460 variants
 *     have -some- form of SMP support and so I keep the bit there for
 *     future use
 *
 * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
 * for memory protection related functions (see PTE structure in
 * include/asm-ppc/mmu.h).  The _PAGE_XXX definitions in this file map to the
 * above bits.  Note that the bit values are CPU specific, not architecture
 * specific.
 *
 * The kernel PTE entry holds an arch-dependent swp_entry structure under
 * certain situations. In other words, in such situations some portion of
 * the PTE bits are used as a swp_entry. In the PPC implementation, the
 * 3rd-24th LSBs are shared with swp_entry, however the bottom three LSBs
 * still hold protection values. That means the three protection bits are
 * reserved for both the PTE and the SWAP entry in the lowest three bits.
 *
 * Protection bits available for the SWAP entry:
 *  _PAGE_PRESENT
 *  _PAGE_HASHPTE (if HW has)
 * NOTE(review): the original comment says "three protection bits" but only
 * lists two — confirm against the swp_entry encoding for this platform.
 *
 * So those bits have to be inside the 0-2nd LSBs of the PTE.
 *
 */

#define _PAGE_PRESENT   0x00000001      /* S: PTE valid */
#define _PAGE_RW    0x00000002      /* S: Write permission */
#define _PAGE_EXEC  0x00000004      /* H: Execute permission */
#define _PAGE_ACCESSED  0x00000008      /* S: Page referenced */
#define _PAGE_DIRTY 0x00000010      /* S: Page dirty */
#define _PAGE_SPECIAL   0x00000020      /* S: Special page */
#define _PAGE_USER  0x00000040      /* S: User page */
#define _PAGE_ENDIAN    0x00000080      /* H: E bit */
#define _PAGE_GUARDED   0x00000100      /* H: G bit */
#define _PAGE_COHERENT  0x00000200      /* H: M bit */
#define _PAGE_NO_CACHE  0x00000400      /* H: I bit */
#define _PAGE_WRITETHRU 0x00000800      /* H: W bit */

/* No page size encoding in the linux PTE */
#define _PAGE_PSIZE     0

/* Composite kernel-mapping permission sets built from the bits above. */
#define _PAGE_KERNEL_RO     0
#define _PAGE_KERNEL_ROX    _PAGE_EXEC
#define _PAGE_KERNEL_RW     (_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX    (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

/* TODO: Add large page lowmem mapping support */
#define _PMD_PRESENT    0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD    (~PAGE_MASK)
#define _PMD_USER   0

/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK  0xffffffff00000000ULL

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED)
#if defined(CONFIG_SMP)
#define _PAGE_BASE  (_PAGE_BASE_NC | _PAGE_COHERENT)
#else
#define _PAGE_BASE  (_PAGE_BASE_NC)
#endif

/* Permission masks used to generate the __P and __S table */
#define PAGE_NONE   __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#endif /* __KERNEL__ */
#endif /*  _ASM_POWERPC_NOHASH_32_PTE_44x_H */