Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _ASM_POWERPC_MMU_8XX_H_
0003 #define _ASM_POWERPC_MMU_8XX_H_
0004 /*
0005  * PPC8xx support
0006  */
0007 
0008 /* Control/status registers for the MPC8xx.
0009  * A write operation to these registers causes serialized access.
0010  * During software tablewalk, the registers used perform mask/shift-add
0011  * operations when written/read.  A TLB entry is created when the Mx_RPN
0012  * is written, and the contents of several registers are used to
0013  * create the entry.
0014  */
0015 #define SPRN_MI_CTR 784 /* Instruction TLB control register */
0016 #define MI_GPM      0x80000000  /* Set domain manager mode */
0017 #define MI_PPM      0x40000000  /* Set subpage protection */
0018 #define MI_CIDEF    0x20000000  /* Set cache inhibit when MMU dis */
0019 #define MI_RSV4I    0x08000000  /* Reserve 4 TLB entries */
0020 #define MI_PPCS     0x02000000  /* Use MI_RPN prob/priv state */
0021 #define MI_IDXMASK  0x00001f00  /* TLB index to be loaded */
0022 
0023 /* These are the Ks and Kp from the PowerPC books.  For proper operation,
0024  * Ks = 0, Kp = 1.
0025  */
0026 #define SPRN_MI_AP  786
0027 #define MI_Ks       0x80000000  /* Should not be set */
0028 #define MI_Kp       0x40000000  /* Should always be set */
0029 
0030 /*
0031  * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
0032  * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
0033  * respectively NA for All or X for Supervisor and no access for User.
0034  * Then we use the APG to say whether accesses are according to Page rules or
0035  * "all Supervisor" rules (Access to all)
0036  * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, say
0037  * "all User" rules, that will lead to NA for all.
0038  * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED
0039  * 0 => Kernel => 11 (all accesses performed according as user iaw page definition)
0040  * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
0041  * 2 => User => 11 (all accesses performed according as user iaw page definition)
0042  * 3 => User+Accessed => 10 (all accesses performed according to swapped page definition) for KUEP
0043  * 4-15 => Not Used
0044  */
0045 #define MI_APG_INIT 0xde000000
0046 
0047 /* The effective page number register.  When read, contains the information
0048  * about the last instruction TLB miss.  When MI_RPN is written, bits in
0049  * this register are used to create the TLB entry.
0050  */
0051 #define SPRN_MI_EPN 787
0052 #define MI_EPNMASK  0xfffff000  /* Effective page number for entry */
0053 #define MI_EVALID   0x00000200  /* Entry is valid */
0054 #define MI_ASIDMASK 0x0000000f  /* ASID match value */
0055                     /* Reset value is undefined */
0056 
0057 /* A "level 1" or "segment" or whatever you want to call it register.
0058  * For the instruction TLB, it contains bits that get loaded into the
0059  * TLB entry when the MI_RPN is written.
0060  */
0061 #define SPRN_MI_TWC 789
0062 #define MI_APG      0x000001e0  /* Access protection group (0) */
0063 #define MI_GUARDED  0x00000010  /* Guarded storage */
0064 #define MI_PSMASK   0x0000000c  /* Mask of page size bits */
0065 #define MI_PS8MEG   0x0000000c  /* 8M page size */
0066 #define MI_PS512K   0x00000004  /* 512K page size */
0067 #define MI_PS4K_16K 0x00000000  /* 4K or 16K page size */
0068 #define MI_SVALID   0x00000001  /* Segment entry is valid */
0069                     /* Reset value is undefined */
0070 
0071 /* Real page number.  Defined by the pte.  Writing this register
0072  * causes a TLB entry to be created for the instruction TLB, using
0073  * additional information from the MI_EPN, and MI_TWC registers.
0074  */
0075 #define SPRN_MI_RPN 790
0076 #define MI_SPS16K   0x00000008  /* Small page size (0 = 4k, 1 = 16k) */
0077 
0078 /* Define an RPN value for mapping kernel memory to large virtual
0079  * pages for boot initialization.  This has real page number of 0,
0080  * large page size, shared page, cache enabled, and valid.
0081  * Also mark all subpages valid and write access.
0082  */
0083 #define MI_BOOTINIT 0x000001fd
0084 
0085 #define SPRN_MD_CTR 792 /* Data TLB control register */
0086 #define MD_GPM      0x80000000  /* Set domain manager mode */
0087 #define MD_PPM      0x40000000  /* Set subpage protection */
0088 #define MD_CIDEF    0x20000000  /* Set cache inhibit when MMU dis */
0089 #define MD_WTDEF    0x10000000  /* Set writethrough when MMU dis */
0090 #define MD_RSV4I    0x08000000  /* Reserve 4 TLB entries */
0091 #define MD_TWAM     0x04000000  /* Use 4K page hardware assist */
0092 #define MD_PPCS     0x02000000  /* Use MI_RPN prob/priv state */
0093 #define MD_IDXMASK  0x00001f00  /* TLB index to be loaded */
0094 
0095 #define SPRN_M_CASID    793 /* Address space ID (context) to match */
0096 #define MC_ASIDMASK 0x0000000f  /* Bits used for ASID value */
0097 
0098 
0099 /* These are the Ks and Kp from the PowerPC books.  For proper operation,
0100  * Ks = 0, Kp = 1.
0101  */
0102 #define SPRN_MD_AP  794
0103 #define MD_Ks       0x80000000  /* Should not be set */
0104 #define MD_Kp       0x40000000  /* Should always be set */
0105 
0106 /* See explanation above at the definition of MI_APG_INIT */
0107 #define MD_APG_INIT 0xdc000000
0108 #define MD_APG_KUAP 0xde000000
0109 
0110 /* The effective page number register.  When read, contains the information
0111  * about the last instruction TLB miss.  When MD_RPN is written, bits in
0112  * this register are used to create the TLB entry.
0113  */
0114 #define SPRN_MD_EPN 795
0115 #define MD_EPNMASK  0xfffff000  /* Effective page number for entry */
0116 #define MD_EVALID   0x00000200  /* Entry is valid */
0117 #define MD_ASIDMASK 0x0000000f  /* ASID match value */
0118                     /* Reset value is undefined */
0119 
0120 /* The pointer to the base address of the first level page table.
0121  * During a software tablewalk, reading this register provides the address
0122  * of the entry associated with MD_EPN.
0123  */
0124 #define SPRN_M_TWB  796
0125 #define M_L1TB      0xfffff000  /* Level 1 table base address */
0126 #define M_L1INDX    0x00000ffc  /* Level 1 index, when read */
0127                     /* Reset value is undefined */
0128 
0129 /* A "level 1" or "segment" or whatever you want to call it register.
0130  * For the data TLB, it contains bits that get loaded into the TLB entry
0131  * when the MD_RPN is written.  It also provides the hardware assist
0132  * for finding the PTE address during software tablewalk.
0133  */
0134 #define SPRN_MD_TWC 797
0135 #define MD_L2TB     0xfffff000  /* Level 2 table base address */
0136 #define MD_L2INDX   0xfffffe00  /* Level 2 index (*pte), when read */
0137 #define MD_APG      0x000001e0  /* Access protection group (0) */
0138 #define MD_GUARDED  0x00000010  /* Guarded storage */
0139 #define MD_PSMASK   0x0000000c  /* Mask of page size bits */
0140 #define MD_PS8MEG   0x0000000c  /* 8M page size */
0141 #define MD_PS512K   0x00000004  /* 512K page size */
0142 #define MD_PS4K_16K 0x00000000  /* 4K or 16K page size */
0143 #define MD_WT       0x00000002  /* Use writethrough page attribute */
0144 #define MD_SVALID   0x00000001  /* Segment entry is valid */
0145                     /* Reset value is undefined */
0146 
0147 
0148 /* Real page number.  Defined by the pte.  Writing this register
0149  * causes a TLB entry to be created for the data TLB, using
0150  * additional information from the MD_EPN, and MD_TWC registers.
0151  */
0152 #define SPRN_MD_RPN 798
0153 #define MD_SPS16K   0x00000008  /* Small page size (0 = 4k, 1 = 16k) */
0154 
0155 /* This is a temporary storage register that could be used to save
0156  * a processor working register during a tablewalk.
0157  */
0158 #define SPRN_M_TW   799
0159 
0160 #if defined(CONFIG_PPC_4K_PAGES)
0161 #define mmu_virtual_psize   MMU_PAGE_4K
0162 #elif defined(CONFIG_PPC_16K_PAGES)
0163 #define mmu_virtual_psize   MMU_PAGE_16K
0164 #define PTE_FRAG_NR     4
0165 #define PTE_FRAG_SIZE_SHIFT 12
0166 #define PTE_FRAG_SIZE       (1UL << 12)
0167 #else
0168 #error "Unsupported PAGE_SIZE"
0169 #endif
0170 
0171 #define mmu_linear_psize    MMU_PAGE_8M
0172 
0173 #define MODULES_VADDR   (PAGE_OFFSET - SZ_256M)
0174 #define MODULES_END PAGE_OFFSET
0175 
0176 #ifndef __ASSEMBLY__
0177 
0178 #include <linux/mmdebug.h>
0179 #include <linux/sizes.h>
0180 
0181 void mmu_pin_tlb(unsigned long top, bool readonly);
0182 
0183 typedef struct {
0184     unsigned int id;
0185     unsigned int active;
0186     void __user *vdso;
0187     void *pte_frag;
0188 } mm_context_t;
0189 
0190 #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
0191 #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
0192 
0193 /* Page size definitions, common between 32 and 64-bit
0194  *
0195  *    shift : is the "PAGE_SHIFT" value for that page size
0196  *    penc  : is the pte encoding mask
0197  *
0198  */
0199 struct mmu_psize_def {
0200     unsigned int    shift;  /* number of bits */
0201     unsigned int    enc;    /* PTE encoding */
0202     unsigned int    ind;    /* Corresponding indirect page size shift */
0203     unsigned int    flags;
0204 #define MMU_PAGE_SIZE_DIRECT    0x1 /* Supported as a direct size */
0205 #define MMU_PAGE_SIZE_INDIRECT  0x2 /* Supported as an indirect size */
0206 };
0207 
0208 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
0209 
0210 static inline int shift_to_mmu_psize(unsigned int shift)
0211 {
0212     int psize;
0213 
0214     for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
0215         if (mmu_psize_defs[psize].shift == shift)
0216             return psize;
0217     return -1;
0218 }
0219 
0220 static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
0221 {
0222     if (mmu_psize_defs[mmu_psize].shift)
0223         return mmu_psize_defs[mmu_psize].shift;
0224     BUG();
0225 }
0226 
0227 static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
0228                       unsigned int max_page_shift, unsigned long size)
0229 {
0230     if (end - addr < size)
0231         return false;
0232 
0233     if ((1UL << max_page_shift) < size)
0234         return false;
0235 
0236     if (!IS_ALIGNED(addr, size))
0237         return false;
0238 
0239     if (!IS_ALIGNED(PFN_PHYS(pfn), size))
0240         return false;
0241 
0242     return true;
0243 }
0244 
0245 static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
0246                              u64 pfn, unsigned int max_page_shift)
0247 {
0248     if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
0249         return SZ_512K;
0250     if (PAGE_SIZE == SZ_16K)
0251         return SZ_16K;
0252     if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
0253         return SZ_16K;
0254     return PAGE_SIZE;
0255 }
0256 #define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
0257 
0258 static inline int arch_vmap_pte_supported_shift(unsigned long size)
0259 {
0260     if (size >= SZ_512K)
0261         return 19;
0262     else if (size >= SZ_16K)
0263         return 14;
0264     else
0265         return PAGE_SHIFT;
0266 }
0267 #define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
0268 
0269 /* patch sites */
0270 extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
0271 extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
0272 
0273 #endif /* !__ASSEMBLY__ */
0274 
0275 #endif /* _ASM_POWERPC_MMU_8XX_H_ */