Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
0003 #define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
0004 
0005 /*
0006  * 32-bit hash table MMU support
0007  */
0008 
0009 /*
0010  * BATs
0011  */
0012 
0013 /* Block size masks */
0014 #define BL_128K 0x000
0015 #define BL_256K 0x001
0016 #define BL_512K 0x003
0017 #define BL_1M   0x007
0018 #define BL_2M   0x00F
0019 #define BL_4M   0x01F
0020 #define BL_8M   0x03F
0021 #define BL_16M  0x07F
0022 #define BL_32M  0x0FF
0023 #define BL_64M  0x1FF
0024 #define BL_128M 0x3FF
0025 #define BL_256M 0x7FF
0026 
0027 /* BAT Access Protection */
0028 #define BPP_XX  0x00        /* No access */
0029 #define BPP_RX  0x01        /* Read only */
0030 #define BPP_RW  0x02        /* Read/write */
0031 
0032 #ifndef __ASSEMBLY__
/*
 * Contort a phys_addr_t into the right format/bits for a BAT.
 * With CONFIG_PHYS_64BIT the physical address bits above 4GB (bits 32-35
 * per the 0xe00000000/0x100000000 masks) are folded into low-order BAT
 * bits; PHYS_BAT_ADDR performs the inverse unpacking.  Macro arguments
 * are fully parenthesized so expression arguments expand with the
 * intended precedence.
 */
#ifdef CONFIG_PHYS_64BIT
#define BAT_PHYS_ADDR(x) ((u32)(((x) & 0x00000000fffe0000ULL) | \
                (((x) & 0x0000000e00000000ULL) >> 24) | \
                (((x) & 0x0000000100000000ULL) >> 30)))
#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \
              (((u64)(x) << 24) & 0x0000000e00000000ULL) | \
              (((u64)(x) << 30) & 0x0000000100000000ULL))
#else
#define BAT_PHYS_ADDR(x) (x)
#define PHYS_BAT_ADDR(x) ((x) & 0xfffe0000)
#endif
0045 
/* Software image of one BAT (Block Address Translation) register pair */
struct ppc_bat {
    u32 batu;   /* upper BAT word */
    u32 batl;   /* lower BAT word */
};
0050 #endif /* !__ASSEMBLY__ */
0051 
0052 /*
0053  * Hash table
0054  */
0055 
0056 /* Values for PP (assumes Ks=0, Kp=1) */
0057 #define PP_RWXX 0   /* Supervisor read/write, User none */
0058 #define PP_RWRX 1   /* Supervisor read/write, User read */
0059 #define PP_RWRW 2   /* Supervisor read/write, User read/write */
0060 #define PP_RXRX 3   /* Supervisor read,       User read */
0061 
0062 /* Values for Segment Registers */
0063 #define SR_NX   0x10000000  /* No Execute */
0064 #define SR_KP   0x20000000  /* User key */
0065 #define SR_KS   0x40000000  /* Supervisor key */
0066 
0067 #ifdef __ASSEMBLY__
0068 
0069 #include <asm/asm-offsets.h>
0070 
/*
 * Conditionally emit "addi \reg1, \reg2, \imm": the instruction is only
 * assembled when segment \sr is below NUM_USER_SEGMENTS, so work for
 * unused segments is elided at build time.
 */
.macro uus_addi sr reg1 reg2 imm
    .if NUM_USER_SEGMENTS > \sr
    addi    \reg1,\reg2,\imm
    .endif
.endm
0076 
/*
 * Conditionally emit "mtsr \sr, \reg1": the write to segment register
 * \sr is only assembled when \sr is below NUM_USER_SEGMENTS.
 */
.macro uus_mtsr sr reg1
    .if NUM_USER_SEGMENTS > \sr
    mtsr    \sr, \reg1
    .endif
.endm
0082 
/*
 * This isync() shouldn't be necessary, as the kernel is not expected to run
 * any instruction in userspace soon after the update of segments, and the
 * 'rfi' instruction is used to return to userspace; but hash-based cores
 * (at least G3) seem to exhibit random behaviour when the 'isync' is not
 * there. 603 cores don't have this behaviour, so skipping the 'isync' there
 * saves several CPU cycles.
 */
.macro uus_isync
#ifdef CONFIG_PPC_BOOK3S_604
/* Feature section: the isync is only kept when MMU_FTR_HPTE_TABLE is set */
BEGIN_MMU_FTR_SECTION
    isync
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
.endm
0098 
/*
 * Write user segment registers 0-15 in batches of four, starting from the
 * base value in \tmp1.  Segment n receives base + n * 0x111, the same
 * per-segment skew used by the C-side update_user_segment(); the uus_*
 * helpers elide any segment at or above NUM_USER_SEGMENTS.  Ends with a
 * conditional isync (see uus_isync).
 */
.macro update_user_segments_by_4 tmp1 tmp2 tmp3 tmp4
    /* Derive values for segments 1-3 from the base in \tmp1 */
    uus_addi    1, \tmp2, \tmp1, 0x111
    uus_addi    2, \tmp3, \tmp1, 0x222
    uus_addi    3, \tmp4, \tmp1, 0x333

    uus_mtsr    0, \tmp1
    uus_mtsr    1, \tmp2
    uus_mtsr    2, \tmp3
    uus_mtsr    3, \tmp4

    /* Each batch advances all four values by 0x444 (= 4 * 0x111) */
    uus_addi    4, \tmp1, \tmp1, 0x444
    uus_addi    5, \tmp2, \tmp2, 0x444
    uus_addi    6, \tmp3, \tmp3, 0x444
    uus_addi    7, \tmp4, \tmp4, 0x444

    uus_mtsr    4, \tmp1
    uus_mtsr    5, \tmp2
    uus_mtsr    6, \tmp3
    uus_mtsr    7, \tmp4

    uus_addi    8, \tmp1, \tmp1, 0x444
    uus_addi    9, \tmp2, \tmp2, 0x444
    uus_addi    10, \tmp3, \tmp3, 0x444
    uus_addi    11, \tmp4, \tmp4, 0x444

    uus_mtsr    8, \tmp1
    uus_mtsr    9, \tmp2
    uus_mtsr    10, \tmp3
    uus_mtsr    11, \tmp4

    uus_addi    12, \tmp1, \tmp1, 0x444
    uus_addi    13, \tmp2, \tmp2, 0x444
    uus_addi    14, \tmp3, \tmp3, 0x444
    uus_addi    15, \tmp4, \tmp4, 0x444

    uus_mtsr    12, \tmp1
    uus_mtsr    13, \tmp2
    uus_mtsr    14, \tmp3
    uus_mtsr    15, \tmp4

    uus_isync
.endm
0141 
0142 #else
0143 
0144 /*
0145  * This macro defines the mapping from contexts to VSIDs (virtual
0146  * segment IDs).  We use a skew on both the context and the high 4 bits
0147  * of the 32-bit virtual address (the "effective segment ID") in order
0148  * to spread out the entries in the MMU hash table.  Note, if this
0149  * function is changed then hash functions will have to be
0150  * changed to correspond.
0151  */
/*
 * Both macro arguments are parenthesized so that expression arguments
 * (e.g. a shifted effective address as 'id') expand with the intended
 * precedence.
 */
#define CTX_TO_VSID(c, id)  ((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)
0153 
0154 /*
0155  * Hardware Page Table Entry
0156  * Note that the xpn and x bitfields are used only by processors that
0157  * support extended addressing; otherwise, those bits are reserved.
0158  */
struct hash_pte {
    /* Word 0: v + vsid + h + api = 1 + 24 + 1 + 6 = 32 bits */
    unsigned long v:1;  /* Entry is valid */
    unsigned long vsid:24;  /* Virtual segment identifier */
    unsigned long h:1;  /* Hash algorithm indicator */
    unsigned long api:6;    /* Abbreviated page index */
    /* Word 1: rpn through pp = 32 bits */
    unsigned long rpn:20;   /* Real (physical) page number */
    unsigned long xpn:3;    /* Real page number bits 0-2, optional */
    unsigned long r:1;  /* Referenced */
    unsigned long c:1;  /* Changed */
    unsigned long w:1;  /* Write-thru cache mode */
    unsigned long i:1;  /* Cache inhibited */
    unsigned long m:1;  /* Memory coherence */
    unsigned long g:1;  /* Guarded */
    unsigned long x:1;  /* Real page number bit 3, optional */
    unsigned long pp:2; /* Page protection */
};
0175 
/* Per-mm MMU context for hash32 */
typedef struct {
    unsigned long id;   /* context number, input to CTX_TO_VSID() */
    unsigned long sr0;  /* base segment register value (SR_NX under CONFIG_PPC_KUEP, see INIT_MM_CONTEXT) */
    void __user *vdso;  /* NOTE(review): presumably the user-space VDSO base -- confirm against vdso setup code */
} mm_context_t;
0181 
0182 #ifdef CONFIG_PPC_KUEP
0183 #define INIT_MM_CONTEXT(mm) .context.sr0 = SR_NX
0184 #endif
0185 
0186 void update_bats(void);
/* No per-CPU MMU context teardown is needed on hash32. */
static inline void cleanup_cpu_mmu_context(void)
{
}
0188 
0189 /* patch sites */
0190 extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
0191 extern s32 patch__hash_page_B, patch__hash_page_C;
0192 extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
0193 extern s32 patch__flush_hash_B;
0194 
0195 #include <asm/reg.h>
0196 #include <asm/task_size_32.h>
0197 
0198 static __always_inline void update_user_segment(u32 n, u32 val)
0199 {
0200     if (n << 28 < TASK_SIZE)
0201         mtsr(val + n * 0x111, n << 28);
0202 }
0203 
/*
 * Load all 16 user segment registers with 'val', skewed by n * 0x111 per
 * segment (see update_user_segment(), which skips segments at or above
 * TASK_SIZE).  The calls are manually unrolled with constant segment
 * numbers so each call can be resolved individually at compile time.
 *
 * The mask clears bits 24-27, between the 24-bit VSID field and the
 * SR flag bits (SR_NX/SR_KP/SR_KS) -- NOTE(review): presumably these
 * must stay clear; confirm against the segment register layout.
 */
static __always_inline void update_user_segments(u32 val)
{
    val &= 0xf0ffffff;

    update_user_segment(0, val);
    update_user_segment(1, val);
    update_user_segment(2, val);
    update_user_segment(3, val);
    update_user_segment(4, val);
    update_user_segment(5, val);
    update_user_segment(6, val);
    update_user_segment(7, val);
    update_user_segment(8, val);
    update_user_segment(9, val);
    update_user_segment(10, val);
    update_user_segment(11, val);
    update_user_segment(12, val);
    update_user_segment(13, val);
    update_user_segment(14, val);
    update_user_segment(15, val);
}
0225 
0226 int __init find_free_bat(void);
0227 unsigned int bat_block_size(unsigned long base, unsigned long top);
0228 #endif /* !__ASSEMBLY__ */
0229 
0230 /* We happily ignore the smaller BATs on 601, we don't actually use
0231  * those definitions on hash32 at the moment anyway
0232  */
0233 #define mmu_virtual_psize   MMU_PAGE_4K
0234 #define mmu_linear_psize    MMU_PAGE_256M
0235 
0236 #endif /* _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ */