#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_H_

#include <asm/page.h>

#ifndef __ASSEMBLY__
/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def {
	unsigned int	shift;	/* number of bits */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	h_rpt_pgsize; /* H_RPT_INVALIDATE page size encoding */
	union {
		unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
		unsigned long	ap;	/* Ap encoding used by PowerISA 3.0 */
	};
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
#endif /* __ASSEMBLY__ */

/* 64-bit classic hash table MMU */
#include <asm/book3s/64/mmu-hash.h>

#ifndef __ASSEMBLY__
/*
 * ISA 3.0 partition and process table entry format
 */
struct prtb_entry {
	__be64 prtb0;
	__be64 prtb1;
};
extern struct prtb_entry *process_tb;

struct patb_entry {
	__be64 patb0;
	__be64 patb1;
};
extern struct patb_entry *partition_tb;

/* Bits in patb0 field */
#define PATB_HR		(1UL << 63)	/* host uses radix */
#define RPDB_MASK	0x0fffffffffffff00UL
#define RPDB_SHIFT	(1UL << 8)
#define RTS1_SHIFT	61		/* top 2 bits of radix tree size */
#define RTS1_MASK	(3UL << RTS1_SHIFT)
#define RTS2_SHIFT	5		/* bottom 3 bits of radix tree size */
#define RTS2_MASK	(7UL << RTS2_SHIFT)
#define RPDS_MASK	0x1f		/* root page dir. size field */
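/*
 * Illustrative only: for a radix partition, the first doubleword of a
 * partition table entry is composed roughly as
 *
 *	patb0 = PATB_HR | rts_field | __pa(root_pgdir) | root_pgd_size;
 *
 * where rts_field, root_pgdir and root_pgd_size are placeholders for the
 * radix tree size encoding (split across RTS1_MASK/RTS2_MASK), the root
 * page directory base (which lands in RPDB_MASK) and the root page
 * directory size (RPDS_MASK).
 */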

/* Bits in patb1 field */
#define PATB_GR		(1UL << 63)	/* guest uses radix; must match HR */
#define PRTS_MASK	0x1f		/* process table size field */
#define PRTB_MASK	0x0ffffffffffff000UL
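/*
 * Likewise illustrative: the second doubleword points at the partition's
 * process table, roughly
 *
 *	patb1 = PATB_GR | __pa(process_tb) | prts;
 *
 * where prts stands for the size encoding (log2 of the process table size
 * minus 12) held in PRTS_MASK, and the table base lands in PRTB_MASK.
 */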

/* Number of supported LPID bits */
extern unsigned int mmu_lpid_bits;

/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;

/* Base PID to allocate from */
extern unsigned int mmu_base_pid;

/*
 * Memory block size used with radix translation.
 */
extern unsigned long __ro_after_init radix_mem_block_size;

#define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
#define PRTB_ENTRIES	(1ul << mmu_pid_bits)

#define PATB_SIZE_SHIFT	(mmu_lpid_bits + 4)
#define PATB_ENTRIES	(1ul << mmu_lpid_bits)
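/*
 * Each process/partition table entry is a pair of doublewords (16 bytes),
 * so a table of (1 << bits) entries occupies 1 << (bits + 4) bytes; that
 * is where the "+ 4" in the size shifts above comes from.
 */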

typedef unsigned long mm_context_id_t;
struct spinlock;

/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8

typedef struct {
	union {
		/*
		 * For radix, "id" is used directly as the PIDR value. For
		 * hash, more than one id may be needed: one extended id is
		 * allocated for each MAX_EA_BITS_PER_CONTEXT-sized chunk of
		 * the address space above the first, and combined with the
		 * effective address to build the VA.
		 */
		mm_context_id_t id;
#ifdef CONFIG_PPC_64S_HASH_MMU
		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
#endif
	};

	/* Number of CPUs set in mm_cpumask */
	atomic_t active_cpus;

	/* Number of users of the external (Nest) MMU */
	atomic_t copros;

	/* Number of user space windows opened in process mm_context */
	atomic_t vas_windows;
#ifdef CONFIG_PPC_64S_HASH_MMU
	struct hash_mm_context *hash_context;
#endif

	void __user *vdso;
	/*
	 * pagetable fragment support
	 */
	void *pte_frag;
	void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;
#endif

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * Each bit represents one protection key.
	 * bit set   -> key allocated
	 * bit unset -> key available for allocation
	 */
	u32 pkey_allocation_map;
	s16 execute_only_pkey; /* key holding execute-only protection */
#endif
} mm_context_t;

#ifdef CONFIG_PPC_64S_HASH_MMU
static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
{
	return ctx->hash_context->user_psize;
}

static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
{
	ctx->hash_context->user_psize = user_psize;
}

static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
{
	return ctx->hash_context->low_slices_psize;
}

static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
{
	return ctx->hash_context->high_slices_psize;
}

static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
{
	return ctx->hash_context->slb_addr_limit;
}

static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
{
	ctx->hash_context->slb_addr_limit = limit;
}

static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
{
#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		return &ctx->hash_context->mask_64k;
#endif
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_16M)
		return &ctx->hash_context->mask_16m;
	if (psize == MMU_PAGE_16G)
		return &ctx->hash_context->mask_16g;
#endif
	BUG_ON(psize != MMU_PAGE_4K);

	return &ctx->hash_context->mask_4k;
}

#ifdef CONFIG_PPC_SUBPAGE_PROT
static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
{
	return ctx->hash_context->spt;
}
#endif

/*
 * The current system page and segment sizes
 */
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_io_psize;
#else /* CONFIG_PPC_64S_HASH_MMU */
#ifdef CONFIG_PPC_64K_PAGES
#define mmu_virtual_psize MMU_PAGE_64K
#else
#define mmu_virtual_psize MMU_PAGE_4K
#endif
#endif /* CONFIG_PPC_64S_HASH_MMU */
extern int mmu_linear_psize;
extern int mmu_vmemmap_psize;

/* MMU initialization */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
#ifdef CONFIG_PPC_PKEY
void pkey_early_init_devtree(void);
#else
static inline void pkey_early_init_devtree(void) {}
#endif

extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
static inline void __init early_init_mmu(void)
{
	if (radix_enabled())
		return radix__early_init_mmu();
	return hash__early_init_mmu();
}
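/*
 * radix_enabled() reflects the MMU type selected from firmware/device tree
 * features during early boot; the same radix-vs-hash dispatch is repeated
 * below for secondary CPU bring-up.
 */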
extern void hash__early_init_mmu_secondary(void);
extern void radix__early_init_mmu_secondary(void);
static inline void early_init_mmu_secondary(void)
{
	if (radix_enabled())
		return radix__early_init_mmu_secondary();
	return hash__early_init_mmu_secondary();
}

extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					     phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size)
{
	/*
	 * Hash places stricter limits on the first memblock. At this point
	 * we don't yet know which translation mode will be used, so apply
	 * the hash restrictions.
	 */
	if (!early_radix_enabled())
		hash__setup_initial_memory_limit(first_memblock_base,
						 first_memblock_size);
}

#ifdef CONFIG_PPC_PSERIES
void __init radix_init_pseries(void);
#else
static inline void radix_init_pseries(void) { }
#endif

#ifdef CONFIG_HOTPLUG_CPU
/*
 * When a CPU is offlined, drop it from the mm_cpumask and keep the
 * active_cpus count in sync.
 */
#define arch_clear_mm_cpumask_cpu(cpu, mm)				\
	do {								\
		if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {		\
			atomic_dec(&(mm)->context.active_cpus);		\
			cpumask_clear_cpu(cpu, mm_cpumask(mm));		\
		}							\
	} while (0)

void cleanup_cpu_mmu_context(void);
#endif

#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	if (likely(index < ARRAY_SIZE(ctx->extended_id)))
		return ctx->extended_id[index];

	/* should never happen */
	WARN_ON(1);
	return 0;
}
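/*
 * Note: each MAX_EA_BITS_PER_CONTEXT-sized chunk of the user address space
 * has its own context id; extended_id[0] aliases "id" through the union in
 * mm_context_t, so the lowest chunk uses the primary context.
 */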

static inline unsigned long get_user_vsid(mm_context_t *ctx,
					  unsigned long ea, int ssize)
{
	unsigned long context = get_user_context(ctx, ea);

	return get_vsid(context, ea, ssize);
}
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */