// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "internal.h"

static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}
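
/*
 * Both MMU modes draw from the one mmu_context_ida above: hash context ids
 * and radix PIDs therefore live in a single shared id namespace.
 */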

#ifdef CONFIG_PPC_64S_HASH_MMU
void __init hash__reserve_context_id(int id)
{
	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
#endif
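
/*
 * On hash, a single context id maps one 512TB chunk of the process address
 * space. An mm using addresses beyond 512TB gets one extra id per 512TB
 * chunk, kept in ctx->extended_id[] (extended_id[0] aliases ctx->id via a
 * union); realloc_context_ids() below re-allocates each one that is in use.
 */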

#ifdef CONFIG_PPC_64S_HASH_MMU
static int realloc_context_ids(mm_context_t *ctx)
{
	int i, id;

	/*
	 * id 0 (aka. ctx->id) is special: a new one is always allocated,
	 * even if none was allocated previously (which happens in the exec
	 * case, where ctx is freshly initialized).
	 *
	 * The existing ids are kept in the array so that non-zero slots can
	 * be detected and given a fresh allocation. Each slot is overwritten
	 * in place, so on failure the error path below frees only the ids
	 * allocated during this call: slots beyond the failure point were
	 * never touched and keep their previous values.
	 */
	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
		if (i == 0 || ctx->extended_id[i]) {
			id = hash__alloc_context_id();
			if (id < 0)
				goto error;

			ctx->extended_id[i] = id;
		}
	}

	/* The caller expects the base context id back. */
	return ctx->id;

error:
	for (i--; i >= 0; i--) {
		if (ctx->extended_id[i])
			ida_free(&mmu_context_ida, ctx->extended_id[i]);
	}

	return id;
}

static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices, as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which
	 * will have id == 0) and don't alter context slice inherited via
	 * fork (which will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm. */
		memcpy(mm->context.hash_context, current->mm->context.hash_context,
		       sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
		/* Allocate a fresh subpage prot table if the parent has one. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
#endif
	}

	index = realloc_context_ids(&mm->context);
	if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		kfree(mm->context.hash_context->spt);
#endif
		kfree(mm->context.hash_context);
		return index;
	}

	pkey_mm_init(mm);
	return index;
}

void hash__setup_new_exec(void)
{
	slice_setup_new_exec();

	slb_setup_new_exec();
}
#else
static inline int hash__init_new_context(struct mm_struct *mm)
{
	BUILD_BUG();
	return 0;
}
#endif
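
/*
 * On radix each mm is identified by a single hardware PID. Allocation
 * starts at mmu_base_pid so that reserved low PIDs (PID 0 belongs to the
 * kernel) are never handed out to user mms.
 */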

static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set up the process table entry for this PID.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with the subsequent update of the PID
	 * register (at which point the hardware can start loading/caching
	 * the entry) and with the corresponding load by the MMU from the
	 * L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

#ifdef CONFIG_PPC_64S_HASH_MMU
	mm->context.hash_context = NULL;
#endif

	return index;
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}
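
/*
 * init_new_context() is wired into the generic fork/exec path via
 * mm_init(); the matching teardown is destroy_context() below, called once
 * the last reference to the mm is dropped.
 */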

void __destroy_context(int context_id)
{
	ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);
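
/*
 * Radix has only the single PID to release; hash must release every
 * extended context id allocated for the address space, plus the
 * hash_mm_context itself.
 */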

static void destroy_contexts(mm_context_t *ctx)
{
	if (radix_enabled()) {
		ida_free(&mmu_context_ida, ctx->id);
	} else {
#ifdef CONFIG_PPC_64S_HASH_MMU
		int index, context_id;

		for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
			context_id = ctx->extended_id[index];
			if (context_id)
				ida_free(&mmu_context_ida, context_id);
		}
		kfree(ctx->hash_context);
#else
		BUILD_BUG();
#endif
	}
}
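
/*
 * A page backing PMD fragments enters the per-mm cache holding PMD_FRAG_NR
 * references, one per fragment it can hand out; freeing an issued fragment
 * drops one reference. At teardown we drop the references held by fragments
 * that were never handed out. Illustration (values are config-dependent):
 * if PMD_FRAG_NR were 16 and 5 fragments had been issued, the cached pointer
 * sits at fragment index 5 and the remaining 11 references are dropped here.
 */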
static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pmd_frag);
	/* The page offset of the next free fragment == number handed out. */
	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
	/* Drop the references held by the fragments that were never issued. */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	/*
	 * For tasks which were successfully initialized we end up calling
	 * arch_exit_mmap(), which clears the process table entry. And
	 * arch_exit_mmap() is called before the required fullmm TLB flush,
	 * which does a RIC=2 flush. Hence for an initialized task, we do
	 * clear any cached process table entries.
	 *
	 * The condition below handles the error case during task init. We
	 * have set the process table entry early and if we fail a task
	 * initialization, we need to ensure the process table entry is
	 * zeroed. We need not worry about process table entry caches,
	 * because the task never ran with this PID value.
	 */
	if (radix_enabled())
		process_tb[mm->context.id].prtb0 = 0;
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9
		 * implementation will avoid caching an entry with an
		 * invalid RTS field, and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" TLB flush in exit_mmap(),
		 * which does a RIC=2 flush, so clearing the entry first
		 * ensures no translation cached via the process table
		 * entry survives that final flush.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}
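
/*
 * Switching the radix context only requires programming the new PID into
 * SPRN_PID; the isync ensures no instruction after the switch executes in
 * the old translation context.
 */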
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	isync();
}
#endif

/**
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 *
 * This clears the CPU from mm_cpumask for all processes, and then flushes the
 * local TLB to ensure TLB coherency in case the CPU is onlined again.
 *
 * KVM guest translations are not necessarily flushed here. If KVM started
 * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
 */
#ifdef CONFIG_HOTPLUG_CPU
void cleanup_cpu_mmu_context(void)
{
	int cpu = smp_processor_id();

	clear_tasks_mm_cpumask(cpu);
	tlbiel_all();
}
#endif