// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "internal.h"

static DEFINE_IDA(mmu_context_ida);

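/*
 * A single IDA backs every id handed out by this file: hash MMU context
 * ids (including the extended ids of a large address space) and, when
 * radix is enabled, the hardware PIDs. alloc_context_id() allocates the
 * next free id within the caller-supplied [min_id, max_id] range.
 */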
static int alloc_context_id(int min_id, int max_id)
{
    return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
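/*
 * Permanently claim a specific context id on behalf of early boot code.
 * This is expected to succeed; a failure means the id was already handed
 * out, so warn rather than fail silently.
 */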
void __init hash__reserve_context_id(int id)
{
    int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

    WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

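/*
 * Allocate a fresh user context id. The top of the range depends on how
 * many VA bits the hash MMU supports: with 68-bit VA we can use the full
 * MAX_USER_CONTEXT range, otherwise we must stay within the smaller
 * 65-bit-VA limit.
 */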
int hash__alloc_context_id(void)
{
    unsigned long max;

    if (mmu_has_feature(MMU_FTR_68_BIT_VA))
        max = MAX_USER_CONTEXT;
    else
        max = MAX_USER_CONTEXT_65BIT_VA;

    return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
#endif

#ifdef CONFIG_PPC_64S_HASH_MMU
static int realloc_context_ids(mm_context_t *ctx)
{
    int i, id;

    /*
     * id 0 (aka. ctx->id) is special: we always allocate a new one, even if
     * there wasn't one allocated previously (which happens in the exec
     * case where ctx is newly allocated).
     *
     * We have to be a bit careful here. We must keep the existing ids in
     * the array, so that we can test if they're non-zero to decide if we
     * need to allocate a new one. However, in case of error we must free the
     * ids we've allocated but *not* any of the existing ones (or risk a
     * UAF). That's why we decrement i at the start of the error handling
     * loop, to skip the id that we just tested but couldn't reallocate.
     */
    for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
        if (i == 0 || ctx->extended_id[i]) {
            id = hash__alloc_context_id();
            if (id < 0)
                goto error;

            ctx->extended_id[i] = id;
        }
    }

    /* The caller expects us to return id */
    return ctx->id;

error:
    for (i--; i >= 0; i--) {
        if (ctx->extended_id[i])
            ida_free(&mmu_context_ida, ctx->extended_id[i]);
    }

    return id;
}

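/*
 * Set up the hash MMU portion of a new mm: allocate its hash_mm_context
 * (zeroed and given fresh slice state on exec, copied from the parent on
 * fork) and then allocate the context id(s) it will run with.
 */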
static int hash__init_new_context(struct mm_struct *mm)
{
    int index;

    mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
                       GFP_KERNEL);
    if (!mm->context.hash_context)
        return -ENOMEM;

    /*
     * The old code would re-promote on fork; we don't do that when using
     * slices as it could cause problems promoting slices that have been
     * forced down to 4K.
     *
     * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
     * explicitly against context.id == 0. This ensures that we properly
     * initialize context slice details for newly allocated mm's (which will
     * have id == 0) and don't alter context slice inherited via fork (which
     * will have id != 0).
     *
     * We should not be calling init_new_context() on init_mm. Hence a
     * check against 0 is OK.
     */
    if (mm->context.id == 0) {
        memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
        slice_init_new_context_exec(mm);
    } else {
        /* This is fork. Copy hash_context details from current->mm */
        memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
        /* Inherit subpage prot details if we have one. */
        if (current->mm->context.hash_context->spt) {
            mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
                                GFP_KERNEL);
            if (!mm->context.hash_context->spt) {
                kfree(mm->context.hash_context);
                return -ENOMEM;
            }
        }
#endif
    }

    index = realloc_context_ids(&mm->context);
    if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
        kfree(mm->context.hash_context->spt);
#endif
        kfree(mm->context.hash_context);
        return index;
    }

    pkey_mm_init(mm);
    return index;
}

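/*
 * Per-exec hash MMU setup: refresh the slice state and redo the SLB
 * setup for the new executable image.
 */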
void hash__setup_new_exec(void)
{
    slice_setup_new_exec();

    slb_setup_new_exec();
}
#else
static inline int hash__init_new_context(struct mm_struct *mm)
{
    BUILD_BUG();
    return 0;
}
#endif

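/*
 * On radix the context id doubles as the hardware PID. Allocate one and
 * point the corresponding process table entry at this mm's page table so
 * the MMU can find it once the PID register is loaded.
 */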
static int radix__init_new_context(struct mm_struct *mm)
{
    unsigned long rts_field;
    int index, max_id;

    max_id = (1 << mmu_pid_bits) - 1;
    index = alloc_context_id(mmu_base_pid, max_id);
    if (index < 0)
        return index;

    /* Set the process table entry. */
    rts_field = radix__get_tree_size();
    process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

    /*
     * Order the above store with subsequent update of the PID
     * register (at which point HW can start loading/caching
     * the entry) and the corresponding load by the MMU from
     * the L2 cache.
     */
    asm volatile("ptesync;isync" : : : "memory");

#ifdef CONFIG_PPC_64S_HASH_MMU
    mm->context.hash_context = NULL;
#endif

    return index;
}

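/*
 * Arch hook invoked by the core mm code whenever a new mm_struct is
 * created (fork and exec): pick the radix or hash path for id allocation
 * and initialise the remaining per-context state.
 */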
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
    int index;

    if (radix_enabled())
        index = radix__init_new_context(mm);
    else
        index = hash__init_new_context(mm);

    if (index < 0)
        return index;

    mm->context.id = index;

    mm->context.pte_frag = NULL;
    mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
    mm_iommu_init(mm);
#endif
    atomic_set(&mm->context.active_cpus, 0);
    atomic_set(&mm->context.copros, 0);

    return 0;
}

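/* Return a single context id (or radix PID) to the allocator. */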
void __destroy_context(int context_id)
{
    ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);

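/*
 * Free every id owned by this context: the single PID on radix, or all
 * of the extended context ids plus the hash_mm_context on hash.
 */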
static void destroy_contexts(mm_context_t *ctx)
{
    if (radix_enabled()) {
        ida_free(&mmu_context_ida, ctx->id);
    } else {
#ifdef CONFIG_PPC_64S_HASH_MMU
        int index, context_id;

        for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
            context_id = ctx->extended_id[index];
            if (context_id)
                ida_free(&mmu_context_ida, context_id);
        }
        kfree(ctx->hash_context);
#else
        BUILD_BUG(); // radix_enabled() should be constant true
#endif
    }
}

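/*
 * Release the partially used PMD fragment page cached in the context:
 * drop the references for the fragments that were never handed out, and
 * free the page if all previously handed-out fragments are gone too.
 */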
static void pmd_frag_destroy(void *pmd_frag)
{
    int count;
    struct page *page;

    page = virt_to_page(pmd_frag);
    /* drop all the pending references */
    count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
    /* We allow PMD_FRAG_NR fragments from a PMD page */
    if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
        pgtable_pmd_page_dtor(page);
        __free_page(page);
    }
}

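/* Free any cached PTE/PMD fragment pages still attached to this mm. */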
static void destroy_pagetable_cache(struct mm_struct *mm)
{
    void *frag;

    frag = mm->context.pte_frag;
    if (frag)
        pte_frag_destroy(frag);

    frag = mm->context.pmd_frag;
    if (frag)
        pmd_frag_destroy(frag);
}

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
    WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
    /*
     * For tasks which were successfully initialized we end up calling
     * arch_exit_mmap(), which clears the process table entry. And
     * arch_exit_mmap() is called before the required fullmm TLB flush,
     * which does a RIC=2 flush. Hence for an initialized task, any cached
     * process table entries have already been cleared by the time we get
     * here.
     *
     * The condition below handles the error case during task init. We have
     * set the process table entry early and if we fail a task
     * initialization, we need to ensure the process table entry is zeroed.
     * We need not worry about process table entry caches because the task
     * never ran with the PID value.
     */
    if (radix_enabled())
        process_tb[mm->context.id].prtb0 = 0;
    else
        subpage_prot_free(mm);
    destroy_contexts(&mm->context);
    mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
    destroy_pagetable_cache(mm);

    if (radix_enabled()) {
        /*
         * Radix doesn't have a valid bit in the process table
         * entries. However, we know that at least the P9 implementation
         * will avoid caching an entry with an invalid RTS field,
         * and 0 is invalid. So this will do.
         *
         * This runs before the "fullmm" tlb flush in exit_mmap,
         * which does a RIC=2 tlbie to clear the process table
         * entry. See the "fullmm" comments in tlb-radix.c.
         *
         * No barrier required here after the store because
         * this process will do the invalidate, which starts with
         * ptesync.
         */
        process_tb[mm->context.id].prtb0 = 0;
    }
}

#ifdef CONFIG_PPC_RADIX_MMU
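/*
 * Switch the MMU to the next mm: load its PID into the PID SPR and
 * context-synchronise with isync so translations under the new value
 * take effect before we return.
 */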
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
    mtspr(SPRN_PID, next->context.id);
    isync();
}
#endif

/**
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 *
 * This clears the CPU from mm_cpumask for all processes, and then flushes the
 * local TLB to ensure TLB coherency in case the CPU is onlined again.
 *
 * KVM guest translations are not necessarily flushed here. If KVM started
 * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
 */
#ifdef CONFIG_HOTPLUG_CPU
void cleanup_cpu_mmu_context(void)
{
    int cpu = smp_processor_id();

    clear_tasks_mm_cpumask(cpu);
    tlbiel_all();
}
#endif