// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 */

#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>
#include <linux/pgtable.h>

#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
#include <asm/rtas.h>

#include "setup.h"

#ifndef CONFIG_SMP
#define boot_cpuid 0
#endif

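/*
 * Allocate early per-CPU paca data from memblock: @size bytes at @align
 * alignment, below @limit, node-local to @cpu where the topology is
 * already known. Panics on failure, since the kernel cannot run without
 * its pacas.
 */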
static void *__init alloc_paca_data(unsigned long size, unsigned long align,
                unsigned long limit, int cpu)
{
    void *ptr;
    int nid;

    /*
     * The boot_cpuid paca is allocated very early, before cpu_to_node
     * is up. Set bottom-up mode, because the boot CPU should be on
     * node-0, which will put its paca in the right place.
     */
    if (cpu == boot_cpuid) {
        nid = NUMA_NO_NODE;
        memblock_set_bottom_up(true);
    } else {
        nid = early_cpu_to_node(cpu);
    }

    ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                     limit, nid);
    if (!ptr)
        panic("cannot allocate paca data");

    if (cpu == boot_cpuid)
        memblock_set_bottom_up(false);

    return ptr;
}

#ifdef CONFIG_PPC_PSERIES

#define LPPACA_SIZE 0x400

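/*
 * Secure (SVM) guests must place lppacas in memory shared with the
 * ultravisor/hypervisor. A single page-aligned region covering all
 * possible CPUs is allocated and shared once, then handed out in
 * LPPACA_SIZE slices on each call.
 */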
static void *__init alloc_shared_lppaca(unsigned long size, unsigned long limit,
                    int cpu)
{
    size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
    static unsigned long shared_lppaca_size;
    static void *shared_lppaca;
    void *ptr;

    if (!shared_lppaca) {
        memblock_set_bottom_up(true);

        /*
         * See Documentation/powerpc/ultravisor.rst for more details.
         *
         * UV/HV data sharing is in PAGE_SIZE granularity. In order to
         * minimize the number of pages shared, align the allocation to
         * PAGE_SIZE.
         */
        shared_lppaca =
            memblock_alloc_try_nid(shared_lppaca_total_size,
                           PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
                           limit, NUMA_NO_NODE);
        if (!shared_lppaca)
            panic("cannot allocate shared data");

        memblock_set_bottom_up(false);
        uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
                  shared_lppaca_total_size >> PAGE_SHIFT);
    }

    ptr = shared_lppaca + shared_lppaca_size;
    shared_lppaca_size += size;

    /*
     * This is very early in boot, so no harm done if the kernel crashes at
     * this point.
     */
    BUG_ON(shared_lppaca_size > shared_lppaca_total_size);

    return ptr;
}

/*
 * See asm/lppaca.h for more detail.
 *
 * lppaca structures must be 1kB in size, L1 cache line aligned, and must
 * not cross a 4kB boundary. A 1kB size and 1kB alignment satisfy these
 * requirements.
 */
static inline void init_lppaca(struct lppaca *lppaca)
{
    BUILD_BUG_ON(sizeof(struct lppaca) != 640);

    *lppaca = (struct lppaca) {
        .desc = cpu_to_be32(0xd397d781),    /* "LpPa" */
        .size = cpu_to_be16(LPPACA_SIZE),
        .fpregs_in_use = 1,
        .slb_count = cpu_to_be16(64),
        .vmxregs_in_use = 0,
        .page_ins = 0, };
}

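/*
 * The lppaca is only meaningful when running under a hypervisor, so a
 * kernel that is itself in hypervisor mode (CPU_FTR_HVMODE) gets NULL
 * instead of an allocation.
 */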
static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
{
    struct lppaca *lp;

    BUILD_BUG_ON(sizeof(struct lppaca) > LPPACA_SIZE);

    if (early_cpu_has_feature(CPU_FTR_HVMODE))
        return NULL;

    if (is_secure_guest())
        lp = alloc_shared_lppaca(LPPACA_SIZE, limit, cpu);
    else
        lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);

    init_lppaca(lp);

    return lp;
}
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_64S_HASH_MMU
/*
 * 3 persistent SLBs are allocated here. The buffer will be zero
 * initially, hence all entries are invalid until we actually write them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
{
    struct slb_shadow *s;

    if (cpu != boot_cpuid) {
        /*
         * The boot CPU comes here before early_radix_enabled
         * is parsed (e.g., for disable_radix), so always
         * allocate for it; this is fixed up in free_unused_pacas.
         */
        if (early_radix_enabled())
            return NULL;
    }

    s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);

    s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
    s->buffer_length = cpu_to_be32(sizeof(*s));

    return s;
}
#endif /* CONFIG_PPC_64S_HASH_MMU */

#ifdef CONFIG_PPC_PSERIES
/**
 * new_rtas_args() - Allocates rtas args
 * @cpu:    CPU number
 * @limit:  Memory limit for this allocation
 *
 * Allocates a struct rtas_args and returns a pointer to it,
 * if not in hypervisor mode.
 *
 * Return:  Pointer to the allocated rtas_args,
 *      or NULL if the CPU is in hypervisor mode.
 */
static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit)
{
    limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX);

    if (early_cpu_has_feature(CPU_FTR_HVMODE))
        return NULL;

    return alloc_paca_data(sizeof(struct rtas_args), L1_CACHE_BYTES,
                   limit, cpu);
}
#endif /* CONFIG_PPC_PSERIES */

/* The paca is an array with one entry per processor. Each entry contains
 * an lppaca, which holds the information shared between the hypervisor
 * and Linux.
 * On systems with hardware multi-threading, there are two threads per
 * processor; the paca array must contain an entry for each thread. The
 * VPD areas report max logical processors = 2 * max physical processors;
 * the processor VPD array needs one entry per physical processor (not
 * per thread).
 */
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);

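/*
 * Fill in the fields of a freshly allocated paca that can be set this
 * early. Pointers to the lppaca, SLB shadow and RTAS args are hooked up
 * later, in allocate_paca().
 */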
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
    new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E
    new_paca->kernel_pgd = swapper_pg_dir;
#endif
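    /* Constant 0x8000; the spinlock code uses it as the lock-holder token
     * (see the lock_token field's description in asm/paca.h). */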
    new_paca->lock_token = 0x8000;
    new_paca->paca_index = cpu;
    new_paca->kernel_toc = kernel_toc_addr();
    new_paca->kernelbase = (unsigned long) _stext;
    /* Only set MSR:IR/DR when MMU is initialized */
    new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
    new_paca->hw_cpu_id = 0xffff;
    new_paca->kexec_state = KEXEC_STATE_NONE;
    new_paca->__current = &init_task;
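    /* Recognizable poison value; the real per-CPU offset is filled in
     * later, once per-CPU areas are set up (setup_per_cpu_areas()). */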
    new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_64S_HASH_MMU
    new_paca->slb_shadow_ptr = NULL;
#endif

#ifdef CONFIG_PPC_BOOK3E
    /* For now -- if we have threads this will be adjusted later */
    new_paca->tcd_ptr = &new_paca->tcd;
#endif

#ifdef CONFIG_PPC_PSERIES
    new_paca->rtas_args_reentrant = NULL;
#endif
}

/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
    /* Setup r13 */
    local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
    /* On Book3E, initialize the TLB miss exception frames */
    mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
    /*
     * In HV mode, we set up both HPACA and PACA to avoid problems
     * if we do a GET_PACA() before the feature fixups have been
     * applied.
     *
     * Normally you should test against CPU_FTR_HVMODE, but CPU features
     * are not yet set up when we first reach here.
     */
    if (mfmsr() & MSR_HV)
        mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
    mtspr(SPRN_SPRG_PACA, local_paca);
}

static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;

void __init allocate_paca_ptrs(void)
{
    paca_nr_cpu_ids = nr_cpu_ids;

    paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
    paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
    if (!paca_ptrs)
        panic("Failed to allocate %d bytes for paca pointers\n",
              paca_ptrs_size);
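    /*
     * memblock_alloc_raw() does not zero the block, so fill it with a
     * recognizable poison pattern, presumably to make any use of an
     * entry before allocate_paca() runs fail noisily.
     */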
    memset(paca_ptrs, 0x88, paca_ptrs_size);
}

void __init allocate_paca(int cpu)
{
    u64 limit;
    struct paca_struct *paca;

    BUG_ON(cpu >= paca_nr_cpu_ids);

#ifdef CONFIG_PPC_BOOK3S_64
    /*
     * We access pacas in real mode, and cannot take SLB faults
     * on them when in virtual mode, so allocate them accordingly.
     */
    limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
    limit = ppc64_rma_size;
#endif

    paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
                limit, cpu);
    paca_ptrs[cpu] = paca;

    initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
    paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_64S_HASH_MMU
    paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
#ifdef CONFIG_PPC_PSERIES
    paca->rtas_args_reentrant = new_rtas_args(cpu, limit);
#endif
    paca_struct_size += sizeof(struct paca_struct);
}

void __init free_unused_pacas(void)
{
    int new_ptrs_size;

    new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
    if (new_ptrs_size < paca_ptrs_size)
        memblock_phys_free(__pa(paca_ptrs) + new_ptrs_size,
                   paca_ptrs_size - new_ptrs_size);

    paca_nr_cpu_ids = nr_cpu_ids;
    paca_ptrs_size = new_ptrs_size;

#ifdef CONFIG_PPC_64S_HASH_MMU
    if (early_radix_enabled()) {
        /* Ugly fixup, see new_slb_shadow() */
        memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
                   sizeof(struct slb_shadow));
        paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
    }
#endif

    printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
            paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
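/*
 * Copy the mm context's slice page-size arrays into the current CPU's
 * paca, presumably so low-level SLB handling can consult them without
 * dereferencing the mm_struct.
 */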
void copy_mm_to_paca(struct mm_struct *mm)
{
    mm_context_t *context = &mm->context;

    VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
    memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
           LOW_SLICE_ARRAY_SZ);
    memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
           TASK_SLICE_ARRAY_SZ(context));
}
#endif /* CONFIG_PPC_64S_HASH_MMU */