Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *
0004  * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
0005  *    Elizabeth Clarke (beth@mips.com)
0006  *    Ralf Baechle (ralf@linux-mips.org)
0007  * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
0008  */
0009 #include <linux/kernel.h>
0010 #include <linux/sched.h>
0011 #include <linux/cpumask.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/compiler.h>
0014 #include <linux/sched/task_stack.h>
0015 #include <linux/smp.h>
0016 
0017 #include <linux/atomic.h>
0018 #include <asm/cacheflush.h>
0019 #include <asm/cpu.h>
0020 #include <asm/processor.h>
0021 #include <asm/hardirq.h>
0022 #include <asm/mmu_context.h>
0023 #include <asm/time.h>
0024 #include <asm/mipsregs.h>
0025 #include <asm/mipsmtregs.h>
0026 #include <asm/mips_mt.h>
0027 #include <asm/mips-cps.h>
0028 
/*
 * Copy the boot VPE's (VPE0) CP0 state into the currently selected
 * non-zero VPE so a secondary starts out matching VPE0.
 * Assumes the target TC/VPE has already been selected via settc() and
 * that the core is in MVPE configuration state (MVPCONTROL_VPC set),
 * as done by the caller in vsmp_smp_setup().
 */
static void __init smvp_copy_vpe_config(void)
{
    /* Mask interrupts, force kernel mode, and grant CP0 access */
    write_vpe_c0_status(
        (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);

    /* set config to be the same as vpe0, particularly kseg0 coherency alg */
    write_vpe_c0_config( read_c0_config());

    /* make sure there are no software interrupts pending */
    write_vpe_c0_cause(0);

    /* Propagate Config7 */
    write_vpe_c0_config7(read_c0_config7());

    /* Sync the cycle counter so the secondary's timebase matches VPE0 */
    write_vpe_c0_count(read_c0_count());
}
0045 
/*
 * Per-TC VPE initialisation during SMP setup.
 *
 * @tc:       TC index currently selected via settc()
 * @mvpconf0: cached MVPConf0 value (holds the highest VPE number)
 * @ncpu:     number of secondary CPUs discovered so far
 *
 * For each TC that maps 1:1 onto a VPE, deactivate every VPE except
 * VPE0 and record it as a possible/present CPU.  Returns the updated
 * secondary CPU count.  Must run in MVPE configuration state.
 */
static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
    unsigned int ncpu)
{
    /* TCs beyond the number of VPEs have no VPE of their own to set up */
    if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
        return ncpu;

    /* Deactivate all but VPE 0 */
    if (tc != 0) {
        unsigned long tmp = read_vpe_c0_vpeconf0();

        /* Clear VPA: VPE stays inactive until boot_secondary */
        tmp &= ~VPECONF0_VPA;

        /* master VPE */
        tmp |= VPECONF0_MVP;
        write_vpe_c0_vpeconf0(tmp);

        /* Record this as available CPU */
        set_cpu_possible(tc, true);
        set_cpu_present(tc, true);
        /* CPU numbers start at 1 for secondaries; 0 is the boot VPE */
        __cpu_number_map[tc]    = ++ncpu;
        __cpu_logical_map[ncpu] = tc;
    }

    /* Disable multi-threading with TC's */
    write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);

    /* Clone VPE0's CP0 state into the freshly recorded secondary VPE */
    if (tc != 0)
        smvp_copy_vpe_config();

    /* Remember which hardware VPE this logical CPU runs on */
    cpu_set_vpe_id(&cpu_data[ncpu], tc);

    return ncpu;
}
0079 
/*
 * Per-TC initialisation during SMP setup: bind TC @tc to a VPE (all
 * excess TCs are lumped onto the last VPE), mark it unallocated,
 * interrupt-exempt and halted.  TC0 is the boot TC and is left alone.
 * Assumes settc(@tc) has been called and the core is in configuration
 * state.  @mvpconf0 is the cached MVPConf0 value.
 */
static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
{
    unsigned long tmp;

    /* TC0 is already running the boot CPU; don't touch it */
    if (!tc)
        return;

    /* bind a TC to each VPE, May as well put all excess TC's
       on the last VPE */
    if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
        write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
    else {
        write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);

        /* and set XTC */
        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
    }

    tmp = read_tc_c0_tcstatus();

    /* mark not allocated and not dynamically allocatable */
    tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
    tmp |= TCSTATUS_IXMT;       /* interrupt exempt */
    write_tc_c0_tcstatus(tmp);

    /* Halt the TC; vsmp_boot_secondary() releases it when the CPU boots */
    write_tc_c0_tchalt(TCHALT_H);
}
0107 
0108 static void vsmp_init_secondary(void)
0109 {
0110     /* This is Malta specific: IPI,performance and timer interrupts */
0111     if (mips_gic_present())
0112         change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
0113                      STATUSF_IP4 | STATUSF_IP5 |
0114                      STATUSF_IP6 | STATUSF_IP7);
0115     else
0116         change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
0117                      STATUSF_IP6 | STATUSF_IP7);
0118 }
0119 
/*
 * Final per-CPU bring-up step on a secondary: arm the CP0 compare
 * register, enroll in the FPU-affinity mask if applicable, and enable
 * local interrupts.
 */
static void vsmp_smp_finish(void)
{
    /* Arm compare ~8 timer ticks ahead. CDFIXME: remove this? */
    write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
    /* If we have an FPU, enroll ourselves in the FPU-full mask */
    if (cpu_has_fpu)
        cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

    local_irq_enable();
}
0133 
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 * assumes a 1:1 mapping of TC => VPE
 *
 * @cpu:  logical CPU (== TC/VPE index under the 1:1 assumption)
 * @idle: idle task whose stack the secondary will run on
 *
 * The whole sequence runs with VPEs disabled and the core in
 * configuration state; statement order is significant.  Returns 0.
 */
static int vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
    struct thread_info *gp = task_thread_info(idle);
    dvpe();
    set_c0_mvpcontrol(MVPCONTROL_VPC);

    settc(cpu);

    /* restart */
    write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

    /* enable the tc this vpe/cpu will be running */
    write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);

    /* release the halt set in smvp_tc_init() */
    write_tc_c0_tchalt(0);

    /* enable the VPE */
    write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

    /* stack pointer */
    write_tc_gpr_sp( __KSTK_TOS(idle));

    /* global pointer */
    write_tc_gpr_gp((unsigned long)gp);

    /* make sure the new TC sees up-to-date thread_info contents */
    flush_icache_range((unsigned long)gp,
               (unsigned long)(gp + sizeof(struct thread_info)));

    /* finally out of configuration and into chaos */
    clear_c0_mvpcontrol(MVPCONTROL_VPC);

    evpe(EVPE_ENABLE);

    return 0;
}
0177 
0178 /*
0179  * Common setup before any secondaries are started
0180  * Make sure all CPU's are in a sensible state before we boot any of the
0181  * secondaries
0182  */
0183 static void __init vsmp_smp_setup(void)
0184 {
0185     unsigned int mvpconf0, ntc, tc, ncpu = 0;
0186     unsigned int nvpe;
0187 
0188 #ifdef CONFIG_MIPS_MT_FPAFF
0189     /* If we have an FPU, enroll ourselves in the FPU-full mask */
0190     if (cpu_has_fpu)
0191         cpumask_set_cpu(0, &mt_fpu_cpumask);
0192 #endif /* CONFIG_MIPS_MT_FPAFF */
0193     if (!cpu_has_mipsmt)
0194         return;
0195 
0196     /* disable MT so we can configure */
0197     dvpe();
0198     dmt();
0199 
0200     /* Put MVPE's into 'configuration state' */
0201     set_c0_mvpcontrol(MVPCONTROL_VPC);
0202 
0203     mvpconf0 = read_c0_mvpconf0();
0204     ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
0205 
0206     nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
0207     smp_num_siblings = nvpe;
0208 
0209     /* we'll always have more TC's than VPE's, so loop setting everything
0210        to a sensible state */
0211     for (tc = 0; tc <= ntc; tc++) {
0212         settc(tc);
0213 
0214         smvp_tc_init(tc, mvpconf0);
0215         ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
0216     }
0217 
0218     /* Release config state */
0219     clear_c0_mvpcontrol(MVPCONTROL_VPC);
0220 
0221     /* We'll wait until starting the secondaries before starting MVPE */
0222 
0223     printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
0224 }
0225 
/*
 * Prepare for SMP bring-up: apply MT-wide CPU option settings.
 * @max_cpus is unused here; the CPU count was fixed in vsmp_smp_setup().
 */
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
    mips_mt_set_cpuoptions();
}
0230 
/* SMP operations for MT (VSMP) mode, registered by platform setup code */
const struct plat_smp_ops vsmp_smp_ops = {
    .send_ipi_single    = mips_smp_send_ipi_single,
    .send_ipi_mask      = mips_smp_send_ipi_mask,
    .init_secondary     = vsmp_init_secondary,
    .smp_finish     = vsmp_smp_finish,
    .boot_secondary     = vsmp_boot_secondary,
    .smp_setup      = vsmp_smp_setup,
    .prepare_cpus       = vsmp_prepare_cpus,
};
0240