0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/clockchips.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/profile.h>
0011 #include <linux/delay.h>
0012 #include <linux/sched/mm.h>
0013 #include <linux/cpu.h>
0014
0015 #include <asm/cacheflush.h>
0016 #include <asm/switch_to.h>
0017 #include <asm/tlbflush.h>
0018 #include <asm/timer.h>
0019 #include <asm/oplib.h>
0020
0021 #include "irq.h"
0022 #include "kernel.h"
0023
0024 #define IRQ_IPI_SINGLE 12
0025 #define IRQ_IPI_MASK 13
0026 #define IRQ_IPI_RESCHED 14
0027 #define IRQ_CROSS_CALL 15
0028
/*
 * Atomically exchange *ptr with val using the SPARC "swap" instruction
 * and return the previous contents of *ptr.  Used below to publish a
 * secondary CPU's arrival in cpu_callin_map.
 */
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}
0037
/* Early per-CPU bringup hook; sun4m needs no work at this stage. */
void sun4m_cpu_pre_starting(void *arg)
{
}
0041
0042 void sun4m_cpu_pre_online(void *arg)
0043 {
0044 int cpuid = hard_smp_processor_id();
0045
0046
0047
0048
0049
0050
0051 swap_ulong(&cpu_callin_map[cpuid], 1);
0052
0053
0054 local_ops->cache_all();
0055 local_ops->tlb_all();
0056
0057
0058 __asm__ __volatile__("ld [%0], %%g6\n\t"
0059 : : "r" (¤t_set[cpuid])
0060 : "memory" );
0061
0062
0063 mmgrab(&init_mm);
0064 current->active_mm = &init_mm;
0065
0066 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
0067 mb();
0068 }
0069
0070
0071
0072
/* Boot-processor side of SMP init: unmask the per-CPU profile timer
 * IRQ, then flush caches before secondaries are started. */
void __init smp4m_boot_cpus(void)
{
	sun4m_unmask_profile_irq();
	local_ops->cache_all();
}
0078
/*
 * Start secondary CPU @i running @idle.  Points the PROM at this CPU's
 * slot in the startup trampoline, re-initializes the context-table
 * descriptor, kicks the CPU via prom_startcpu(), then busy-waits up to
 * ~2 seconds (10000 * 200us) for it to set cpu_callin_map[i].
 * Returns 0 on success, -ENODEV if the CPU never calls in.
 */
int smp4m_boot_one_cpu(int i, struct task_struct *idle)
{
	unsigned long *entry = &sun4m_cpu_startup;
	int timeout;
	int cpu_node;

	cpu_find_by_mid(i, &cpu_node);
	current_set[i] = task_thread_info(idle);

	/* Entry point is three words per CPU past sun4m_cpu_startup,
	 * indexed from CPU 1 — presumably matching the trampoline
	 * layout in trampoline.S (TODO: confirm against that file). */
	entry += ((i - 1) * 3);

	/* Re-initialize the context-table descriptor for each CPU:
	 * NOTE(review): rebuilt every call, apparently because
	 * prom_startcpu() trashes the structure — confirm. */
	smp_penguin_ctable.which_io = 0;
	smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
	smp_penguin_ctable.reg_size = 0;

	/* Fire up the CPU; flush caches first so it fetches fresh state. */
	printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
	local_ops->cache_all();
	prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);

	/* Wait for the secondary to announce itself in cpu_callin_map. */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_callin_map[i])
			break;
		udelay(200);
	}

	if (!(cpu_callin_map[i])) {
		printk(KERN_ERR "Processor %d is stuck.\n", i);
		return -ENODEV;
	}

	local_ops->cache_all();
	return 0;
}
0120
0121 void __init smp4m_smp_done(void)
0122 {
0123 int i, first;
0124 int *prev;
0125
0126
0127 first = 0;
0128 prev = &first;
0129 for_each_online_cpu(i) {
0130 *prev = i;
0131 prev = &cpu_data(i).next;
0132 }
0133 *prev = first;
0134 local_ops->cache_all();
0135
0136
0137 }
0138
/* Raise soft interrupt @level on @cpu via its per-CPU interrupt
 * controller's "set" register. */
static void sun4m_send_ipi(int cpu, int level)
{
	sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
}

/* Cross-CPU reschedule request. */
static void sun4m_ipi_resched(int cpu)
{
	sun4m_send_ipi(cpu, IRQ_IPI_RESCHED);
}

/* Generic single-function-call IPI. */
static void sun4m_ipi_single(int cpu)
{
	sun4m_send_ipi(cpu, IRQ_IPI_SINGLE);
}

/* Mask-based function-call IPI. */
static void sun4m_ipi_mask_one(int cpu)
{
	sun4m_send_ipi(cpu, IRQ_IPI_MASK);
}
0158
/* Shared parameter block for cross calls.  One call may be in flight
 * at a time, serialized by cross_call_lock below. */
static struct smp_funcall {
	void *func;		/* function the target CPUs run */
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;	/* always set to 0 by sun4m_cross_call() */
	unsigned long processors_in[SUN4M_NCPUS];	/* set when CPU enters handler */
	unsigned long processors_out[SUN4M_NCPUS];	/* set when CPU finishes */
} ccall_info;

/* Serializes access to ccall_info across CPUs. */
static DEFINE_SPINLOCK(cross_call_lock);
0171
0172
/*
 * Run @func(arg1..arg4, 0) on every CPU in @mask (minus ourselves).
 * Fills in ccall_info under cross_call_lock, sends IRQ_CROSS_CALL to
 * each target, then busy-waits first for every target to enter the
 * handler (processors_in) and then for every target to finish
 * (processors_out).  CPUs not in the effective mask have both flags
 * pre-set to 1 so the wait loops skip them.
 */
static void sun4m_cross_call(void *func, cpumask_t mask, unsigned long arg1,
			     unsigned long arg2, unsigned long arg3,
			     unsigned long arg4)
{
	register int ncpus = SUN4M_NCPUS;
	unsigned long flags;

	spin_lock_irqsave(&cross_call_lock, flags);

	/* Init function glue. */
	ccall_info.func = func;
	ccall_info.arg1 = arg1;
	ccall_info.arg2 = arg2;
	ccall_info.arg3 = arg3;
	ccall_info.arg4 = arg4;
	ccall_info.arg5 = 0;

	/* Init receive/complete mapping, plus fire the IPI's off. */
	{
		register int i;

		/* Never cross-call ourselves; restrict to online CPUs. */
		cpumask_clear_cpu(smp_processor_id(), &mask);
		cpumask_and(&mask, cpu_online_mask, &mask);
		for (i = 0; i < ncpus; i++) {
			if (cpumask_test_cpu(i, &mask)) {
				ccall_info.processors_in[i] = 0;
				ccall_info.processors_out[i] = 0;
				sun4m_send_ipi(i, IRQ_CROSS_CALL);
			} else {
				/* Pre-complete non-targets so the wait
				 * loops below fall straight through. */
				ccall_info.processors_in[i] = 1;
				ccall_info.processors_out[i] = 1;
			}
		}
	}

	{
		register int i;

		/* Wait for all targets to enter the handler... */
		i = 0;
		do {
			if (!cpumask_test_cpu(i, &mask))
				continue;
			while (!ccall_info.processors_in[i])
				barrier();
		} while (++i < ncpus);

		/* ...then wait for all of them to finish. */
		i = 0;
		do {
			if (!cpumask_test_cpu(i, &mask))
				continue;
			while (!ccall_info.processors_out[i])
				barrier();
		} while (++i < ncpus);
	}
	spin_unlock_irqrestore(&cross_call_lock, flags);
}
0229
0230
/*
 * IRQ_CROSS_CALL handler on a target CPU: mark ourselves "in" (which
 * releases the initiator's first wait loop), run the requested
 * function, then mark ourselves "out".  Flag ordering is the handshake
 * protocol with sun4m_cross_call() — do not reorder.
 */
void smp4m_cross_call_irq(void)
{
	void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
		     unsigned long) = ccall_info.func;
	int i = smp_processor_id();

	ccall_info.processors_in[i] = 1;
	func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
	     ccall_info.arg5);
	ccall_info.processors_out[i] = 1;
}
0242
/*
 * Per-CPU timer interrupt: acknowledge the timer (or re-arm the
 * one-shot profile IRQ when not in periodic mode), then dispatch to
 * this CPU's clockevent handler inside irq_enter()/irq_exit().
 */
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	struct clock_event_device *ce;
	int cpu = smp_processor_id();

	old_regs = set_irq_regs(regs);

	ce = &per_cpu(sparc32_clockevent, cpu);

	if (clockevent_state_periodic(ce))
		sun4m_clear_profile_irq(cpu);
	else
		sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */

	irq_enter();
	ce->event_handler(ce);
	irq_exit();

	set_irq_regs(old_regs);
}
0264
/* sun4m implementations of the sparc32 IPI operations. */
static const struct sparc32_ipi_ops sun4m_ipi_ops = {
	.cross_call = sun4m_cross_call,
	.resched = sun4m_ipi_resched,
	.single = sun4m_ipi_single,
	.mask_one = sun4m_ipi_mask_one,
};
0271
/* Install the sun4m IPI operations as the platform's IPI backend. */
void __init sun4m_init_smp(void)
{
	sparc32_ipi_ops = &sun4m_ipi_ops;
}