// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc64 SMP support. */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"
#include "kernel.h"

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS - 1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);

static cpumask_t smp_commenced_mask;

static DEFINE_PER_CPU(bool, poke);
static bool cpu_poke;
0080
0081 void smp_info(struct seq_file *m)
0082 {
0083 int i;
0084
0085 seq_printf(m, "State:\n");
0086 for_each_online_cpu(i)
0087 seq_printf(m, "CPU%d:\t\tonline\n", i);
0088 }
0089
0090 void smp_bogo(struct seq_file *m)
0091 {
0092 int i;
0093
0094 for_each_online_cpu(i)
0095 seq_printf(m,
0096 "Cpu%dClkTck\t: %016lx\n",
0097 i, cpu_data(i).clock_tick);
0098 }
0099
0100 extern void setup_sparc64_timer(void);
0101
0102 static volatile unsigned long callin_flag = 0;
0103
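/* Entry point for a freshly booted secondary cpu: set up its per-cpu
 * offset, MMU/TSB state and timer, announce itself via callin_flag,
 * wait for the master to add it to smp_commenced_mask, mark itself
 * online and drop into the idle loop.
 */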
0104 void smp_callin(void)
0105 {
0106 int cpuid = hard_smp_processor_id();
0107
0108 __local_per_cpu_offset = __per_cpu_offset(cpuid);
0109
0110 if (tlb_type == hypervisor)
0111 sun4v_ktsb_register();
0112
0113 __flush_tlb_all();
0114
0115 setup_sparc64_timer();
0116
0117 if (cheetah_pcache_forced_on)
0118 cheetah_enable_pcache();
0119
0120 callin_flag = 1;
0121 __asm__ __volatile__("membar #Sync\n\t"
0122 "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
0134 notify_cpu_starting(cpuid);
0135
0136 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
0137 rmb();
0138
0139 set_cpu_online(cpuid, true);
0140
0141 local_irq_enable();
0142
0143 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
0144 }
0145
0146 void cpu_panic(void)
0147 {
0148 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
0149 panic("SMP bolixed\n");
0150 }

/* This tick register synchronization scheme is adapted from the ia64
 * port (see arch/ia64/kernel/smpboot.c), with one change: the master
 * initiates the synchronization instead of the slave.
 */

0159 #define MASTER 0
0160 #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
0161
0162 #define NUM_ROUNDS 64
0163 #define NUM_ITERS 5
0164
0165 static DEFINE_RAW_SPINLOCK(itc_sync_lock);
0166 static unsigned long go[SLAVE + 1];
0167
0168 #define DEBUG_TICK_SYNC 0
0169
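/* Slave side of one measurement round: handshake NUM_ITERS times with the
 * master over the go[] words, keep the sample with the shortest round trip,
 * and return the signed adjustment (midpoint of t0/t1 minus the master's
 * timestamp) to apply to the local tick.  *rt and *master report the best
 * round trip and master offset back to the caller.
 */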
0170 static inline long get_delta (long *rt, long *master)
0171 {
0172 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
0173 unsigned long tcenter, t0, t1, tm;
0174 unsigned long i;
0175
0176 for (i = 0; i < NUM_ITERS; i++) {
0177 t0 = tick_ops->get_tick();
0178 go[MASTER] = 1;
0179 membar_safe("#StoreLoad");
0180 while (!(tm = go[SLAVE]))
0181 rmb();
0182 go[SLAVE] = 0;
0183 wmb();
0184 t1 = tick_ops->get_tick();
0185
0186 if (t1 - t0 < best_t1 - best_t0)
0187 best_t0 = t0, best_t1 = t1, best_tm = tm;
0188 }
0189
0190 *rt = best_t1 - best_t0;
0191 *master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
0194 tcenter = (best_t0/2 + best_t1/2);
0195 if (best_t0 % 2 + best_t1 % 2 == 2)
0196 tcenter++;
0197 return tcenter - best_tm;
0198 }
0199
0200 void smp_synchronize_tick_client(void)
0201 {
0202 long i, delta, adj, adjust_latency = 0, done = 0;
0203 unsigned long flags, rt, master_time_stamp;
0204 #if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of tick adjustment latency */
	} t[NUM_ROUNDS];
0211 #endif
0212
0213 go[MASTER] = 1;
0214
0215 while (go[MASTER])
0216 rmb();
0217
0218 local_irq_save(flags);
0219 {
0220 for (i = 0; i < NUM_ROUNDS; i++) {
0221 delta = get_delta(&rt, &master_time_stamp);
0222 if (delta == 0)
0223 done = 1;
0224
0225 if (!done) {
0226 if (i > 0) {
0227 adjust_latency += -delta;
0228 adj = -delta + adjust_latency/4;
0229 } else
0230 adj = -delta;
0231
0232 tick_ops->add_tick(adj);
0233 }
0234 #if DEBUG_TICK_SYNC
0235 t[i].rt = rt;
0236 t[i].master = master_time_stamp;
0237 t[i].diff = delta;
0238 t[i].lat = adjust_latency/4;
0239 #endif
0240 }
0241 }
0242 local_irq_restore(flags);
0243
0244 #if DEBUG_TICK_SYNC
0245 for (i = 0; i < NUM_ROUNDS; i++)
0246 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
0247 t[i].rt, t[i].master, t[i].diff, t[i].lat);
0248 #endif
0249
0250 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
0251 "(last diff %ld cycles, maxerr %lu cycles)\n",
0252 smp_processor_id(), delta, rt);
0253 }
0254
0255 static void smp_start_sync_tick_client(int cpu);
0256
0257 static void smp_synchronize_one_tick(int cpu)
0258 {
0259 unsigned long flags, i;
0260
0261 go[MASTER] = 0;
0262
0263 smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
0266 while (!go[MASTER])
0267 rmb();

	/* now let the client proceed into his loop */
0270 go[MASTER] = 0;
0271 membar_safe("#StoreLoad");
0272
0273 raw_spin_lock_irqsave(&itc_sync_lock, flags);
0274 {
0275 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
0276 while (!go[MASTER])
0277 rmb();
0278 go[MASTER] = 0;
0279 wmb();
0280 go[SLAVE] = tick_ops->get_tick();
0281 membar_safe("#StoreLoad");
0282 }
0283 }
0284 raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
0285 }
0286
0287 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
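/* Start a cpu via the sun4v hypervisor when running under LDOMs: build an
 * hvtramp_descr describing the kernel image mappings and the cpu's MMU
 * fault status area, then point the new cpu at the hv_cpu_startup
 * trampoline with sun4v_cpu_start().
 */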
0288 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
0289 void **descrp)
0290 {
0291 extern unsigned long sparc64_ttable_tl0;
0292 extern unsigned long kern_locked_tte_data;
0293 struct hvtramp_descr *hdesc;
0294 unsigned long trampoline_ra;
0295 struct trap_per_cpu *tb;
0296 u64 tte_vaddr, tte_data;
0297 unsigned long hv_err;
0298 int i;
0299
0300 hdesc = kzalloc(sizeof(*hdesc) +
0301 (sizeof(struct hvtramp_mapping) *
0302 num_kernel_image_mappings - 1),
0303 GFP_KERNEL);
0304 if (!hdesc) {
0305 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
0306 "hvtramp_descr.\n");
0307 return;
0308 }
0309 *descrp = hdesc;
0310
0311 hdesc->cpu = cpu;
0312 hdesc->num_mappings = num_kernel_image_mappings;
0313
0314 tb = &trap_block[cpu];
0315
0316 hdesc->fault_info_va = (unsigned long) &tb->fault_info;
0317 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
0318
0319 hdesc->thread_reg = thread_reg;
0320
0321 tte_vaddr = (unsigned long) KERNBASE;
0322 tte_data = kern_locked_tte_data;
0323
0324 for (i = 0; i < hdesc->num_mappings; i++) {
0325 hdesc->maps[i].vaddr = tte_vaddr;
0326 hdesc->maps[i].tte = tte_data;
0327 tte_vaddr += 0x400000;
0328 tte_data += 0x400000;
0329 }
0330
0331 trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
0332
0333 hv_err = sun4v_cpu_start(cpu, trampoline_ra,
0334 kimage_addr_to_ra(&sparc64_ttable_tl0),
0335 __pa(hdesc));
0336 if (hv_err)
0337 printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
0338 "gives error %lu\n", hv_err);
0339 }
0340 #endif
0341
0342 extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to 32 bits,
 * so the trampoline reads the new thread pointer through this variable
 * instead; that keeps things working on machines with more than 4GB.
 */
0348 static struct thread_info *cpu_new_thread = NULL;
0349
0350 static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
0351 {
0352 unsigned long entry =
0353 (unsigned long)(&sparc64_cpu_startup);
0354 unsigned long cookie =
0355 (unsigned long)(&cpu_new_thread);
0356 void *descr = NULL;
0357 int timeout, ret;
0358
0359 callin_flag = 0;
0360 cpu_new_thread = task_thread_info(idle);
0361
0362 if (tlb_type == hypervisor) {
0363 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
0364 if (ldom_domaining_enabled)
0365 ldom_startcpu_cpuid(cpu,
0366 (unsigned long) cpu_new_thread,
0367 &descr);
0368 else
0369 #endif
0370 prom_startcpu_cpuid(cpu, entry, cookie);
0371 } else {
0372 struct device_node *dp = of_find_node_by_cpuid(cpu);
0373
0374 prom_startcpu(dp->phandle, entry, cookie);
0375 }
0376
0377 for (timeout = 0; timeout < 50000; timeout++) {
0378 if (callin_flag)
0379 break;
0380 udelay(100);
0381 }
0382
0383 if (callin_flag) {
0384 ret = 0;
0385 } else {
0386 printk("Processor %d is stuck.\n", cpu);
0387 ret = -ENODEV;
0388 }
0389 cpu_new_thread = NULL;
0390
0391 kfree(descr);
0392
0393 return ret;
0394 }
0395
0396 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
0397 {
0398 u64 result, target;
0399 int stuck, tmp;
0400
0401 if (this_is_starfire) {
		/* map to real upaid */
0403 cpu = (((cpu & 0x3c) << 1) |
0404 ((cpu & 0x40) >> 4) |
0405 (cpu & 0x3));
0406 }
0407
0408 target = (cpu << 14) | 0x70;
0409 again:
	/* With interrupts disabled via %pstate, write the three mondo data
	 * words into the outgoing interrupt vector registers, then write
	 * the dispatch register for the target to fire off the cross call.
	 * The trailing dummy ldxa forces the stores to complete before we
	 * start polling the dispatch status below.
	 */
0417 tmp = 0x40;
0418 __asm__ __volatile__(
0419 "wrpr %1, %2, %%pstate\n\t"
0420 "stxa %4, [%0] %3\n\t"
0421 "stxa %5, [%0+%8] %3\n\t"
0422 "add %0, %8, %0\n\t"
0423 "stxa %6, [%0+%8] %3\n\t"
0424 "membar #Sync\n\t"
0425 "stxa %%g0, [%7] %3\n\t"
0426 "membar #Sync\n\t"
0427 "mov 0x20, %%g1\n\t"
0428 "ldxa [%%g1] 0x7f, %%g0\n\t"
0429 "membar #Sync"
0430 : "=r" (tmp)
0431 : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
0432 "r" (data0), "r" (data1), "r" (data2), "r" (target),
0433 "r" (0x10), "0" (tmp)
0434 : "g1");

	/* NOTE: PSTATE_IE is still clear. */
0437 stuck = 100000;
0438 do {
0439 __asm__ __volatile__("ldxa [%%g0] %1, %0"
0440 : "=r" (result)
0441 : "i" (ASI_INTR_DISPATCH_STAT));
0442 if (result == 0) {
0443 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
0444 : : "r" (pstate));
0445 return;
0446 }
0447 stuck -= 1;
0448 if (stuck == 0)
0449 break;
0450 } while (result & 0x1);
0451 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
0452 : : "r" (pstate));
0453 if (stuck == 0) {
0454 printk("CPU[%d]: mondo stuckage result[%016llx]\n",
0455 smp_processor_id(), result);
0456 } else {
0457 udelay(2);
0458 goto again;
0459 }
0460 }
0461
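/* Spitfire can only dispatch to one target at a time, so deliver the
 * mondo words from the trap block to each cpu in the list individually.
 */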
0462 static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
0463 {
0464 u64 *mondo, data0, data1, data2;
0465 u16 *cpu_list;
0466 u64 pstate;
0467 int i;
0468
0469 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
0470 cpu_list = __va(tb->cpu_list_pa);
0471 mondo = __va(tb->cpu_mondo_block_pa);
0472 data0 = mondo[0];
0473 data1 = mondo[1];
0474 data2 = mondo[2];
0475 for (i = 0; i < cnt; i++)
0476 spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
0477 }

/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
0483 static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
0484 {
0485 int nack_busy_id, is_jbus, need_more;
0486 u64 *mondo, pstate, ver, busy_mask;
0487 u16 *cpu_list;
0488
0489 cpu_list = __va(tb->cpu_list_pa);
0490 mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately the busy/nack fields are hard-coded by ITID number
	 * on the JBUS Ultra-III derivatives (Jalapeno/Serrano), rather than
	 * by dispatch slot, so we must detect those chips here.
	 */
0496 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
0497 is_jbus = ((ver >> 32) == __JALAPENO_ID ||
0498 (ver >> 32) == __SERRANO_ID);
0499
0500 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
0501
0502 retry:
0503 need_more = 0;
0504 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
0505 : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
0508 __asm__ __volatile__("stxa %0, [%3] %6\n\t"
0509 "stxa %1, [%4] %6\n\t"
0510 "stxa %2, [%5] %6\n\t"
0511 "membar #Sync\n\t"
0512 :
0513 : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
0514 "r" (0x40), "r" (0x50), "r" (0x60),
0515 "i" (ASI_INTR_W));
0516
0517 nack_busy_id = 0;
0518 busy_mask = 0;
0519 {
0520 int i;
0521
0522 for (i = 0; i < cnt; i++) {
0523 u64 target, nr;
0524
0525 nr = cpu_list[i];
0526 if (nr == 0xffff)
0527 continue;
0528
0529 target = (nr << 14) | 0x70;
0530 if (is_jbus) {
0531 busy_mask |= (0x1UL << (nr * 2));
0532 } else {
0533 target |= (nack_busy_id << 24);
0534 busy_mask |= (0x1UL <<
0535 (nack_busy_id * 2));
0536 }
0537 __asm__ __volatile__(
0538 "stxa %%g0, [%0] %1\n\t"
0539 "membar #Sync\n\t"
0540 :
0541 : "r" (target), "i" (ASI_INTR_W));
0542 nack_busy_id++;
0543 if (nack_busy_id == 32) {
0544 need_more = 1;
0545 break;
0546 }
0547 }
0548 }

	/* Now, poll for completion. */
0551 {
0552 u64 dispatch_stat, nack_mask;
0553 long stuck;
0554
0555 stuck = 100000 * nack_busy_id;
0556 nack_mask = busy_mask << 1;
0557 do {
0558 __asm__ __volatile__("ldxa [%%g0] %1, %0"
0559 : "=r" (dispatch_stat)
0560 : "i" (ASI_INTR_DISPATCH_STAT));
0561 if (!(dispatch_stat & (busy_mask | nack_mask))) {
0562 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
0563 : : "r" (pstate));
0564 if (unlikely(need_more)) {
0565 int i, this_cnt = 0;
0566 for (i = 0; i < cnt; i++) {
0567 if (cpu_list[i] == 0xffff)
0568 continue;
0569 cpu_list[i] = 0xffff;
0570 this_cnt++;
0571 if (this_cnt == 32)
0572 break;
0573 }
0574 goto retry;
0575 }
0576 return;
0577 }
0578 if (!--stuck)
0579 break;
0580 } while (dispatch_stat & busy_mask);
0581
0582 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
0583 : : "r" (pstate));
0584
0585 if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
0589 printk("CPU[%d]: mondo stuckage result[%016llx]\n",
0590 smp_processor_id(), dispatch_stat);
0591 } else {
0592 int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
0597 udelay(2 * nack_busy_id);

			/* Clear out of the retry list the cpus which did
			 * not NACK us.
			 */
0602 for (i = 0; i < cnt; i++) {
0603 u64 check_mask, nr;
0604
0605 nr = cpu_list[i];
0606 if (nr == 0xffff)
0607 continue;
0608
0609 if (is_jbus)
0610 check_mask = (0x2UL << (2*nr));
0611 else
0612 check_mask = (0x2UL <<
0613 this_busy_nack);
0614 if ((dispatch_stat & check_mask) == 0)
0615 cpu_list[i] = 0xffff;
0616 this_busy_nack += 2;
0617 if (this_busy_nack == 64)
0618 break;
0619 }
0620
0621 goto retry;
0622 }
0623 }
0624 }
0625
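/* cpu_mondo_counter[cpuid] counts the cpu mondos received by that cpu and
 * is used below to detect whether a busy target is still making progress;
 * the MONDO_* constants bound the per-retry wait and the retry budget.
 */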
0626 #define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
0627 #define MONDO_USEC_WAIT_MIN 2
0628 #define MONDO_USEC_WAIT_MAX 100
0629 #define MONDO_RETRY_LIMIT 500000

/* Multi-cpu list version.
 *
 * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.  Sometimes not all
 * cpus receive the mondo, requiring us to re-send until every cpu has
 * received it or the targets are truly stuck and we time out.
 *
 * A target strand can also be borrowed briefly by the hypervisor (for
 * example to handle a PCIe error), so the retry loop waits between
 * attempts: 2 usec for a single target, scaling up to 100 usec for large
 * cpu counts.  The retry budget is only consumed while no forward
 * progress is being made.
 */
0645 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
0646 {
0647 int this_cpu, tot_cpus, prev_sent, i, rem;
0648 int usec_wait, retries, tot_retries;
0649 u16 first_cpu = 0xffff;
0650 unsigned long xc_rcvd = 0;
0651 unsigned long status;
0652 int ecpuerror_id = 0;
0653 int enocpu_id = 0;
0654 u16 *cpu_list;
0655 u16 cpu;
0656
0657 this_cpu = smp_processor_id();
0658 cpu_list = __va(tb->cpu_list_pa);
0659 usec_wait = cnt * MONDO_USEC_WAIT_MIN;
0660 if (usec_wait > MONDO_USEC_WAIT_MAX)
0661 usec_wait = MONDO_USEC_WAIT_MAX;
0662 retries = tot_retries = 0;
0663 tot_cpus = cnt;
0664 prev_sent = 0;
0665
0666 do {
0667 int n_sent, mondo_delivered, target_cpu_busy;
0668
0669 status = sun4v_cpu_mondo_send(cnt,
0670 tb->cpu_list_pa,
0671 tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
0674 if (likely(status == HV_EOK))
0675 goto xcall_done;

		/* Anything other than these non-fatal errors is a panic. */
0678 if (unlikely((status != HV_EWOULDBLOCK) &&
0679 (status != HV_ECPUERROR) &&
0680 (status != HV_ENOCPU)))
0681 goto fatal_errors;

		/* First, see if we made any forward progress.
		 *
		 * Go through the cpu_list, count the target cpus that have
		 * received our mondo (n_sent), and those that did not (rem).
		 * Re-pack cpu_list with the cpus remaining to be retried in
		 * the front - this simplifies tracking the truly stuck cpus.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 *
		 * EWOULDBLOCK means some target cpus did not receive the
		 * mondo and retry usually helps.
		 *
		 * ECPUERROR means at least one target cpu is in error state,
		 * it's usually safe to skip the faulty cpu and retry.
		 *
		 * ENOCPU means one of the target cpus doesn't belong to the
		 * domain, perhaps offlined which is unexpected, but not
		 * fatal and it's okay to skip the offlined cpu.
		 */
0703 rem = 0;
0704 n_sent = 0;
0705 for (i = 0; i < cnt; i++) {
0706 cpu = cpu_list[i];
0707 if (likely(cpu == 0xffff)) {
0708 n_sent++;
0709 } else if ((status == HV_ECPUERROR) &&
0710 (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
0711 ecpuerror_id = cpu + 1;
0712 } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
0713 enocpu_id = cpu + 1;
0714 } else {
0715 cpu_list[rem++] = cpu;
0716 }
0717 }

		/* No cpu remained, we're done. */
		if (rem == 0)
			break;

		/* Otherwise, update the cpu count for retry. */
		cnt = rem;

		/* Record the overall number of mondos received by the
		 * first of the remaining cpus.
		 */
		if (first_cpu != cpu_list[0]) {
			first_cpu = cpu_list[0];
			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
		}

		/* Was any mondo delivered successfully? */
		mondo_delivered = (n_sent > prev_sent);
		prev_sent = n_sent;

		/* or, was any target cpu busy processing other mondos? */
		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

		/* The retry count is only for lack of progress.  If we are
		 * making progress, reset it.
		 */
		if (likely(mondo_delivered || target_cpu_busy)) {
			tot_retries += retries;
			retries = 0;
		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
			goto fatal_mondo_timeout;
		}

		/* Delay a little bit to let other cpus catch up on
		 * their cpu mondo queue work.
		 */
0755 if (!mondo_delivered)
0756 udelay(usec_wait);
0757
0758 retries++;
0759 } while (1);
0760
0761 xcall_done:
0762 if (unlikely(ecpuerror_id > 0)) {
0763 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
0764 this_cpu, ecpuerror_id - 1);
0765 } else if (unlikely(enocpu_id > 0)) {
0766 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
0767 this_cpu, enocpu_id - 1);
0768 }
0769 return;
0770
0771 fatal_errors:
	/* fatal errors - should not happen */
0773 pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
0774 this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
0775 panic("Unexpected SUN4V mondo error %lu\n", status);
0776
0777 fatal_mondo_timeout:
	/* some cpus being non-responsive to the cpu mondo */
0779 pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
0780 this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
0781 panic("SUN4V mondo timeout panic\n");
0782 }
0783
0784 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
0785
0786 static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
0787 {
0788 struct trap_per_cpu *tb;
0789 int this_cpu, i, cnt;
0790 unsigned long flags;
0791 u16 *cpu_list;
0792 u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
0804 local_irq_save(flags);
0805
0806 this_cpu = smp_processor_id();
0807 tb = &trap_block[this_cpu];
0808
0809 mondo = __va(tb->cpu_mondo_block_pa);
0810 mondo[0] = data0;
0811 mondo[1] = data1;
0812 mondo[2] = data2;
0813 wmb();
0814
0815 cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
0818 cnt = 0;
0819 for_each_cpu(i, mask) {
0820 if (i == this_cpu || !cpu_online(i))
0821 continue;
0822 cpu_list[cnt++] = i;
0823 }
0824
0825 if (cnt)
0826 xcall_deliver_impl(tb, cnt);
0827
0828 local_irq_restore(flags);
0829 }

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
0835 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
0836 {
0837 u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
0838
0839 xcall_deliver(data0, data1, data2, mask);
0840 }

/* Send cross call to all processors except self. */
0843 static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
0844 {
0845 smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
0846 }
0847
0848 extern unsigned long xcall_sync_tick;
0849
0850 static void smp_start_sync_tick_client(int cpu)
0851 {
0852 xcall_deliver((u64) &xcall_sync_tick, 0, 0,
0853 cpumask_of(cpu));
0854 }
0855
0856 extern unsigned long xcall_call_function;
0857
0858 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
0859 {
0860 xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
0861 }
0862
0863 extern unsigned long xcall_call_function_single;
0864
0865 void arch_send_call_function_single_ipi(int cpu)
0866 {
0867 xcall_deliver((u64) &xcall_call_function_single, 0, 0,
0868 cpumask_of(cpu));
0869 }
0870
0871 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
0872 {
0873 clear_softint(1 << irq);
0874 irq_enter();
0875 generic_smp_call_function_interrupt();
0876 irq_exit();
0877 }
0878
0879 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
0880 {
0881 clear_softint(1 << irq);
0882 irq_enter();
0883 generic_smp_call_function_single_interrupt();
0884 irq_exit();
0885 }
0886
0887 static void tsb_sync(void *info)
0888 {
0889 struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
0890 struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
0898 if (tp->pgd_paddr == __pa(mm->pgd))
0899 tsb_context_switch(mm);
0900 }
0901
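/* Make every cpu currently running this mm reload its TSB configuration. */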
0902 void smp_tsb_sync(struct mm_struct *mm)
0903 {
0904 smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
0905 }
0906
0907 extern unsigned long xcall_flush_tlb_mm;
0908 extern unsigned long xcall_flush_tlb_page;
0909 extern unsigned long xcall_flush_tlb_kernel_range;
0910 extern unsigned long xcall_fetch_glob_regs;
0911 extern unsigned long xcall_fetch_glob_pmu;
0912 extern unsigned long xcall_fetch_glob_pmu_n4;
0913 extern unsigned long xcall_receive_signal;
0914 extern unsigned long xcall_new_mmu_context_version;
0915 #ifdef CONFIG_KGDB
0916 extern unsigned long xcall_kgdb_capture;
0917 #endif
0918
0919 #ifdef DCACHE_ALIASING_POSSIBLE
0920 extern unsigned long xcall_flush_dcache_page_cheetah;
0921 #endif
0922 extern unsigned long xcall_flush_dcache_page_spitfire;
0923
0924 static inline void __local_flush_dcache_page(struct page *page)
0925 {
0926 #ifdef DCACHE_ALIASING_POSSIBLE
0927 __flush_dcache_page(page_address(page),
0928 ((tlb_type == spitfire) &&
0929 page_mapping_file(page) != NULL));
0930 #else
0931 if (page_mapping_file(page) != NULL &&
0932 tlb_type == spitfire)
0933 __flush_icache_page(__pa(page_address(page)));
0934 #endif
0935 }
0936
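/* Flush the D-cache lines for @page on @cpu, either locally or via a
 * cross call.  Nothing to do on sun4v, whose D-caches do not alias.
 */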
0937 void smp_flush_dcache_page_impl(struct page *page, int cpu)
0938 {
0939 int this_cpu;
0940
0941 if (tlb_type == hypervisor)
0942 return;
0943
0944 #ifdef CONFIG_DEBUG_DCFLUSH
0945 atomic_inc(&dcpage_flushes);
0946 #endif
0947
0948 this_cpu = get_cpu();
0949
0950 if (cpu == this_cpu) {
0951 __local_flush_dcache_page(page);
0952 } else if (cpu_online(cpu)) {
0953 void *pg_addr = page_address(page);
0954 u64 data0 = 0;
0955
0956 if (tlb_type == spitfire) {
0957 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
0958 if (page_mapping_file(page) != NULL)
0959 data0 |= ((u64)1 << 32);
0960 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
0961 #ifdef DCACHE_ALIASING_POSSIBLE
0962 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
0963 #endif
0964 }
0965 if (data0) {
0966 xcall_deliver(data0, __pa(pg_addr),
0967 (u64) pg_addr, cpumask_of(cpu));
0968 #ifdef CONFIG_DEBUG_DCFLUSH
0969 atomic_inc(&dcpage_flushes_xcall);
0970 #endif
0971 }
0972 }
0973
0974 put_cpu();
0975 }
0976
0977 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
0978 {
0979 void *pg_addr;
0980 u64 data0;
0981
0982 if (tlb_type == hypervisor)
0983 return;
0984
0985 preempt_disable();
0986
0987 #ifdef CONFIG_DEBUG_DCFLUSH
0988 atomic_inc(&dcpage_flushes);
0989 #endif
0990 data0 = 0;
0991 pg_addr = page_address(page);
0992 if (tlb_type == spitfire) {
0993 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
0994 if (page_mapping_file(page) != NULL)
0995 data0 |= ((u64)1 << 32);
0996 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
0997 #ifdef DCACHE_ALIASING_POSSIBLE
0998 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
0999 #endif
1000 }
1001 if (data0) {
1002 xcall_deliver(data0, __pa(pg_addr),
1003 (u64) pg_addr, cpu_online_mask);
1004 #ifdef CONFIG_DEBUG_DCFLUSH
1005 atomic_inc(&dcpage_flushes_xcall);
1006 #endif
1007 }
1008 __local_flush_dcache_page(page);
1009
1010 preempt_enable();
1011 }
1012
1013 #ifdef CONFIG_KGDB
1014 void kgdb_roundup_cpus(void)
1015 {
1016 smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
1017 }
1018 #endif
1019
1020 void smp_fetch_global_regs(void)
1021 {
1022 smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
1023 }
1024
1025 void smp_fetch_global_pmu(void)
1026 {
1027 if (tlb_type == hypervisor &&
1028 sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
1029 smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
1030 else
1031 smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
1032 }

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * mm->cpu_vm_mask is a bit mask of which cpus an address
 * space has (potentially) executed on, this is the heuristic
 * we use to limit cross calls.
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
1048 void smp_flush_tlb_mm(struct mm_struct *mm)
1049 {
1050 u32 ctx = CTX_HWBITS(mm->context);
1051
1052 get_cpu();
1053
1054 smp_cross_call_masked(&xcall_flush_tlb_mm,
1055 ctx, 0, 0,
1056 mm_cpumask(mm));
1057
1058 __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1059
1060 put_cpu();
1061 }
1062
1063 struct tlb_pending_info {
1064 unsigned long ctx;
1065 unsigned long nr;
1066 unsigned long *vaddrs;
1067 };
1068
1069 static void tlb_pending_func(void *info)
1070 {
1071 struct tlb_pending_info *t = info;
1072
1073 __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
1074 }
1075
1076 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1077 {
1078 u32 ctx = CTX_HWBITS(mm->context);
1079 struct tlb_pending_info info;
1080
1081 get_cpu();
1082
1083 info.ctx = ctx;
1084 info.nr = nr;
1085 info.vaddrs = vaddrs;
1086
1087 smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
1088 &info, 1);
1089
1090 __flush_tlb_pending(ctx, nr, vaddrs);
1091
1092 put_cpu();
1093 }
1094
1095 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
1096 {
1097 unsigned long context = CTX_HWBITS(mm->context);
1098
1099 get_cpu();
1100
1101 smp_cross_call_masked(&xcall_flush_tlb_page,
1102 context, vaddr, 0,
1103 mm_cpumask(mm));
1104
1105 __flush_tlb_page(context, vaddr);
1106
1107 put_cpu();
1108 }
1109
1110 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1111 {
1112 start &= PAGE_MASK;
1113 end = PAGE_ALIGN(end);
1114 if (start != end) {
1115 smp_cross_call(&xcall_flush_tlb_kernel_range,
1116 0, start, end);
1117
1118 __flush_tlb_kernel_range(start, end);
1119 }
1120 }
1121
/* CPU capture. */
/* #define CAPTURE_DEBUG */
1124 extern unsigned long xcall_capture;
1125
1126 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1127 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1128 static unsigned long penguins_are_doing_time;
1129
1130 void smp_capture(void)
1131 {
1132 int result = atomic_add_return(1, &smp_capture_depth);
1133
1134 if (result == 1) {
1135 int ncpus = num_online_cpus();
1136
1137 #ifdef CAPTURE_DEBUG
1138 printk("CPU[%d]: Sending penguins to jail...",
1139 smp_processor_id());
1140 #endif
1141 penguins_are_doing_time = 1;
1142 atomic_inc(&smp_capture_registry);
1143 smp_cross_call(&xcall_capture, 0, 0, 0);
1144 while (atomic_read(&smp_capture_registry) != ncpus)
1145 rmb();
1146 #ifdef CAPTURE_DEBUG
1147 printk("done\n");
1148 #endif
1149 }
1150 }
1151
1152 void smp_release(void)
1153 {
1154 if (atomic_dec_and_test(&smp_capture_depth)) {
1155 #ifdef CAPTURE_DEBUG
1156 printk("CPU[%d]: Giving pardon to "
1157 "imprisoned penguins\n",
1158 smp_processor_id());
1159 #endif
1160 penguins_are_doing_time = 0;
1161 membar_safe("#StoreLoad");
1162 atomic_dec(&smp_capture_registry);
1163 }
1164 }

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
1169 extern void prom_world(int);
1170
1171 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1172 {
1173 clear_softint(1 << irq);
1174
1175 preempt_disable();
1176
1177 __asm__ __volatile__("flushw");
1178 prom_world(1);
1179 atomic_inc(&smp_capture_registry);
1180 membar_safe("#StoreLoad");
1181 while (penguins_are_doing_time)
1182 rmb();
1183 atomic_dec(&smp_capture_registry);
1184 prom_world(0);
1185
1186 preempt_enable();
1187 }
1188
1189 void __init smp_prepare_cpus(unsigned int max_cpus)
1190 {
1191 }
1192
1193 void smp_prepare_boot_cpu(void)
1194 {
1195 }
1196
1197 void __init smp_setup_processor_id(void)
1198 {
1199 if (tlb_type == spitfire)
1200 xcall_deliver_impl = spitfire_xcall_deliver;
1201 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
1202 xcall_deliver_impl = cheetah_xcall_deliver;
1203 else
1204 xcall_deliver_impl = hypervisor_xcall_deliver;
1205 }
1206
1207 void __init smp_fill_in_cpu_possible_map(void)
1208 {
1209 int possible_cpus = num_possible_cpus();
1210 int i;
1211
1212 if (possible_cpus > nr_cpu_ids)
1213 possible_cpus = nr_cpu_ids;
1214
1215 for (i = 0; i < possible_cpus; i++)
1216 set_cpu_possible(i, true);
1217 for (; i < NR_CPUS; i++)
1218 set_cpu_possible(i, false);
1219 }
1220
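/* Build the core, socket/last-level-cache and thread sibling masks from
 * the per-cpu core_id, sock_id, max_cache_id and proc_id values.
 */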
1221 void smp_fill_in_sib_core_maps(void)
1222 {
1223 unsigned int i;
1224
1225 for_each_present_cpu(i) {
1226 unsigned int j;
1227
1228 cpumask_clear(&cpu_core_map[i]);
1229 if (cpu_data(i).core_id == 0) {
1230 cpumask_set_cpu(i, &cpu_core_map[i]);
1231 continue;
1232 }
1233
1234 for_each_present_cpu(j) {
1235 if (cpu_data(i).core_id ==
1236 cpu_data(j).core_id)
1237 cpumask_set_cpu(j, &cpu_core_map[i]);
1238 }
1239 }
1240
1241 for_each_present_cpu(i) {
1242 unsigned int j;
1243
1244 for_each_present_cpu(j) {
1245 if (cpu_data(i).max_cache_id ==
1246 cpu_data(j).max_cache_id)
1247 cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
1248
1249 if (cpu_data(i).sock_id == cpu_data(j).sock_id)
1250 cpumask_set_cpu(j, &cpu_core_sib_map[i]);
1251 }
1252 }
1253
1254 for_each_present_cpu(i) {
1255 unsigned int j;
1256
1257 cpumask_clear(&per_cpu(cpu_sibling_map, i));
1258 if (cpu_data(i).proc_id == -1) {
1259 cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
1260 continue;
1261 }
1262
1263 for_each_present_cpu(j) {
1264 if (cpu_data(i).proc_id ==
1265 cpu_data(j).proc_id)
1266 cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
1267 }
1268 }
1269 }
1270
1271 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1272 {
1273 int ret = smp_boot_one_cpu(cpu, tidle);
1274
1275 if (!ret) {
1276 cpumask_set_cpu(cpu, &smp_commenced_mask);
1277 while (!cpu_online(cpu))
1278 mb();
1279 if (!cpu_online(cpu)) {
1280 ret = -ENODEV;
1281 } else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
1285 if (tlb_type != hypervisor)
1286 smp_synchronize_one_tick(cpu);
1287 }
1288 }
1289 return ret;
1290 }
1291
1292 #ifdef CONFIG_HOTPLUG_CPU
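/* Final code run by a cpu going offline: detach its sun4v mondo queues,
 * drop out of smp_commenced_mask and spin with interrupts disabled; on
 * LDOM platforms __cpu_die() then stops the cpu via the hypervisor.
 */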
1293 void cpu_play_dead(void)
1294 {
1295 int cpu = smp_processor_id();
1296 unsigned long pstate;
1297
1298 idle_task_exit();
1299
1300 if (tlb_type == hypervisor) {
1301 struct trap_per_cpu *tb = &trap_block[cpu];
1302
1303 sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1304 tb->cpu_mondo_pa, 0);
1305 sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1306 tb->dev_mondo_pa, 0);
1307 sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1308 tb->resum_mondo_pa, 0);
1309 sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1310 tb->nonresum_mondo_pa, 0);
1311 }
1312
1313 cpumask_clear_cpu(cpu, &smp_commenced_mask);
1314 membar_safe("#Sync");
1315
1316 local_irq_disable();
1317
1318 __asm__ __volatile__(
1319 "rdpr %%pstate, %0\n\t"
1320 "wrpr %0, %1, %%pstate"
1321 : "=r" (pstate)
1322 : "i" (PSTATE_IE));
1323
1324 while (1)
1325 barrier();
1326 }
1327
1328 int __cpu_disable(void)
1329 {
1330 int cpu = smp_processor_id();
1331 cpuinfo_sparc *c;
1332 int i;
1333
1334 for_each_cpu(i, &cpu_core_map[cpu])
1335 cpumask_clear_cpu(cpu, &cpu_core_map[i]);
1336 cpumask_clear(&cpu_core_map[cpu]);
1337
1338 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
1339 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
1340 cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
1341
1342 c = &cpu_data(cpu);
1343
1344 c->core_id = 0;
1345 c->proc_id = -1;
1346
1347 smp_wmb();

	/* Make sure no interrupts point to this cpu.  */
1350 fixup_irqs();
1351
1352 local_irq_enable();
1353 mdelay(1);
1354 local_irq_disable();
1355
1356 set_cpu_online(cpu, false);
1357
1358 cpu_map_rebuild();
1359
1360 return 0;
1361 }
1362
1363 void __cpu_die(unsigned int cpu)
1364 {
1365 int i;
1366
1367 for (i = 0; i < 100; i++) {
1368 smp_rmb();
1369 if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
1370 break;
1371 msleep(100);
1372 }
1373 if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
1374 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1375 } else {
1376 #if defined(CONFIG_SUN_LDOMS)
1377 unsigned long hv_err;
1378 int limit = 100;
1379
1380 do {
1381 hv_err = sun4v_cpu_stop(cpu);
1382 if (hv_err == HV_EOK) {
1383 set_cpu_present(cpu, false);
1384 break;
1385 }
1386 } while (--limit > 0);
1387 if (limit <= 0) {
1388 printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1389 hv_err);
1390 }
1391 #endif
1392 }
1393 }
1394 #endif
1395
1396 void __init smp_cpus_done(unsigned int max_cpus)
1397 {
1398 }
1399
1400 static void send_cpu_ipi(int cpu)
1401 {
1402 xcall_deliver((u64) &xcall_receive_signal,
1403 0, 0, cpumask_of(cpu));
1404 }
1405
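/* If cpu poke is in use and a poke was posted for this cpu, clear it and
 * raise the local reschedule softint.
 */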
1406 void scheduler_poke(void)
1407 {
1408 if (!cpu_poke)
1409 return;
1410
1411 if (!__this_cpu_read(poke))
1412 return;
1413
1414 __this_cpu_write(poke, false);
1415 set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1416 }
1417
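/* Post a poke for @cpu and ask the hypervisor to wake it; on failure the
 * flag is cleared again so the caller can fall back to a normal IPI.
 */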
1418 static unsigned long send_cpu_poke(int cpu)
1419 {
1420 unsigned long hv_err;
1421
1422 per_cpu(poke, cpu) = true;
1423 hv_err = sun4v_cpu_poke(cpu);
1424 if (hv_err != HV_EOK) {
1425 per_cpu(poke, cpu) = false;
1426 pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
1427 __func__, hv_err);
1428 }
1429
1430 return hv_err;
1431 }
1432
1433 void smp_send_reschedule(int cpu)
1434 {
1435 if (cpu == smp_processor_id()) {
1436 WARN_ON_ONCE(preemptible());
1437 set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1438 return;
1439 }

	/* Use cpu poke to resume idle cpu if supported. */
1442 if (cpu_poke && idle_cpu(cpu)) {
1443 unsigned long ret;
1444
1445 ret = send_cpu_poke(cpu);
1446 if (ret == HV_EOK)
1447 return;
1448 }

	/* Use IPI in following cases:
	 * - cpu poke not supported
	 * - cpu not idle
	 * - send_cpu_poke() returns with error
	 */
1455 send_cpu_ipi(cpu);
1456 }
1457
1458 void smp_init_cpu_poke(void)
1459 {
1460 unsigned long major;
1461 unsigned long minor;
1462 int ret;
1463
1464 if (tlb_type != hypervisor)
1465 return;
1466
1467 ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
1468 if (ret) {
1469 pr_debug("HV_GRP_CORE is not registered\n");
1470 return;
1471 }
1472
1473 if (major == 1 && minor >= 6) {
		/* CPU POKE is registered. */
1475 cpu_poke = true;
1476 return;
1477 }
1478
1479 pr_debug("CPU_POKE not supported\n");
1480 }
1481
1482 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1483 {
1484 clear_softint(1 << irq);
1485 scheduler_ipi();
1486 }
1487
1488 static void stop_this_cpu(void *dummy)
1489 {
1490 set_cpu_online(smp_processor_id(), false);
1491 prom_stopself();
1492 }
1493
1494 void smp_send_stop(void)
1495 {
1496 int cpu;
1497
1498 if (tlb_type == hypervisor) {
1499 int this_cpu = smp_processor_id();
1500 #ifdef CONFIG_SERIAL_SUNHV
1501 sunhv_migrate_hvcons_irq(this_cpu);
1502 #endif
1503 for_each_online_cpu(cpu) {
1504 if (cpu == this_cpu)
1505 continue;
1506
1507 set_cpu_online(cpu, false);
1508 #ifdef CONFIG_SUN_LDOMS
1509 if (ldom_domaining_enabled) {
1510 unsigned long hv_err;
1511 hv_err = sun4v_cpu_stop(cpu);
1512 if (hv_err)
1513 printk(KERN_ERR "sun4v_cpu_stop() "
1514 "failed err=%lu\n", hv_err);
1515 } else
1516 #endif
1517 prom_stopcpu_cpuid(cpu);
1518 }
1519 } else
1520 smp_call_function(stop_this_cpu, NULL, 0);
1521 }
1522
1523 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1524 {
1525 if (cpu_to_node(from) == cpu_to_node(to))
1526 return LOCAL_DISTANCE;
1527 else
1528 return REMOTE_DISTANCE;
1529 }
1530
1531 static int __init pcpu_cpu_to_node(int cpu)
1532 {
1533 return cpu_to_node(cpu);
1534 }
1535
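/* Carve out the per-cpu areas, preferring the embedding first-chunk
 * allocator and falling back to page mapping, then record each cpu's
 * __per_cpu_offset and fill in the cpu data from OF/MD.
 */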
1536 void __init setup_per_cpu_areas(void)
1537 {
1538 unsigned long delta;
1539 unsigned int cpu;
1540 int rc = -EINVAL;
1541
1542 if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1543 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1544 PERCPU_DYNAMIC_RESERVE, 4 << 20,
1545 pcpu_cpu_distance,
1546 pcpu_cpu_to_node);
1547 if (rc)
1548 pr_warn("PERCPU: %s allocator failed (%d), "
1549 "falling back to page size\n",
1550 pcpu_fc_names[pcpu_chosen_fc], rc);
1551 }
1552 if (rc < 0)
1553 rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1554 pcpu_cpu_to_node);
1555 if (rc < 0)
1556 panic("cannot initialize percpu area (err=%d)", rc);
1557
1558 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1559 for_each_possible_cpu(cpu)
1560 __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
1563 __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1564
1565 of_fill_in_cpu_data();
1566 if (tlb_type == hypervisor)
1567 mdesc_fill_in_cpu_data(cpu_all_mask);
1568 }