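// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries CPU Hotplug infrastructure.
 */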

#define pr_fmt(fmt)	"pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>
#include <asm/topology.h>

#include "pseries.h"
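/* This version can't take the spinlock, because it never returns. */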
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
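/*
 * Record the CPU ids used on each node.
 * Protected by cpu_add_remove_lock.
 */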
static cpumask_var_t node_recorded_ids_map[MAX_NUMNODES];

static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}

static void pseries_cpu_offline_self(void)
{
	unsigned int hwcpu = hard_smp_processor_id();

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	unregister_slb_shadow(hwcpu);
	rtas_stop_self();
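	/* Should never get here... */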
	BUG();
	for (;;);
}

static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;
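	/* Hand off boot_cpuid if the boot CPU is going offline. */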
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);
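	/* Stop routing interrupts to this CPU. */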
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();

	cleanup_cpu_mmu_context();

	return 0;
}
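/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to
 * self-destruct.
 */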
static void pseries_cpu_die(unsigned int cpu)
{
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);
	unsigned long timeout = jiffies + msecs_to_jiffies(120000);

	while (true) {
		cpu_status = smp_query_cpu_stopped(pcpu);
		if (cpu_status == QCSS_STOPPED ||
		    cpu_status == QCSS_HARDWARE_ERROR)
			break;

		if (time_after(jiffies, timeout)) {
			pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
				cpu, pcpu);
			timeout = jiffies + msecs_to_jiffies(120000);
		}

		cond_resched();
	}

	if (cpu_status == QCSS_HARDWARE_ERROR) {
		pr_warn("CPU %i (hwid %i) reported error while dying\n",
			cpu, pcpu);
	}

	paca_ptrs[cpu]->cpu_start = 0;
}
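/**
 * find_cpu_id_range - find a linear range of @nthreads free CPU ids.
 * @nthreads: the number of threads (cpu ids).
 * @assigned_node: the node the ids belong to, or NUMA_NO_NODE if free ids
 *                 from any node can be picked.
 * @cpu_mask: the returned CPU mask.
 *
 * Returns 0 on success.
 */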
static int find_cpu_id_range(unsigned int nthreads, int assigned_node,
			     cpumask_var_t *cpu_mask)
{
	cpumask_var_t candidate_mask;
	unsigned int cpu, node;
	int rc = -ENOSPC;

	if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(*cpu_mask);
	for (cpu = 0; cpu < nthreads; cpu++)
		cpumask_set_cpu(cpu, *cpu_mask);

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
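	/* Get a bitmap of unoccupied slots. */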
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);

	if (assigned_node != NUMA_NO_NODE) {
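		/*
		 * Remove free ids previously assigned to the other nodes. We
		 * can walk only online nodes because once a node comes online
		 * it is never taken offline again.
		 */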
		for_each_online_node(node) {
			if (node == assigned_node)
				continue;
			cpumask_andnot(candidate_mask, candidate_mask,
				       node_recorded_ids_map[node]);
		}
	}

	if (cpumask_empty(candidate_mask))
		goto out;

	while (!cpumask_empty(*cpu_mask)) {
		if (cpumask_subset(*cpu_mask, candidate_mask))
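			/* Found a range where we can insert the new cpu(s) */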
			break;
		cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);
	}

	if (!cpumask_empty(*cpu_mask))
		rc = 0;

out:
	free_cpumask_var(candidate_mask);
	return rc;
}
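/*
 * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent multiple logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */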
static int pseries_add_processor(struct device_node *np)
{
	int len, nthreads, node, cpu, assigned_node;
	int rc = 0;
	cpumask_var_t cpu_mask;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	nthreads = len / sizeof(u32);

	if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;
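	/*
	 * Fetch from the DT nodes read by dlpar_configure_connector the NUMA
	 * node id the added CPU belongs to.
	 */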
	node = of_node_to_nid(np);
	if (node < 0 || !node_possible(node))
		node = first_online_node;

	BUG_ON(node == NUMA_NO_NODE);
	assigned_node = node;

	cpu_maps_update_begin();

	rc = find_cpu_id_range(nthreads, node, &cpu_mask);
	if (rc && nr_node_ids > 1) {
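		/*
		 * Try again, considering the free CPU ids from the other nodes.
		 */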
		node = NUMA_NO_NODE;
		rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask);
	}

	if (rc) {
		pr_err("Cannot add cpu %pOF; this system configuration supports %d logical cpus.\n",
		       np, num_possible_cpus());
		goto out;
	}

	for_each_cpu(cpu, cpu_mask) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
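	/* Record the newly used CPU ids for the associated node. */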
	cpumask_or(node_recorded_ids_map[assigned_node],
		   node_recorded_ids_map[assigned_node], cpu_mask);
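	/*
	 * If node is set to NUMA_NO_NODE, CPU ids have been reused from
	 * another node; remove them from its mask.
	 */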
	if (node == NUMA_NO_NODE) {
		cpu = cpumask_first(cpu_mask);
		pr_warn("Reusing free CPU ids %d-%d from another node\n",
			cpu, cpu + nthreads - 1);
		for_each_online_node(node) {
			if (node == assigned_node)
				continue;
			cpumask_andnot(node_recorded_ids_map[node],
				       node_recorded_ids_map[node],
				       cpu_mask);
		}
	}

out:
	cpu_maps_update_done();
	free_cpumask_var(cpu_mask);
	return rc;
}
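/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */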
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			update_numa_cpu_lookup_table(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			pr_warn("Could not find cpu to remove with physical id 0x%x\n",
				thread);
	}
	cpu_maps_update_done();
}

static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (!cpu_online(cpu))
				break;
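			/*
			 * device_offline() will return -EBUSY (via cpu_down())
			 * if there is only one CPU left. Check it here to fail
			 * earlier and with a more informative error message,
			 * while also retaining the cpu_add_remove_lock to be
			 * sure that no CPUs are being online/offlined during
			 * this check.
			 */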
			if (num_online_cpus() == 1) {
				pr_warn("Unable to remove last online CPU %pOFn\n", dn);
				rc = -EBUSY;
				goto out_unlock;
			}

			cpu_maps_update_done();
			rc = device_offline(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();
			break;
		}
		if (cpu >= nr_cpu_ids) {
			pr_warn("Could not find cpu to offline with physical id 0x%x\n",
				thread);
		}
	}
out_unlock:
	cpu_maps_update_done();

out:
	return rc;
}

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			cpu_maps_update_done();
			find_and_update_cpu_nid(cpu);
			rc = device_online(get_cpu_device(cpu));
			if (rc) {
				dlpar_offline_cpu(dn);
				goto out;
			}
			cpu_maps_update_begin();

			break;
		}
		if (cpu >= nr_cpu_ids)
			pr_warn("Could not find cpu to online with physical id 0x%x\n",
				thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;
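	/* Assume the cpu doesn't exist until proven otherwise. */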
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}

static bool drc_info_valid_index(struct device_node *parent, u32 drc_index)
{
	struct property *info;
	struct of_drc_info drc;
	const __be32 *value;
	u32 index;
	int count, i, j;

	info = of_find_property(parent, "ibm,drc-info", NULL);
	if (!info)
		return false;

	value = of_prop_next_u32(info, NULL, &count);
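	/* First value of ibm,drc-info is the number of drc-info records. */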
	if (value)
		value++;
	else
		return false;

	for (i = 0; i < count; i++) {
		if (of_read_drc_info_cell(&info, &value, &drc))
			return false;

		if (strncmp(drc.drc_type, "CPU", 3))
			break;

		if (drc_index > drc.last_drc_index)
			continue;

		index = drc.drc_index_start;
		for (j = 0; j < drc.num_sequential_elems; j++) {
			if (drc_index == index)
				return true;

			index += drc.sequential_inc;
		}
	}

	return false;
}

static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	if (of_find_property(parent, "ibm,drc-info", NULL))
		return drc_info_valid_index(parent, drc_index);
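	/*
	 * Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values, so we start looking at index = 1.
	 */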
	index = 1;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}

static int pseries_cpuhp_attach_nodes(struct device_node *dn)
{
	struct of_changeset cs;
	int ret;
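	/*
	 * This device node is unattached but may have siblings; open-code the
	 * traversal.
	 */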
	for (of_changeset_init(&cs); dn != NULL; dn = dn->sibling) {
		ret = of_changeset_attach_node(&cs, dn);
		if (ret)
			goto out;
	}

	ret = of_changeset_apply(&cs);
out:
	of_changeset_destroy(&cs);
	return ret;
}

static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	rc = pseries_cpuhp_attach_nodes(dn);
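	/* Regardless of the result, we are done with the parent now. */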
	of_node_put(parent);

	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	update_numa_distance(dn);

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn,
		 drc_index);
	return rc;
}

static unsigned int pseries_cpuhp_cache_use_count(const struct device_node *cachedn)
{
	unsigned int use_count = 0;
	struct device_node *dn;

	WARN_ON(!of_node_is_type(cachedn, "cache"));

	for_each_of_cpu_node(dn) {
		if (of_find_next_cache_node(dn) == cachedn)
			use_count++;
	}

	for_each_node_by_type(dn, "cache") {
		if (of_find_next_cache_node(dn) == cachedn)
			use_count++;
	}

	return use_count;
}

static int pseries_cpuhp_detach_nodes(struct device_node *cpudn)
{
	struct device_node *dn;
	struct of_changeset cs;
	int ret = 0;

	of_changeset_init(&cs);
	ret = of_changeset_detach_node(&cs, cpudn);
	if (ret)
		goto out;

	dn = cpudn;
	while ((dn = of_find_next_cache_node(dn))) {
		if (pseries_cpuhp_cache_use_count(dn) > 1)
			break;

		ret = of_changeset_detach_node(&cs, dn);
		if (ret)
			goto out;
	}

	ret = of_changeset_apply(&cs);
out:
	of_changeset_destroy(&cs);
	return ret;
}

static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n",
		 dn, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
			drc_index, dn, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = pseries_cpuhp_detach_nodes(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %pOFn, rc: %d\n", dn, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}

static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}

static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}

int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 drc_index;
	int rc;

	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			rc = dlpar_cpu_remove_by_index(drc_index);
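			/*
			 * Setting the isolation state of an UNISOLATED/CONFIGURED
			 * device to UNISOLATE is a no-op, but the hypervisor can
			 * use it as a hint that the CPU removal failed.
			 */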
			if (rc)
				dlpar_unisolate_drc(drc_index);
		} else {
			rc = -EINVAL;
		}
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};

static int __init pseries_cpu_hotplug_init(void)
{
	int qcss_tok;
	unsigned int node;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
	    qcss_tok == RTAS_UNKNOWN_SERVICE) {
		pr_info("CPU Hotplug not supported by firmware - disabling.\n");
		return 0;
	}

	smp_ops->cpu_offline_self = pseries_cpu_offline_self;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;
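	/* Processors can be added/removed only on LPAR */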
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		for_each_node(node) {
			if (!alloc_cpumask_var_node(&node_recorded_ids_map[node],
						    GFP_KERNEL, node))
				return -ENOMEM;
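			/* Record ids of CPUs added at boot time. */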
			cpumask_copy(node_recorded_ids_map[node],
				     cpumask_of_node(node));
		}

		of_reconfig_notifier_register(&pseries_smp_nb);
	}

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);