/*
 * pcpu.c - physical CPU management for Xen dom0: query per-pcpu information
 * from the hypervisor and expose online/offline control through sysfs.
 */
#define pr_fmt(fmt) "xen_cpu: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/stat.h>
#include <linux/capability.h>

#include <xen/xen.h>
#include <xen/acpi.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

/*
 * struct pcpu - a physical CPU as reported by the Xen hypervisor
 * @list:	entry in the global xen_pcpus list
 * @dev:	device registered on the xen_cpu subsystem bus
 * @cpu_id:	Xen physical cpu number
 * @flags:	Xen physical cpu status flags (XEN_PCPU_FLAGS_*)
 */
struct pcpu {
	struct list_head list;
	struct device dev;
	uint32_t cpu_id;
	uint32_t flags;
};

static struct bus_type xen_pcpu_subsys = {
	.name = "xen_cpu",
	.dev_name = "xen_cpu",
};

static DEFINE_MUTEX(xen_pcpu_lock);

static LIST_HEAD(xen_pcpus);

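/*
 * Ask the hypervisor to offline/online a physical CPU via the
 * XENPF_cpu_offline/XENPF_cpu_online platform ops.
 */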
static int xen_pcpu_down(uint32_t cpu_id)
{
	struct xen_platform_op op = {
		.cmd = XENPF_cpu_offline,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.cpu_ol.cpuid = cpu_id,
	};

	return HYPERVISOR_platform_op(&op);
}

static int xen_pcpu_up(uint32_t cpu_id)
{
	struct xen_platform_op op = {
		.cmd = XENPF_cpu_online,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.cpu_ol.cpuid = cpu_id,
	};

	return HYPERVISOR_platform_op(&op);
}

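/* sysfs "online" attribute: report whether this physical CPU is online. */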
static ssize_t online_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct pcpu *cpu = container_of(dev, struct pcpu, dev);

	return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
}

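/* sysfs "online" attribute: writing 1 onlines the pcpu, writing 0 offlines it. */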
static ssize_t __ref online_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pcpu *pcpu = container_of(dev, struct pcpu, dev);
	unsigned long long val;
	ssize_t ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoull(buf, 0, &val) < 0)
		return -EINVAL;

	switch (val) {
	case 0:
		ret = xen_pcpu_down(pcpu->cpu_id);
		break;
	case 1:
		ret = xen_pcpu_up(pcpu->cpu_id);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret >= 0)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(online);

static struct attribute *pcpu_dev_attrs[] = {
	&dev_attr_online.attr,
	NULL
};

static umode_t pcpu_dev_is_visible(struct kobject *kobj,
				   struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * Xen never offlines cpu0 (the boot processor), so do not expose
	 * the "online" control for it.
	 */
	return dev->id ? attr->mode : 0;
}

static const struct attribute_group pcpu_dev_group = {
	.attrs = pcpu_dev_attrs,
	.is_visible = pcpu_dev_is_visible,
};

static const struct attribute_group *pcpu_dev_groups[] = {
	&pcpu_dev_group,
	NULL
};

static bool xen_pcpu_online(uint32_t flags)
{
	return !!(flags & XEN_PCPU_FLAGS_ONLINE);
}

static void pcpu_online_status(struct xenpf_pcpuinfo *info,
			       struct pcpu *pcpu)
{
	if (xen_pcpu_online(info->flags) &&
	   !xen_pcpu_online(pcpu->flags)) {
		/* the pcpu went online: update flags and notify user space */
		pcpu->flags |= XEN_PCPU_FLAGS_ONLINE;
		kobject_uevent(&pcpu->dev.kobj, KOBJ_ONLINE);
	} else if (!xen_pcpu_online(info->flags) &&
		   xen_pcpu_online(pcpu->flags)) {
		/* the pcpu went offline: update flags and notify user space */
		pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE;
		kobject_uevent(&pcpu->dev.kobj, KOBJ_OFFLINE);
	}
}

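/* Look up a pcpu by its Xen cpu id; the caller must hold xen_pcpu_lock. */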
static struct pcpu *get_pcpu(uint32_t cpu_id)
{
	struct pcpu *pcpu;

	list_for_each_entry(pcpu, &xen_pcpus, list) {
		if (pcpu->cpu_id == cpu_id)
			return pcpu;
	}

	return NULL;
}

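/* Device release callback: drop the pcpu from the global list and free it. */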
static void pcpu_release(struct device *dev)
{
	struct pcpu *pcpu = container_of(dev, struct pcpu, dev);

	list_del(&pcpu->list);
	kfree(pcpu);
}

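/* Unregister the device; pcpu_release() then removes and frees the pcpu. */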
static void unregister_and_remove_pcpu(struct pcpu *pcpu)
{
	struct device *dev;

	if (!pcpu)
		return;

	dev = &pcpu->dev;

	device_unregister(dev);
}

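/* Register the pcpu's device on the xen_cpu subsystem bus. */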
static int register_pcpu(struct pcpu *pcpu)
{
	struct device *dev;
	int err = -EINVAL;

	if (!pcpu)
		return err;

	dev = &pcpu->dev;
	dev->bus = &xen_pcpu_subsys;
	dev->id = pcpu->cpu_id;
	dev->release = pcpu_release;
	dev->groups = pcpu_dev_groups;

	err = device_register(dev);
	if (err) {
		pcpu_release(dev);
		return err;
	}

	return 0;
}

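/* Allocate a pcpu from hypervisor-provided info and register its device. */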
static struct pcpu *create_and_register_pcpu(struct xenpf_pcpuinfo *info)
{
	struct pcpu *pcpu;
	int err;

	if (info->flags & XEN_PCPU_FLAGS_INVALID)
		return ERR_PTR(-ENODEV);

	pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL);
	if (!pcpu)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pcpu->list);
	pcpu->cpu_id = info->xen_cpuid;
	pcpu->flags = info->flags;

	/* xen_pcpu_lock must be held for pcpu list manipulations */
	list_add_tail(&pcpu->list, &xen_pcpus);

	err = register_pcpu(pcpu);
	if (err) {
		pr_warn("Failed to register pcpu%u\n", info->xen_cpuid);
		return ERR_PTR(-ENOENT);
	}

	return pcpu;
}

/*
 * Query the hypervisor for the state of one physical CPU and bring the
 * local bookkeeping in sync.  The caller must hold xen_pcpu_lock.
 * Returns 0 on success or a negative errno on failure.
 */
static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
{
	int ret;
	struct pcpu *pcpu = NULL;
	struct xenpf_pcpuinfo *info;
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = cpu,
	};

	ret = HYPERVISOR_platform_op(&op);
	if (ret)
		return ret;

	info = &op.u.pcpu_info;
	if (max_cpu)
		*max_cpu = info->max_present;

	pcpu = get_pcpu(cpu);

	/*
	 * Only CPUs the hypervisor reports as present keep a sysfs interface;
	 * an invalid entry is torn down.
	 */
	if (info->flags & XEN_PCPU_FLAGS_INVALID) {
		unregister_and_remove_pcpu(pcpu);
		return 0;
	}

	if (!pcpu) {
		pcpu = create_and_register_pcpu(info);
		if (IS_ERR_OR_NULL(pcpu))
			return -ENODEV;
	} else
		pcpu_online_status(info, pcpu);

	return 0;
}

/*
 * Sync dom0's physical CPU information with the hypervisor's view.
 */
static int xen_sync_pcpus(void)
{
	/*
	 * Walk all physical CPUs the hypervisor knows about; max_cpu is
	 * refreshed from each XENPF_get_cpuinfo reply.
	 */
	uint32_t cpu = 0, max_cpu = 0;
	int err = 0;
	struct pcpu *pcpu, *tmp;

	mutex_lock(&xen_pcpu_lock);

	while (!err && (cpu <= max_cpu)) {
		err = sync_pcpu(cpu, &max_cpu);
		cpu++;
	}

	if (err)
		list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, list)
			unregister_and_remove_pcpu(pcpu);

	mutex_unlock(&xen_pcpu_lock);

	return err;
}

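/* The VIRQ handler cannot sleep, so the resync is deferred to a workqueue. */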
static void xen_pcpu_work_fn(struct work_struct *work)
{
	xen_sync_pcpus();
}
static DECLARE_WORK(xen_pcpu_work, xen_pcpu_work_fn);

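/* VIRQ_PCPU_STATE fires when a physical CPU changes state; schedule a resync. */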
static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
{
	schedule_work(&xen_pcpu_work);
	return IRQ_HANDLED;
}

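/*
 * Only the initial domain (dom0) manages physical CPUs: bind the pcpu state
 * VIRQ, register the xen_cpu subsystem and do an initial sync.
 */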
static int __init xen_pcpu_init(void)
{
	int irq, ret;

	if (!xen_initial_domain())
		return -ENODEV;

	irq = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0,
				      xen_pcpu_interrupt, 0,
				      "xen-pcpu", NULL);
	if (irq < 0) {
		pr_warn("Failed to bind pcpu virq\n");
		return irq;
	}

	ret = subsys_system_register(&xen_pcpu_subsys, NULL);
	if (ret) {
		pr_warn("Failed to register pcpu subsys\n");
		goto err1;
	}

	ret = xen_sync_pcpus();
	if (ret) {
		pr_warn("Failed to sync pcpu info\n");
		goto err2;
	}

	return 0;

err2:
	bus_unregister(&xen_pcpu_subsys);
err1:
	unbind_from_irqhandler(irq, NULL);
	return ret;
}
arch_initcall(xen_pcpu_init);