0001
0002 #include <linux/smp.h>
0003 #include <linux/cpu.h>
0004 #include <linux/slab.h>
0005 #include <linux/cpumask.h>
0006 #include <linux/percpu.h>
0007
0008 #include <xen/events.h>
0009
0010 #include <xen/hvc-console.h>
0011 #include "xen-ops.h"
0012 #include "smp.h"
0013
0014 static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
0015 static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
0016 static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
0017 static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
0018
0019 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
0020 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
0021
0022
0023
0024
0025 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
0026 {
0027 inc_irq_stat(irq_resched_count);
0028 scheduler_ipi();
0029
0030 return IRQ_HANDLED;
0031 }
0032
0033 void xen_smp_intr_free(unsigned int cpu)
0034 {
0035 if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
0036 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
0037 per_cpu(xen_resched_irq, cpu).irq = -1;
0038 kfree(per_cpu(xen_resched_irq, cpu).name);
0039 per_cpu(xen_resched_irq, cpu).name = NULL;
0040 }
0041 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
0042 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
0043 per_cpu(xen_callfunc_irq, cpu).irq = -1;
0044 kfree(per_cpu(xen_callfunc_irq, cpu).name);
0045 per_cpu(xen_callfunc_irq, cpu).name = NULL;
0046 }
0047 if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
0048 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
0049 per_cpu(xen_debug_irq, cpu).irq = -1;
0050 kfree(per_cpu(xen_debug_irq, cpu).name);
0051 per_cpu(xen_debug_irq, cpu).name = NULL;
0052 }
0053 if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
0054 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
0055 NULL);
0056 per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
0057 kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
0058 per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
0059 }
0060 }
0061
0062 int xen_smp_intr_init(unsigned int cpu)
0063 {
0064 int rc;
0065 char *resched_name, *callfunc_name, *debug_name;
0066
0067 resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
0068 rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
0069 cpu,
0070 xen_reschedule_interrupt,
0071 IRQF_PERCPU|IRQF_NOBALANCING,
0072 resched_name,
0073 NULL);
0074 if (rc < 0)
0075 goto fail;
0076 per_cpu(xen_resched_irq, cpu).irq = rc;
0077 per_cpu(xen_resched_irq, cpu).name = resched_name;
0078
0079 callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
0080 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
0081 cpu,
0082 xen_call_function_interrupt,
0083 IRQF_PERCPU|IRQF_NOBALANCING,
0084 callfunc_name,
0085 NULL);
0086 if (rc < 0)
0087 goto fail;
0088 per_cpu(xen_callfunc_irq, cpu).irq = rc;
0089 per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
0090
0091 if (!xen_fifo_events) {
0092 debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
0093 rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
0094 xen_debug_interrupt,
0095 IRQF_PERCPU | IRQF_NOBALANCING,
0096 debug_name, NULL);
0097 if (rc < 0)
0098 goto fail;
0099 per_cpu(xen_debug_irq, cpu).irq = rc;
0100 per_cpu(xen_debug_irq, cpu).name = debug_name;
0101 }
0102
0103 callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
0104 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
0105 cpu,
0106 xen_call_function_single_interrupt,
0107 IRQF_PERCPU|IRQF_NOBALANCING,
0108 callfunc_name,
0109 NULL);
0110 if (rc < 0)
0111 goto fail;
0112 per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
0113 per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
0114
0115 return 0;
0116
0117 fail:
0118 xen_smp_intr_free(cpu);
0119 return rc;
0120 }
0121
0122 void __init xen_smp_cpus_done(unsigned int max_cpus)
0123 {
0124 if (xen_hvm_domain())
0125 native_smp_cpus_done(max_cpus);
0126 else
0127 calculate_max_logical_packages();
0128 }
0129
0130 void xen_smp_send_reschedule(int cpu)
0131 {
0132 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
0133 }
0134
0135 static void __xen_send_IPI_mask(const struct cpumask *mask,
0136 int vector)
0137 {
0138 unsigned cpu;
0139
0140 for_each_cpu_and(cpu, mask, cpu_online_mask)
0141 xen_send_IPI_one(cpu, vector);
0142 }
0143
0144 void xen_smp_send_call_function_ipi(const struct cpumask *mask)
0145 {
0146 int cpu;
0147
0148 __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
0149
0150
0151 for_each_cpu(cpu, mask) {
0152 if (xen_vcpu_stolen(cpu)) {
0153 HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
0154 break;
0155 }
0156 }
0157 }
0158
0159 void xen_smp_send_call_function_single_ipi(int cpu)
0160 {
0161 __xen_send_IPI_mask(cpumask_of(cpu),
0162 XEN_CALL_FUNCTION_SINGLE_VECTOR);
0163 }
0164
0165 static inline int xen_map_vector(int vector)
0166 {
0167 int xen_vector;
0168
0169 switch (vector) {
0170 case RESCHEDULE_VECTOR:
0171 xen_vector = XEN_RESCHEDULE_VECTOR;
0172 break;
0173 case CALL_FUNCTION_VECTOR:
0174 xen_vector = XEN_CALL_FUNCTION_VECTOR;
0175 break;
0176 case CALL_FUNCTION_SINGLE_VECTOR:
0177 xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
0178 break;
0179 case IRQ_WORK_VECTOR:
0180 xen_vector = XEN_IRQ_WORK_VECTOR;
0181 break;
0182 #ifdef CONFIG_X86_64
0183 case NMI_VECTOR:
0184 case APIC_DM_NMI:
0185 xen_vector = XEN_NMI_VECTOR;
0186 break;
0187 #endif
0188 default:
0189 xen_vector = -1;
0190 printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
0191 vector);
0192 }
0193
0194 return xen_vector;
0195 }
0196
/* Send a native-vector IPI to @mask, translating to the Xen vector. */
void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector < 0)
		return;

	__xen_send_IPI_mask(mask, xen_vector);
}
0205
0206 void xen_send_IPI_all(int vector)
0207 {
0208 int xen_vector = xen_map_vector(vector);
0209
0210 if (xen_vector >= 0)
0211 __xen_send_IPI_mask(cpu_online_mask, xen_vector);
0212 }
0213
/* Send a native-vector IPI to the current CPU. */
void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector < 0)
		return;

	xen_send_IPI_one(smp_processor_id(), xen_vector);
}
0221
0222 void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
0223 int vector)
0224 {
0225 unsigned cpu;
0226 unsigned int this_cpu = smp_processor_id();
0227 int xen_vector = xen_map_vector(vector);
0228
0229 if (!(num_online_cpus() > 1) || (xen_vector < 0))
0230 return;
0231
0232 for_each_cpu_and(cpu, mask, cpu_online_mask) {
0233 if (this_cpu == cpu)
0234 continue;
0235
0236 xen_send_IPI_one(cpu, xen_vector);
0237 }
0238 }
0239
0240 void xen_send_IPI_allbutself(int vector)
0241 {
0242 xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
0243 }
0244
0245 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
0246 {
0247 generic_smp_call_function_interrupt();
0248 inc_irq_stat(irq_call_count);
0249
0250 return IRQ_HANDLED;
0251 }
0252
0253 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
0254 {
0255 generic_smp_call_function_single_interrupt();
0256 inc_irq_stat(irq_call_count);
0257
0258 return IRQ_HANDLED;
0259 }