0001
0002
0003 #include <linux/cpuhotplug.h>
0004 #include <linux/cpumask.h>
0005 #include <linux/slab.h>
0006 #include <linux/mm.h>
0007
0008 #include <asm/apic.h>
0009
0010 #include "local.h"
0011
/* Tracks which CPUs share one x2apic cluster and where its memory lives. */
struct cluster_mask {
	unsigned int	clusterid;	/* cluster ID = logical APIC ID >> 16 */
	int		node;		/* NUMA node this mask was allocated on */
	struct cpumask	mask;		/* CPUs belonging to this cluster */
};
0017
0018
0019
0020
0021
0022
/* Per-CPU logical APIC ID (APIC_LDR contents), recorded by init_x2apic_ldr() */
static u32 *x86_cpu_to_logical_apicid __read_mostly;

/* Per-CPU scratch mask used by __x2apic_send_IPI_mask() with IRQs disabled */
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
/* Each CPU's link to the cluster_mask of the cluster it belongs to */
static DEFINE_PER_CPU_READ_MOSTLY(struct cluster_mask *, cluster_masks);
/* Spare cluster_mask preallocated in the hotplug prepare stage */
static struct cluster_mask *cluster_hotplug_mask;
0028
/* Select this driver whenever x2apic mode is enabled; OEM IDs are ignored. */
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}
0033
/* Send @vector to a single @cpu via its logical APIC ID. */
static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = x86_cpu_to_logical_apicid[cpu];

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}
0042
/*
 * Send @vector to the CPUs in @mask with one IPI per x2apic cluster:
 * for each not-yet-handled CPU, OR together the logical APIC IDs of all
 * targeted CPUs in its cluster and fire a single logical-mode IPI at the
 * combined destination, then strip that cluster from the work mask.
 */
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned int cpu, clustercpu;
	struct cpumask *tmpmsk;
	unsigned long flags;
	u32 dest;

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	local_irq_save(flags);

	/* Scratch copy; IRQs are off, so the per-CPU mask cannot be reused */
	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(tmpmsk, mask);
	/* If the IPI must not go to self, remove the current CPU */
	if (apic_dest != APIC_DEST_ALLINC)
		__cpumask_clear_cpu(smp_processor_id(), tmpmsk);

	/* Collapse CPUs in a cluster so a single IPI per cluster is sent */
	for_each_cpu(cpu, tmpmsk) {
		struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);

		dest = 0;
		for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
			dest |= x86_cpu_to_logical_apicid[clustercpu];

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
		/* Remove the already-handled cluster CPUs from the work mask */
		cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
	}

	local_irq_restore(flags);
}
0079
/* Send @vector to every CPU in @mask, including the sender if present. */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}
0084
/* Send @vector to every CPU in @mask except the sending CPU. */
static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}
0090
/* Broadcast @vector to all other CPUs using the APIC shorthand. */
static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT);
}
0095
/* Broadcast @vector to all CPUs, self included, using the APIC shorthand. */
static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC);
}
0100
/* IPI destination for @cpu: the logical APIC ID captured at LDR setup. */
static u32 x2apic_calc_apicid(unsigned int cpu)
{
	return x86_cpu_to_logical_apicid[cpu];
}
0105
/*
 * Runs on the CPU being brought up: record its logical APIC ID and link
 * it into the cluster_mask of its x2apic cluster, consuming the hotplug
 * spare mask if no other online CPU is in the same cluster yet.
 */
static void init_x2apic_ldr(void)
{
	struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
	u32 cluster, apicid = apic_read(APIC_LDR);
	unsigned int cpu;

	x86_cpu_to_logical_apicid[smp_processor_id()] = apicid;

	/* Already linked to a cluster mask? Just update the membership bit. */
	if (cmsk)
		goto update;

	/* The upper 16 bits of APIC_LDR identify the cluster */
	cluster = apicid >> 16;
	for_each_online_cpu(cpu) {
		cmsk = per_cpu(cluster_masks, cpu);
		/* Matching cluster found. Link and update it. */
		if (cmsk && cmsk->clusterid == cluster)
			goto update;
	}
	/* First CPU of this cluster: take ownership of the preallocated spare */
	cmsk = cluster_hotplug_mask;
	cmsk->clusterid = cluster;
	cluster_hotplug_mask = NULL;
update:
	this_cpu_write(cluster_masks, cmsk);
	cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}
0131
/*
 * Make sure a spare cluster_mask is available before @cpu comes up, in
 * case it turns out to be the first CPU of a new cluster.  Returns 0 on
 * success, -ENOMEM when the allocation fails.
 */
static int alloc_clustermask(unsigned int cpu, int node)
{
	/* CPU was online before and already has a cluster assigned */
	if (per_cpu(cluster_masks, cpu))
		return 0;

	/*
	 * If a hotplug spare mask exists, check whether it's on the right
	 * node. If not, free it and allocate a new one.
	 */
	if (cluster_hotplug_mask) {
		if (cluster_hotplug_mask->node == node)
			return 0;
		kfree(cluster_hotplug_mask);
	}

	cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
					    GFP_KERNEL, node);
	if (!cluster_hotplug_mask)
		return -ENOMEM;
	cluster_hotplug_mask->node = node;
	return 0;
}
0153
0154 static int x2apic_prepare_cpu(unsigned int cpu)
0155 {
0156 if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
0157 return -ENOMEM;
0158 if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
0159 return -ENOMEM;
0160 return 0;
0161 }
0162
0163 static int x2apic_dead_cpu(unsigned int dead_cpu)
0164 {
0165 struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
0166
0167 if (cmsk)
0168 cpumask_clear_cpu(dead_cpu, &cmsk->mask);
0169 free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
0170 return 0;
0171 }
0172
/*
 * Driver probe: returns 1 when cluster x2apic can be used, 0 otherwise.
 * Sets up the logical APIC ID table, the hotplug callbacks, and the
 * boot CPU's cluster state.
 */
static int x2apic_cluster_probe(void)
{
	u32 slots;

	if (!x2apic_mode)
		return 0;

	/* One slot per possible CPU, but at least a full cache line worth */
	slots = max_t(u32, L1_CACHE_BYTES/sizeof(u32), nr_cpu_ids);
	x86_cpu_to_logical_apicid = kcalloc(slots, sizeof(u32), GFP_KERNEL);
	if (!x86_cpu_to_logical_apicid)
		return 0;

	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
		/* Unwind: without the hotplug callbacks the driver is unusable */
		pr_err("Failed to register X2APIC_PREPARE\n");
		kfree(x86_cpu_to_logical_apicid);
		x86_cpu_to_logical_apicid = NULL;
		return 0;
	}
	/* Boot CPU: record its logical APIC ID and set up its cluster */
	init_x2apic_ldr();
	return 1;
}
0195
/*
 * The cluster x2apic driver: logical destination mode, MSR-based APIC
 * access, IPIs coalesced to one per x2apic cluster.
 */
static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.delivery_mode			= APIC_DELIVERY_MODE_FIXED,
	.dest_mode_logical		= true,

	.disable_esr			= 0,

	.check_apicid_used		= NULL,
	.init_apic_ldr			= init_x2apic_ldr,
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,

	.calc_dest_apicid		= x2apic_calc_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.inquire_remote_apic		= NULL,

	/* x2apic registers are accessed via MSRs, not MMIO */
	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
0240
0241 apic_driver(apic_x2apic_cluster);