/*
 * cpu_rmap.c: CPU affinity reverse-map support
 * Copyright 2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/cpu_rmap.h>
#include <linux/interrupt.h>
#include <linux/export.h>
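
/*
 * These functions maintain a mapping from CPUs to some ordered set of
 * objects with CPU affinities.  This can be seen as a reverse-map of
 * CPU affinity.  However, we do not assume that the object affinities
 * cover all CPUs in the system.  For those CPUs not directly covered
 * by object affinities, we attempt to find a nearest object based on
 * CPU topology.
 */

/**
 * alloc_cpu_rmap - allocate CPU affinity reverse-map
 * @size: Number of objects to be mapped
 * @flags: Allocation flags e.g. %GFP_KERNEL
 */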
struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
{
	struct cpu_rmap *rmap;
	unsigned int cpu;
	size_t obj_offset;

	/* Object indices are u16, so reject absurd sizes */
	if (size > 0xffff)
		return NULL;

	/* Offset of object pointer array from base structure */
	obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
			   sizeof(void *));

	rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
	if (!rmap)
		return NULL;

	kref_init(&rmap->refcount);
	rmap->obj = (void **)((char *)rmap + obj_offset);

	/* Initially assign CPUs to objects on a simple rota, since we
	 * have no affinity information yet.  The distance is left at
	 * "infinity" so that any real affinity assignment will
	 * override these placeholder entries.
	 */
	for_each_possible_cpu(cpu) {
		rmap->near[cpu].index = cpu % size;
		rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
	}

	rmap->size = size;
	return rmap;
}
EXPORT_SYMBOL(alloc_cpu_rmap);
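
/**
 * cpu_rmap_release - internal reclaiming helper called from kref_put()
 * @ref: kref to struct cpu_rmap
 */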
static void cpu_rmap_release(struct kref *ref)
{
	struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
	kfree(rmap);
}
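
/**
 * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
 * @rmap: reverse-map allocated with alloc_cpu_rmap()
 */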
static inline void cpu_rmap_get(struct cpu_rmap *rmap)
{
	kref_get(&rmap->refcount);
}
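
/**
 * cpu_rmap_put - release ref on a cpu_rmap
 * @rmap: reverse-map allocated with alloc_cpu_rmap()
 */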
int cpu_rmap_put(struct cpu_rmap *rmap)
{
	return kref_put(&rmap->refcount, cpu_rmap_release);
}
EXPORT_SYMBOL(cpu_rmap_put);
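
/* Reevaluate nearest object for given CPU, comparing with the given
 * neighbours at the given distance.
 */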
static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
				const struct cpumask *mask, u16 dist)
{
	int neigh;

	for_each_cpu(neigh, mask) {
		if (rmap->near[cpu].dist > dist &&
		    rmap->near[neigh].dist <= dist) {
			rmap->near[cpu].index = rmap->near[neigh].index;
			rmap->near[cpu].dist = dist;
			return true;
		}
	}
	return false;
}

#ifdef DEBUG
static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
	unsigned int index;
	unsigned int cpu;

	pr_info("cpu_rmap %p, %s:\n", rmap, prefix);

	for_each_possible_cpu(cpu) {
		index = rmap->near[cpu].index;
		pr_info("cpu %u -> obj %u (distance %u)\n",
			cpu, index, rmap->near[cpu].dist);
	}
}
#else
static inline void
debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
}
#endif
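
/**
 * cpu_rmap_add - add object to a rmap
 * @rmap: CPU rmap allocated with alloc_cpu_rmap()
 * @obj: Object to add to rmap
 *
 * Return index of object.
 */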
int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
{
	u16 index;

	BUG_ON(rmap->used >= rmap->size);
	index = rmap->used++;
	rmap->obj[index] = obj;
	return index;
}
EXPORT_SYMBOL(cpu_rmap_add);
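
/**
 * cpu_rmap_update - update CPU rmap following a change of object affinity
 * @rmap: CPU rmap to update
 * @index: Index of object whose affinity changed
 * @affinity: New CPU affinity of object
 */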
int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
		    const struct cpumask *affinity)
{
	cpumask_var_t update_mask;
	unsigned int cpu;

	if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
		return -ENOMEM;

	/* Invalidate distance for all CPUs for which this used to be
	 * the nearest object.  Mark those CPUs for update.
	 */
	for_each_online_cpu(cpu) {
		if (rmap->near[cpu].index == index) {
			rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
			cpumask_set_cpu(cpu, update_mask);
		}
	}

	debug_print_rmap(rmap, "after invalidating old distances");

	/* Set distance of all CPUs in the new affinity mask to 0, and
	 * mark every CPU in their NUMA nodes for update.
	 */
	for_each_cpu(cpu, affinity) {
		rmap->near[cpu].index = index;
		rmap->near[cpu].dist = 0;
		cpumask_or(update_mask, update_mask,
			   cpumask_of_node(cpu_to_node(cpu)));
	}

	debug_print_rmap(rmap, "after updating neighbours");

	/* Update distances based on topology */
	for_each_cpu(cpu, update_mask) {
		if (cpu_rmap_copy_neigh(rmap, cpu,
					topology_sibling_cpumask(cpu), 1))
			continue;
		if (cpu_rmap_copy_neigh(rmap, cpu,
					topology_core_cpumask(cpu), 2))
			continue;
		if (cpu_rmap_copy_neigh(rmap, cpu,
					cpumask_of_node(cpu_to_node(cpu)), 3))
			continue;
		/* No nearer object found within the NUMA node; give up
		 * and leave this CPU's previous index at "infinite"
		 * distance, to be overridden by any later assignment.
		 */
	}

	debug_print_rmap(rmap, "after copying neighbours");

	free_cpumask_var(update_mask);
	return 0;
}
EXPORT_SYMBOL(cpu_rmap_update);
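
/* Glue between IRQ affinity notifiers and CPU rmaps */
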
struct irq_glue {
	struct irq_affinity_notify notify;
	struct cpu_rmap *rmap;
	u16 index;
};
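
/**
 * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
 * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
 *
 * Must be called in process context, before freeing the IRQs.
 */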
void free_irq_cpu_rmap(struct cpu_rmap *rmap)
{
	struct irq_glue *glue;
	u16 index;

	if (!rmap)
		return;

	for (index = 0; index < rmap->used; index++) {
		glue = rmap->obj[index];
		/* Entries may be NULL if irq_cpu_rmap_add() failed */
		if (glue)
			irq_set_affinity_notifier(glue->notify.irq, NULL);
	}

	cpu_rmap_put(rmap);
}
EXPORT_SYMBOL(free_irq_cpu_rmap);
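
/**
 * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
 * @notify: struct irq_affinity_notify passed by irq/manage.c
 * @mask: cpu mask for new SMP affinity
 *
 * This is executed in workqueue context.
 */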
static void
irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
{
	struct irq_glue *glue =
		container_of(notify, struct irq_glue, notify);
	int rc;

	rc = cpu_rmap_update(glue->rmap, glue->index, mask);
	if (rc)
		pr_warn("irq_cpu_rmap_notify: update failed: %d\n", rc);
}
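
/**
 * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
 * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
 */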
static void irq_cpu_rmap_release(struct kref *ref)
{
	struct irq_glue *glue =
		container_of(ref, struct irq_glue, notify.kref);

	cpu_rmap_put(glue->rmap);
	kfree(glue);
}
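
/**
 * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
 * @rmap: The reverse-map
 * @irq: The IRQ number
 *
 * This adds an IRQ affinity notifier that will update the reverse-map
 * automatically.
 *
 * Must be called in process context, after the IRQ is allocated but
 * before it is bound with request_irq().
 */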
int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
{
	struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	int rc;

	if (!glue)
		return -ENOMEM;
	glue->notify.notify = irq_cpu_rmap_notify;
	glue->notify.release = irq_cpu_rmap_release;
	glue->rmap = rmap;
	cpu_rmap_get(rmap);
	glue->index = cpu_rmap_add(rmap, glue);
	rc = irq_set_affinity_notifier(irq, &glue->notify);
	if (rc) {
		/* Clear the stale rmap entry so that free_irq_cpu_rmap()
		 * does not touch the glue after it is freed below.
		 */
		cpu_rmap_put(glue->rmap);
		rmap->obj[glue->index] = NULL;
		kfree(glue);
	}
	return rc;
}
EXPORT_SYMBOL(irq_cpu_rmap_add);
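
/*
 * Typical driver usage, as a minimal illustrative sketch (the names
 * "my_priv", "setup_rx_rmap", "n_vecs" and "irqs" are hypothetical,
 * not part of this API):
 *
 *	static int setup_rx_rmap(struct my_priv *priv)
 *	{
 *		unsigned int i;
 *		int rc;
 *
 *		priv->rmap = alloc_cpu_rmap(priv->n_vecs, GFP_KERNEL);
 *		if (!priv->rmap)
 *			return -ENOMEM;
 *		for (i = 0; i < priv->n_vecs; i++) {
 *			rc = irq_cpu_rmap_add(priv->rmap, priv->irqs[i]);
 *			if (rc) {
 *				free_irq_cpu_rmap(priv->rmap);
 *				return rc;
 *			}
 *		}
 *		return 0;
 *	}
 *
 * Thereafter, affinity changes on those IRQs update the rmap through
 * the notifier, and the nearest object index for a given CPU can be
 * looked up via the helpers declared in <linux/cpu_rmap.h>.
 */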