// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>

#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#define IRQ_MATRIX_SIZE	(BITS_TO_LONGS(IRQ_MATRIX_BITS))
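
/**
 * struct cpumap - per-CPU bitmap state of the matrix
 * @available:		Number of bits still allocatable on this CPU
 * @allocated:		Number of bits allocated on this CPU
 * @managed:		Number of bits reserved for managed interrupts
 * @managed_allocated:	Number of managed bits currently allocated
 * @initialized:	True once @available has been set up
 * @online:		True while this CPU map takes part in allocations
 * @alloc_map:		Bitmap of allocated bits
 * @managed_map:	Bitmap of bits reserved for managed interrupts
 */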
struct cpumap {
	unsigned int		available;
	unsigned int		allocated;
	unsigned int		managed;
	unsigned int		managed_allocated;
	bool			initialized;
	bool			online;
	unsigned long		alloc_map[IRQ_MATRIX_SIZE];
	unsigned long		managed_map[IRQ_MATRIX_SIZE];
};
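
/**
 * struct irq_matrix - global bookkeeping for the matrix allocator
 * @matrix_bits:	Number of bits in each per-CPU bitmap
 * @alloc_start:	First bit of the allocation range
 * @alloc_end:		First bit after the allocation range
 * @alloc_size:		Size of the allocation range
 * @global_available:	Sum of @available over all online CPU maps
 * @global_reserved:	Number of reserved, not yet allocated, interrupts
 * @systembits_inalloc:	Number of system bits inside the allocation range
 * @total_allocated:	Total number of allocated bits on online CPUs
 * @online_maps:	Number of online CPU maps
 * @maps:		Per-CPU maps
 * @scratch_map:	Scratch bitmap for allocation searches
 * @system_map:		Bitmap of system-wide reserved bits
 */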
struct irq_matrix {
	unsigned int		matrix_bits;
	unsigned int		alloc_start;
	unsigned int		alloc_end;
	unsigned int		alloc_size;
	unsigned int		global_available;
	unsigned int		global_reserved;
	unsigned int		systembits_inalloc;
	unsigned int		total_allocated;
	unsigned int		online_maps;
	struct cpumap __percpu	*maps;
	unsigned long		scratch_map[IRQ_MATRIX_SIZE];
	unsigned long		system_map[IRQ_MATRIX_SIZE];
};

#define CREATE_TRACE_POINTS
#include <trace/events/irq_matrix.h>
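
/**
 * irq_alloc_matrix - Allocate an irq_matrix structure and initialize it
 * @matrix_bits:	Number of matrix bits, must be <= IRQ_MATRIX_BITS
 * @alloc_start:	From which bit the allocation search starts
 * @alloc_end:		At which bit the allocation search ends, i.e. the
 *			first invalid bit
 *
 * Returns the matrix pointer on success, NULL when @matrix_bits is out of
 * range or an allocation fails. As a usage sketch, an architecture vector
 * allocator sets this up once at boot, along the lines of the x86 call
 * irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR, FIRST_SYSTEM_VECTOR).
 */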
__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
					   unsigned int alloc_start,
					   unsigned int alloc_end)
{
	struct irq_matrix *m;

	if (matrix_bits > IRQ_MATRIX_BITS)
		return NULL;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return NULL;

	m->matrix_bits = matrix_bits;
	m->alloc_start = alloc_start;
	m->alloc_end = alloc_end;
	m->alloc_size = alloc_end - alloc_start;
	m->maps = alloc_percpu(*m->maps);
	if (!m->maps) {
		kfree(m);
		return NULL;
	}
	return m;
}
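
/**
 * irq_matrix_online - Bring the local CPU matrix online
 * @m:		Matrix pointer
 */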
void irq_matrix_online(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(cm->online);

	if (!cm->initialized) {
		cm->available = m->alloc_size;
		cm->available -= cm->managed + m->systembits_inalloc;
		cm->initialized = true;
	}
	m->global_available += cm->available;
	cm->online = true;
	m->online_maps++;
	trace_irq_matrix_online(m);
}
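
/**
 * irq_matrix_offline - Bring the local CPU matrix offline
 * @m:		Matrix pointer
 */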
void irq_matrix_offline(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* Update the global available size */
	m->global_available -= cm->available;
	cm->online = false;
	m->online_maps--;
	trace_irq_matrix_offline(m);
}
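
/*
 * Find a zero area of @num consecutive bits which is not occupied by
 * system, managed or already allocated bits, and mark it in either the
 * managed or the allocation map. Returns the first bit of the area, or a
 * value >= m->alloc_end when no such area exists.
 */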
static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
				      unsigned int num, bool managed)
{
	unsigned int area, start = m->alloc_start;
	unsigned int end = m->alloc_end;

	bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
	bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
	area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
	if (area >= end)
		return area;
	if (managed)
		bitmap_set(cm->managed_map, area, num);
	else
		bitmap_set(cm->alloc_map, area, num);
	return area;
}
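
/* Find the best CPU: the one with the highest number of available vectors */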
static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
					 const struct cpumask *msk)
{
	unsigned int cpu, best_cpu, maxavl = 0;
	struct cpumap *cm;

	best_cpu = UINT_MAX;

	for_each_cpu(cpu, msk) {
		cm = per_cpu_ptr(m->maps, cpu);

		if (!cm->online || cm->available <= maxavl)
			continue;

		best_cpu = cpu;
		maxavl = cm->available;
	}
	return best_cpu;
}
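
/* Find the best CPU: the one with the lowest number of managed IRQs allocated */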
static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
						 const struct cpumask *msk)
{
	unsigned int cpu, best_cpu, allocated = UINT_MAX;
	struct cpumap *cm;

	best_cpu = UINT_MAX;

	for_each_cpu(cpu, msk) {
		cm = per_cpu_ptr(m->maps, cpu);

		if (!cm->online || cm->managed_allocated > allocated)
			continue;

		best_cpu = cpu;
		allocated = cm->managed_allocated;
	}
	return best_cpu;
}
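
/**
 * irq_matrix_assign_system - Assign a system wide entry in the matrix
 * @m:		Matrix pointer
 * @bit:	Which bit to reserve
 * @replace:	Replace an already allocated vector with a system
 *		vector at the same bit position
 *
 * The BUG_ON()s below are on purpose. If this goes wrong in the early
 * boot process, the chance to survive is about zero. If it happens once
 * the system is up and running, it is not much better.
 */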
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
			      bool replace)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	BUG_ON(bit > m->matrix_bits);
	BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));

	set_bit(bit, m->system_map);
	if (replace) {
		BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
		cm->allocated--;
		m->total_allocated--;
	}
	if (bit >= m->alloc_start && bit < m->alloc_end)
		m->systembits_inalloc++;

	trace_irq_matrix_assign_system(bit, m);
}
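
/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be reserved
 *
 * Can be called for offline CPUs. When the reservation fails on one CPU,
 * the reservations made on the already processed CPUs are rolled back and
 * -ENOSPC is returned.
 */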
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
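
/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs.
 *
 * This removes unallocated managed bits from the map. It does not matter
 * which one, because the managed interrupts free their allocation when
 * they shut down. If that did not happen, the accounting is broken, but
 * all that can be done at this point is to warn about it.
 */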
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get managed bits which are not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		bit = find_first_bit(m->scratch_map, end);
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);

		cm->managed--;
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}
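
/**
 * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	Which CPUs to search in
 * @mapped_cpu:	Pointer to store the CPU for which the irq was allocated
 *
 * Returns the allocated bit number on success, -EINVAL or -ENOSPC on
 * failure.
 */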
int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
			     unsigned int *mapped_cpu)
{
	unsigned int bit, cpu, end;
	struct cpumap *cm;

	if (cpumask_empty(msk))
		return -EINVAL;

	cpu = matrix_find_best_cpu_managed(m, msk);
	if (cpu == UINT_MAX)
		return -ENOSPC;

	cm = per_cpu_ptr(m->maps, cpu);
	end = m->alloc_end;
	/* Get managed bits which are not allocated */
	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
	bit = find_first_bit(m->scratch_map, end);
	if (bit >= end)
		return -ENOSPC;
	set_bit(bit, cm->alloc_map);
	cm->allocated++;
	cm->managed_allocated++;
	m->total_allocated++;
	*mapped_cpu = cpu;
	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
	return bit;
}
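
/**
 * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
 * @m:		Matrix pointer
 * @bit:	Which bit to mark
 *
 * This should only be used to mark preallocated vectors.
 */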
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;
	if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
		return;
	cm->allocated++;
	m->total_allocated++;
	cm->available--;
	m->global_available--;
	trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}
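
/**
 * irq_matrix_reserve - Reserve interrupts
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It increments the number of globally
 * reserved interrupt bits without actually allocating them. This allows
 * interrupt descriptors to be set up without assigning low level resources
 * to them. The actual allocation happens when the interrupt gets activated.
 */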
void irq_matrix_reserve(struct irq_matrix *m)
{
	if (m->global_reserved == m->global_available)
		pr_warn("Interrupt reservation exceeds available resources\n");

	m->global_reserved++;
	trace_irq_matrix_reserve(m);
}
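
/**
 * irq_matrix_remove_reserved - Remove interrupt reservation
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It decrements the number of globally
 * reserved interrupt bits without touching the actual allocations.
 */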
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
	m->global_reserved--;
	trace_irq_matrix_remove_reserved(m);
}
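
/**
 * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	Which CPUs to search in
 * @reserved:	Allocate previously reserved interrupts
 * @mapped_cpu:	Pointer to store the CPU for which the irq was allocated
 */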
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
		     bool reserved, unsigned int *mapped_cpu)
{
	unsigned int cpu, bit;
	struct cpumap *cm;

	/*
	 * Not required in theory, but matrix_find_best_cpu() uses
	 * for_each_cpu() which ignores the cpumask on UP.
	 */
	if (cpumask_empty(msk))
		return -EINVAL;

	cpu = matrix_find_best_cpu(m, msk);
	if (cpu == UINT_MAX)
		return -ENOSPC;

	cm = per_cpu_ptr(m->maps, cpu);
	bit = matrix_alloc_area(m, cm, 1, false);
	if (bit >= m->alloc_end)
		return -ENOSPC;
	cm->allocated++;
	cm->available--;
	m->total_allocated++;
	m->global_available--;
	if (reserved)
		m->global_reserved--;
	*mapped_cpu = cpu;
	trace_irq_matrix_alloc(bit, cpu, m, cm);
	return bit;
}
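
/**
 * irq_matrix_free - Free an allocated interrupt in the matrix
 * @m:		Matrix pointer
 * @cpu:	Which CPU map needs to be updated
 * @bit:	The bit to remove
 * @managed:	If true, the interrupt is managed and not accounted
 *		as available
 */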
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
		     unsigned int bit, bool managed)
{
	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;

	if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
		return;

	cm->allocated--;
	if (managed)
		cm->managed_allocated--;

	if (cm->online)
		m->total_allocated--;

	if (!managed) {
		cm->available++;
		if (cm->online)
			m->global_available++;
	}
	trace_irq_matrix_free(bit, cpu, m, cm);
}
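
/**
 * irq_matrix_available - Get the number of globally available irqs
 * @m:		Pointer to the matrix to query
 * @cpudown:	If true, the local CPU is about to go down, adjust
 *		the number of available irqs accordingly
 */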
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	if (!cpudown)
		return m->global_available;
	return m->global_available - cm->available;
}
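
/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m:		Pointer to the matrix to query
 */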
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
	return m->global_reserved;
}
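
/**
 * irq_matrix_allocated - Get the number of allocated irqs on the local CPU
 * @m:		Pointer to the matrix to query
 */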
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	return cm->allocated;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
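
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:		Pointer to the seq_file to print to
 * @m:		Pointer to the matrix allocator
 * @ind:	Indentation for the print format
 *
 * Note: the counters are read without serialization against concurrent
 * allocations, so this is only a snapshot.
 */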
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	seq_printf(sf, "Online bitmaps:   %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved:  %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated:  %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d  %4u  %4u  %4u  %4u  %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed,
			   cm->managed_allocated, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
#endif