#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;
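
/*
 * Per-cpu counters are 64-bit so that long-running statistics
 * cannot overflow.
 */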
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
	unsigned long long *histogram;
};

struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long long stamp;
	struct dm_stat_percpu tmp;
};

struct dm_stat {
	struct list_head list_entry;
	int id;
	unsigned stat_flags;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	unsigned n_histogram_entries;
	unsigned long long *histogram_boundaries;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[];
};

#define STAT_PRECISE_TIMESTAMPS 1

struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};
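
/*
 * A bad set of create parameters could make the kernel run out of
 * memory.  To prevent that, every allocation is accounted here and a
 * new allocation fails once it would exceed 1/4 of total RAM or 1/2
 * of the vmalloc address space.
 */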
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}

static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}

static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	kvfree(ptr);
}

static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->histogram_boundaries);
	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu) {
		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	}
	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->precise_timestamps = false;
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}

void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
			cond_resched();
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
	mutex_destroy(&stats->mutex);
}

static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
{
	struct list_head *l;
	struct dm_stat *tmp_s;
	bool precise_timestamps = false;

	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) {
			precise_timestamps = true;
			break;
		}
	}
	stats->precise_timestamps = precise_timestamps;
}

static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, unsigned stat_flags,
			   unsigned n_histogram_entries,
			   unsigned long long *histogram_boundaries,
			   const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = struct_size(s, stat_shared, n_entries);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->stat_flags = stat_flags;
	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;
	s->histogram_alloc_size = histogram_alloc_size;

	s->n_histogram_entries = n_histogram_entries;
	s->histogram_boundaries = kmemdup(histogram_boundaries,
					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!s->histogram_boundaries) {
		r = -ENOMEM;
		goto out;
	}

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
		cond_resched();
	}

	if (s->n_histogram_entries) {
		unsigned long long *hi;
		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
		if (!hi) {
			r = -ENOMEM;
			goto out;
		}
		for (ni = 0; ni < n_entries; ni++) {
			s->stat_shared[ni].tmp.histogram = hi;
			hi += s->n_histogram_entries + 1;
			cond_resched();
		}
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
		if (s->n_histogram_entries) {
			unsigned long long *hi;
			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
			if (!hi) {
				r = -ENOMEM;
				goto out;
			}
			for (ni = 0; ni < n_entries; ni++) {
				p[ni].histogram = hi;
				hi += s->n_histogram_entries + 1;
				cond_resched();
			}
		}
	}
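
	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */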
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);

	dm_stats_recalc_precise_timestamps(stats);

	if (!static_key_enabled(&stats_enabled.key))
		static_branch_enable(&stats_enabled);

	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}

static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);

	dm_stats_recalc_precise_timestamps(stats);

	mutex_unlock(&stats->mutex);
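
	/*
	 * vfree can't be called from an RCU callback, so if any of the
	 * allocations may have come from vmalloc, wait for an expedited
	 * grace period and free synchronously instead.
	 */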
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu[cpu]) ||
		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
			goto do_sync_free;
	if (is_vmalloc_addr(s) ||
	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
}

static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;
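
	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 *	[precise_timestamps] [histogram:n1,n2,...]
	 */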
	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
			       (unsigned long long)s->start,
			       (unsigned long long)len,
			       (unsigned long long)s->step,
			       s->program_id,
			       s->aux_data);
			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
				DMEMIT(" precise_timestamps");
			if (s->n_histogram_entries) {
				unsigned i;
				DMEMIT(" histogram:");
				for (i = 0; i < s->n_histogram_entries; i++) {
					if (i)
						DMEMIT(",");
					DMEMIT("%llu", s->histogram_boundaries[i]);
				}
			}
			DMEMIT("\n");
		}
		cond_resched();
	}
	mutex_unlock(&stats->mutex);

	return 1;
}

static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
			  struct dm_stat_percpu *p)
{
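	/*
	 * Reading the in-flight counters and updating the shared stamp
	 * without a lock is racy, but the race only perturbs the
	 * approximate in-flight tick accounting.
	 */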
	unsigned long long now, difference;
	unsigned in_flight_read, in_flight_write;

	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
		now = jiffies;
	else
		now = ktime_to_ns(ktime_get());

	difference = now - shared->stamp;
	if (!difference)
		return;

	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}

static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      int idx, sector_t len,
			      struct dm_stats_aux *stats_aux, bool end,
			      unsigned long duration_jiffies)
{
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;
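
	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable everywhere.
	 *
	 * On 64-bit architectures racing with an interrupt at worst
	 * loses a few events, which is acceptable.  On 32-bit
	 * architectures a torn 64-bit counter update could put a value
	 * off by 2^32, so interrupts must be disabled there.
	 */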
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(s, shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		unsigned long long duration;
		dm_stat_round(s, shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += stats_aux->merged;
		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
			p->ticks[idx] += duration_jiffies;
			duration = jiffies_to_msecs(duration_jiffies);
		} else {
			p->ticks[idx] += stats_aux->duration_ns;
			duration = stats_aux->duration_ns;
		}
		if (s->n_histogram_entries) {
			unsigned lo = 0, hi = s->n_histogram_entries + 1;
			while (lo + 1 < hi) {
				unsigned mid = (lo + hi) / 2;
				if (s->histogram_boundaries[mid - 1] > duration)
					hi = mid;
				else
					lo = mid;
			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}

static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration_jiffies,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;
	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux, end, duration_jiffies);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}

void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long start_time,
			 struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;
	bool got_precise_time;
	unsigned long duration_jiffies = 0;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
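		/*
		 * A race here at worst misrepresents the "merged" flag,
		 * so preemption does not need to be disabled.
		 */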
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == READ_ONCE(last->last_sector) &&
			 ((bi_rw == WRITE) ==
			  (READ_ONCE(last->last_rw) == WRITE)));
		WRITE_ONCE(last->last_sector, end_sector);
		WRITE_ONCE(last->last_rw, bi_rw);
	} else
		duration_jiffies = jiffies - start_time;

	rcu_read_lock();

	got_precise_time = false;
	list_for_each_entry_rcu(s, &stats->list, list_entry) {
		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
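			/* for a start event (!end), duration_ns was pre-set by the caller */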
			if (end)
				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
			got_precise_time = true;
		}
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
	}

	rcu_read_unlock();
}

static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(s, shared, p);
	local_irq_enable();

	shared->tmp.sectors[READ] = 0;
	shared->tmp.sectors[WRITE] = 0;
	shared->tmp.ios[READ] = 0;
	shared->tmp.ios[WRITE] = 0;
	shared->tmp.merges[READ] = 0;
	shared->tmp.merges[WRITE] = 0;
	shared->tmp.ticks[READ] = 0;
	shared->tmp.ticks[WRITE] = 0;
	shared->tmp.io_ticks[READ] = 0;
	shared->tmp.io_ticks[WRITE] = 0;
	shared->tmp.io_ticks_total = 0;
	shared->tmp.time_in_queue = 0;

	if (s->n_histogram_entries)
		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
		}
	}
}

static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				local_irq_disable();
				p = &s->stat_percpu[smp_processor_id()][x];
				p->histogram[i] -= shared->tmp.histogram[i];
				local_irq_enable();
			}
		}
		cond_resched();
	}
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}
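
/*
 * This is like jiffies_to_msecs(), but works for 64-bit values.
 */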
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;
	unsigned mult;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}

static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;
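
	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */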
	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
		}
		DMEMIT("\n");

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;

		cond_resched();
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}

static int parse_histogram(const char *h, unsigned *n_histogram_entries,
			   unsigned long long **histogram_boundaries)
{
	const char *q;
	unsigned n;
	unsigned long long last;

	*n_histogram_entries = 1;
	for (q = h; *q; q++)
		if (*q == ',')
			(*n_histogram_entries)++;

	*histogram_boundaries = kmalloc_array(*n_histogram_entries,
					      sizeof(unsigned long long),
					      GFP_KERNEL);
	if (!*histogram_boundaries)
		return -ENOMEM;

	n = 0;
	last = 0;
	while (1) {
		unsigned long long hi;
		int s;
		char ch;
		s = sscanf(h, "%llu%c", &hi, &ch);
		if (!s || (s == 2 && ch != ','))
			return -EINVAL;
		if (hi <= last)
			return -EINVAL;
		last = hi;
		(*histogram_boundaries)[n] = hi;
		if (s == 1)
			return 0;
		h = strchr(h, ',') + 1;
		n++;
	}
}

static int message_stats_create(struct mapped_device *md,
				unsigned argc, char **argv,
				char *result, unsigned maxlen)
{
	int r;
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned divisor;
	const char *program_id, *aux_data;
	unsigned stat_flags = 0;
	unsigned n_histogram_entries = 0;
	unsigned long long *histogram_boundaries = NULL;
	struct dm_arg_set as, as_backup;
	const char *a;
	unsigned feature_args;
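
	/*
	 * Input format:
	 *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
	 */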
	if (argc < 3)
		goto ret_einval;

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 1);

	a = dm_shift_arg(&as);
	if (!strcmp(a, "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		goto ret_einval;

	end = start + len;
	if (start >= end)
		goto ret_einval;

	a = dm_shift_arg(&as);
	if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
		if (!divisor)
			return -EINVAL;
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		goto ret_einval;

	as_backup = as;
	a = dm_shift_arg(&as);
	if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
		while (feature_args--) {
			a = dm_shift_arg(&as);
			if (!a)
				goto ret_einval;
			if (!strcasecmp(a, "precise_timestamps"))
				stat_flags |= STAT_PRECISE_TIMESTAMPS;
			else if (!strncasecmp(a, "histogram:", 10)) {
				if (n_histogram_entries)
					goto ret_einval;
				r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries);
				if (r)
					goto ret;
			} else
				goto ret_einval;
		}
	} else {
		as = as_backup;
	}

	program_id = "-";
	aux_data = "-";

	a = dm_shift_arg(&as);
	if (a)
		program_id = a;

	a = dm_shift_arg(&as);
	if (a)
		aux_data = a;

	if (as.argc)
		goto ret_einval;
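
	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (the userspace would retry with a larger
	 * buffer, but the region id that caused the overflow is already
	 * leaked).  So we must detect buffer overflow in advance.
	 */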
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen)) {
		r = 1;
		goto ret;
	}

	id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
			     n_histogram_entries, histogram_boundaries, program_id, aux_data,
			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
	if (id < 0) {
		r = id;
		goto ret;
	}

	snprintf(result, maxlen, "%d", id);

	r = 1;
	goto ret;

ret_einval:
	r = -EINVAL;
ret:
	kfree(histogram_boundaries);
	return r;
}

static int message_stats_delete(struct mapped_device *md,
				unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_delete(dm_get_stats(md), id);
}

static int message_stats_clear(struct mapped_device *md,
			       unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_clear(dm_get_stats(md), id);
}

static int message_stats_list(struct mapped_device *md,
			      unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r;
	const char *program = NULL;

	if (argc < 1 || argc > 2)
		return -EINVAL;

	if (argc > 1) {
		program = kstrdup(argv[1], GFP_KERNEL);
		if (!program)
			return -ENOMEM;
	}

	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

	kfree(program);

	return r;
}

static int message_stats_print(struct mapped_device *md,
			       unsigned argc, char **argv, bool clear,
			       char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long idx_start = 0, idx_len = ULONG_MAX;

	if (argc != 2 && argc != 4)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	if (argc > 3) {
		if (strcmp(argv[2], "-") &&
		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
			return -EINVAL;
		if (strcmp(argv[3], "-") &&
		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
			return -EINVAL;
	}

	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
			      result, maxlen);
}

static int message_stats_set_aux(struct mapped_device *md,
				 unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 3)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}

int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;
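
	/* All messages here must start with '@' */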
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}

int __init dm_statistics_init(void)
{
	shared_memory_amount = 0;
	dm_stat_need_rcu_barrier = 0;
	return 0;
}

void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");