// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	int accounting;
};

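/*
 * Reset a stat bucket. min is primed with all ones so the first sample
 * recorded always becomes the new minimum.
 */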
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

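/*
 * Fold one per-cpu bucket (src) into an aggregate bucket (dst). Per-cpu
 * buckets only maintain a running sum in ->batch, not ->mean, so the new
 * mean is computed here as a sample-count weighted combination.
 */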
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
			    dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

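/* Account one latency sample in a bucket. */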
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

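/*
 * Called at request completion time: compute the I/O time for @rq and feed
 * it to the matching per-cpu bucket of every active callback on the queue.
 */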
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

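/*
 * Window timer: reset the aggregate buckets, fold every online CPU's
 * per-cpu buckets into them, and hand the result to the owner's timer_fn.
 */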
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

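/*
 * Allocate a stats callback with @buckets aggregate buckets and a matching
 * per-cpu bucket array. Release it with blk_stat_free_callback().
 */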
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

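/*
 * Register @cb on @q: reset its per-cpu buckets, publish it on the RCU
 * protected callback list, and mark the queue as collecting stats.
 */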
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

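/*
 * Unregister @cb from @q. QUEUE_FLAG_STATS stays set while accounting is
 * enabled or other callbacks remain. Readers may still see the callback
 * until a grace period has elapsed, hence the RCU deferred free below.
 */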
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

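/*
 * Queue-wide stats accounting is reference counted so that nested
 * enable/disable pairs keep QUEUE_FLAG_STATS set until the last user
 * disables it.
 */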
void blk_stat_disable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!--q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!q->stats->accounting++)
		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->accounting = 0;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}

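/*
 * Lazily allocate the poll stats buckets. cmpxchg() resolves the race when
 * two contexts allocate concurrently: the loser frees its copy and reports
 * stats as already enabled; the winner registers the queue's poll callback.
 */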
bool blk_stats_alloc_enable(struct request_queue *q)
{
	struct blk_rq_stat *poll_stat;

	poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
				GFP_ATOMIC);
	if (!poll_stat)
		return false;

	if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
		kfree(poll_stat);
		return true;
	}

	blk_stat_add_callback(q, q->poll_cb);
	return true;
}