// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

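/* Format one blk_rq_stat sample summary into a seq_file. */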
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
    if (stat->nr_samples) {
        seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
               stat->nr_samples, stat->mean, stat->min, stat->max);
    } else {
        seq_puts(m, "samples=0");
    }
}

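/*
 * Dump the queue's poll statistics. q->poll_stat interleaves read and write
 * stats per size bucket; bucket i covers I/O of 1 << (9 + i) bytes.
 */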
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
    struct request_queue *q = data;
    int bucket;

    if (!q->poll_stat)
        return 0;

    for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
        seq_printf(m, "read  (%d Bytes): ", 1 << (9 + bucket));
        print_stat(m, &q->poll_stat[2 * bucket]);
        seq_puts(m, "\n");

        seq_printf(m, "write (%d Bytes): ",  1 << (9 + bucket));
        print_stat(m, &q->poll_stat[2 * bucket + 1]);
        seq_puts(m, "\n");
    }
    return 0;
}

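/*
 * The "requeue_list" attribute iterates over q->requeue_list under
 * q->requeue_lock and prints each pending request.
 */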
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
    __acquires(&q->requeue_lock)
{
    struct request_queue *q = m->private;

    spin_lock_irq(&q->requeue_lock);
    return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct request_queue *q = m->private;

    return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
    __releases(&q->requeue_lock)
{
    struct request_queue *q = m->private;

    spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
    .start  = queue_requeue_list_start,
    .next   = queue_requeue_list_next,
    .stop   = queue_requeue_list_stop,
    .show   = blk_mq_debugfs_rq_show,
};

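/*
 * Print a flags word bit by bit, '|'-separated: named bits use the given
 * name table, unnamed bits are printed as their bit number.
 */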
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
              const char *const *flag_name, int flag_name_count)
{
    bool sep = false;
    int i;

    for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
        if (!(flags & BIT(i)))
            continue;
        if (sep)
            seq_puts(m, "|");
        sep = true;
        if (i < flag_name_count && flag_name[i])
            seq_puts(m, flag_name[i]);
        else
            seq_printf(m, "%d", i);
    }
    return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
    struct request_queue *q = data;

    seq_printf(m, "%d\n", atomic_read(&q->pm_only));
    return 0;
}

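/* Queue flag names, indexed by QUEUE_FLAG_* bit number. */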
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
    QUEUE_FLAG_NAME(STOPPED),
    QUEUE_FLAG_NAME(DYING),
    QUEUE_FLAG_NAME(NOMERGES),
    QUEUE_FLAG_NAME(SAME_COMP),
    QUEUE_FLAG_NAME(FAIL_IO),
    QUEUE_FLAG_NAME(NONROT),
    QUEUE_FLAG_NAME(IO_STAT),
    QUEUE_FLAG_NAME(NOXMERGES),
    QUEUE_FLAG_NAME(ADD_RANDOM),
    QUEUE_FLAG_NAME(SAME_FORCE),
    QUEUE_FLAG_NAME(INIT_DONE),
    QUEUE_FLAG_NAME(STABLE_WRITES),
    QUEUE_FLAG_NAME(POLL),
    QUEUE_FLAG_NAME(WC),
    QUEUE_FLAG_NAME(FUA),
    QUEUE_FLAG_NAME(DAX),
    QUEUE_FLAG_NAME(STATS),
    QUEUE_FLAG_NAME(REGISTERED),
    QUEUE_FLAG_NAME(QUIESCED),
    QUEUE_FLAG_NAME(PCI_P2PDMA),
    QUEUE_FLAG_NAME(ZONE_RESETALL),
    QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
    QUEUE_FLAG_NAME(HCTX_ACTIVE),
    QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
    struct request_queue *q = data;

    blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
               ARRAY_SIZE(blk_queue_flag_name));
    seq_puts(m, "\n");
    return 0;
}

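/*
 * Writing "run", "start" or "kick" to the "state" attribute runs the hardware
 * queues, restarts stopped hardware queues or kicks the requeue list,
 * respectively.
 */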
static ssize_t queue_state_write(void *data, const char __user *buf,
                 size_t count, loff_t *ppos)
{
    struct request_queue *q = data;
    char opbuf[16] = { }, *op;

    /*
     * The "state" attribute is removed when the queue is removed.  Don't
     * allow setting the state on a dying queue to avoid a use-after-free.
     */
    if (blk_queue_dying(q))
        return -ENOENT;

    if (count >= sizeof(opbuf)) {
        pr_err("%s: operation too long\n", __func__);
        goto inval;
    }

    if (copy_from_user(opbuf, buf, count))
        return -EFAULT;
    op = strstrip(opbuf);
    if (strcmp(op, "run") == 0) {
        blk_mq_run_hw_queues(q, true);
    } else if (strcmp(op, "start") == 0) {
        blk_mq_start_stopped_hw_queues(q, true);
    } else if (strcmp(op, "kick") == 0) {
        blk_mq_kick_requeue_list(q);
    } else {
        pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
        pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
        return -EINVAL;
    }
    return count;
}

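/* Queue-level attributes, created directly in the queue's debugfs directory. */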
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
    { "poll_stat", 0400, queue_poll_stat_show },
    { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
    { "pm_only", 0600, queue_pm_only_show, NULL },
    { "state", 0600, queue_state_show, queue_state_write },
    { "zone_wlock", 0400, queue_zone_wlock_show, NULL },
    { },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
    HCTX_STATE_NAME(STOPPED),
    HCTX_STATE_NAME(TAG_ACTIVE),
    HCTX_STATE_NAME(SCHED_RESTART),
    HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;

    blk_flags_show(m, hctx->state, hctx_state_name,
               ARRAY_SIZE(hctx_state_name));
    seq_puts(m, "\n");
    return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
    BLK_TAG_ALLOC_NAME(FIFO),
    BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
    HCTX_FLAG_NAME(SHOULD_MERGE),
    HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
    HCTX_FLAG_NAME(BLOCKING),
    HCTX_FLAG_NAME(NO_SCHED),
    HCTX_FLAG_NAME(STACKING),
    HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

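/*
 * The tag allocation policy is encoded in hctx->flags; print it symbolically
 * when possible, then decode the remaining flag bits.
 */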
static int hctx_flags_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;
    const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

    seq_puts(m, "alloc_policy=");
    if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
        alloc_policy_name[alloc_policy])
        seq_puts(m, alloc_policy_name[alloc_policy]);
    else
        seq_printf(m, "%d", alloc_policy);
    seq_puts(m, " ");
    blk_flags_show(m,
               hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
               hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
    seq_puts(m, "\n");
    return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
    CMD_FLAG_NAME(FAILFAST_DEV),
    CMD_FLAG_NAME(FAILFAST_TRANSPORT),
    CMD_FLAG_NAME(FAILFAST_DRIVER),
    CMD_FLAG_NAME(SYNC),
    CMD_FLAG_NAME(META),
    CMD_FLAG_NAME(PRIO),
    CMD_FLAG_NAME(NOMERGE),
    CMD_FLAG_NAME(IDLE),
    CMD_FLAG_NAME(INTEGRITY),
    CMD_FLAG_NAME(FUA),
    CMD_FLAG_NAME(PREFLUSH),
    CMD_FLAG_NAME(RAHEAD),
    CMD_FLAG_NAME(BACKGROUND),
    CMD_FLAG_NAME(NOWAIT),
    CMD_FLAG_NAME(NOUNMAP),
    CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
    RQF_NAME(STARTED),
    RQF_NAME(SOFTBARRIER),
    RQF_NAME(FLUSH_SEQ),
    RQF_NAME(MIXED_MERGE),
    RQF_NAME(MQ_INFLIGHT),
    RQF_NAME(DONTPREP),
    RQF_NAME(FAILED),
    RQF_NAME(QUIET),
    RQF_NAME(ELVPRIV),
    RQF_NAME(IO_STAT),
    RQF_NAME(PM),
    RQF_NAME(HASHED),
    RQF_NAME(STATS),
    RQF_NAME(SPECIAL_PAYLOAD),
    RQF_NAME(ZONE_WRITE_LOCKED),
    RQF_NAME(MQ_POLL_SLEPT),
    RQF_NAME(TIMED_OUT),
    RQF_NAME(ELV),
    RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
    [MQ_RQ_IDLE]        = "idle",
    [MQ_RQ_IN_FLIGHT]   = "in_flight",
    [MQ_RQ_COMPLETE]    = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
    if (WARN_ON_ONCE((unsigned int)rq_state >=
             ARRAY_SIZE(blk_mq_rq_state_name_array)))
        return "(?)";
    return blk_mq_rq_state_name_array[rq_state];
}

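/*
 * Print a single request: operation, command flags, rq_flags, state and tags,
 * plus any driver-specific detail exposed via mq_ops->show_rq().
 */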
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
    const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
    const enum req_op op = req_op(rq);
    const char *op_str = blk_op_str(op);

    seq_printf(m, "%p {.op=", rq);
    if (strcmp(op_str, "UNKNOWN") == 0)
        seq_printf(m, "%u", op);
    else
        seq_printf(m, "%s", op_str);
    seq_puts(m, ", .cmd_flags=");
    blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
               cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
    seq_puts(m, ", .rq_flags=");
    blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
               ARRAY_SIZE(rqf_name));
    seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
    seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
           rq->internal_tag);
    if (mq_ops->show_rq)
        mq_ops->show_rq(m, rq);
    seq_puts(m, "}\n");
    return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
    return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

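/*
 * The per-hctx "dispatch" attribute iterates over the hctx dispatch list
 * under hctx->lock.
 */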
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
    __acquires(&hctx->lock)
{
    struct blk_mq_hw_ctx *hctx = m->private;

    spin_lock(&hctx->lock);
    return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct blk_mq_hw_ctx *hctx = m->private;

    return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
    __releases(&hctx->lock)
{
    struct blk_mq_hw_ctx *hctx = m->private;

    spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
    .start  = hctx_dispatch_start,
    .next   = hctx_dispatch_next,
    .stop   = hctx_dispatch_stop,
    .show   = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
    struct seq_file     *m;
    struct blk_mq_hw_ctx    *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
    const struct show_busy_params *params = data;

    if (rq->mq_hctx == params->hctx)
        __blk_mq_debugfs_rq_show(params->m, rq);

    return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;
    struct show_busy_params params = { .m = m, .hctx = hctx };

    blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
                &params);

    return 0;
}

static const char *const hctx_types[] = {
    [HCTX_TYPE_DEFAULT] = "default",
    [HCTX_TYPE_READ]    = "read",
    [HCTX_TYPE_POLL]    = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;

    BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
    seq_printf(m, "%s\n", hctx_types[hctx->type]);
    return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;

    sbitmap_bitmap_show(&hctx->ctx_map, m);
    return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
                     struct blk_mq_tags *tags)
{
    seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
    seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
    seq_printf(m, "active_queues=%d\n",
           atomic_read(&tags->active_queues));

    seq_puts(m, "\nbitmap_tags:\n");
    sbitmap_queue_show(&tags->bitmap_tags, m);

    if (tags->nr_reserved_tags) {
        seq_puts(m, "\nbreserved_tags:\n");
        sbitmap_queue_show(&tags->breserved_tags, m);
    }
}

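/*
 * The tags attributes take q->sysfs_lock to serialize against tag set
 * changes; hctx->tags may be absent while the queue is being reconfigured.
 */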
static int hctx_tags_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;
    struct request_queue *q = hctx->queue;
    int res;

    res = mutex_lock_interruptible(&q->sysfs_lock);
    if (res)
        goto out;
    if (hctx->tags)
        blk_mq_debugfs_tags_show(m, hctx->tags);
    mutex_unlock(&q->sysfs_lock);

out:
    return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;
    struct request_queue *q = hctx->queue;
    int res;

    res = mutex_lock_interruptible(&q->sysfs_lock);
    if (res)
        goto out;
    if (hctx->tags)
        sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
    mutex_unlock(&q->sysfs_lock);

out:
    return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;
    struct request_queue *q = hctx->queue;
    int res;

    res = mutex_lock_interruptible(&q->sysfs_lock);
    if (res)
        goto out;
    if (hctx->sched_tags)
        blk_mq_debugfs_tags_show(m, hctx->sched_tags);
    mutex_unlock(&q->sysfs_lock);

out:
    return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;
    struct request_queue *q = hctx->queue;
    int res;

    res = mutex_lock_interruptible(&q->sysfs_lock);
    if (res)
        goto out;
    if (hctx->sched_tags)
        sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
    mutex_unlock(&q->sysfs_lock);

out:
    return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;

    seq_printf(m, "%lu\n", hctx->run);
    return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
                  loff_t *ppos)
{
    struct blk_mq_hw_ctx *hctx = data;

    hctx->run = 0;
    return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;

    seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
    return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
    struct blk_mq_hw_ctx *hctx = data;

    seq_printf(m, "%u\n", hctx->dispatch_busy);
    return 0;
}

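/*
 * Generate seq_file operations that walk a software queue's (blk_mq_ctx)
 * request list for the given hctx type under ctx->lock.
 */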
#define CTX_RQ_SEQ_OPS(name, type)                  \
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
    __acquires(&ctx->lock)                      \
{                                   \
    struct blk_mq_ctx *ctx = m->private;                \
                                    \
    spin_lock(&ctx->lock);                      \
    return seq_list_start(&ctx->rq_lists[type], *pos);      \
}                                   \
                                    \
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
                     loff_t *pos)           \
{                                   \
    struct blk_mq_ctx *ctx = m->private;                \
                                    \
    return seq_list_next(v, &ctx->rq_lists[type], pos);     \
}                                   \
                                    \
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)  \
    __releases(&ctx->lock)                      \
{                                   \
    struct blk_mq_ctx *ctx = m->private;                \
                                    \
    spin_unlock(&ctx->lock);                    \
}                                   \
                                    \
static const struct seq_operations ctx_##name##_rq_list_seq_ops = { \
    .start  = ctx_##name##_rq_list_start,               \
    .next   = ctx_##name##_rq_list_next,                \
    .stop   = ctx_##name##_rq_list_stop,                \
    .show   = blk_mq_debugfs_rq_show,               \
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

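/*
 * Common file operations: the data pointer (queue, hctx or ctx) is stored in
 * the parent directory's inode and the attribute in the file's own inode;
 * see debugfs_create_files().
 */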
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
    const struct blk_mq_debugfs_attr *attr = m->private;
    void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

    return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
                    size_t count, loff_t *ppos)
{
    struct seq_file *m = file->private_data;
    const struct blk_mq_debugfs_attr *attr = m->private;
    void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

    /*
     * Attributes that only implement .seq_ops are read-only and 'attr' is
     * the same as 'data' in this case.
     */
    if (attr == data || !attr->write)
        return -EPERM;

    return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
    const struct blk_mq_debugfs_attr *attr = inode->i_private;
    void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
    struct seq_file *m;
    int ret;

    if (attr->seq_ops) {
        ret = seq_open(file, attr->seq_ops);
        if (!ret) {
            m = file->private_data;
            m->private = data;
        }
        return ret;
    }

    if (WARN_ON_ONCE(!attr->show))
        return -EPERM;

    return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
    const struct blk_mq_debugfs_attr *attr = inode->i_private;

    if (attr->show)
        return single_release(inode, file);

    return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
    .open       = blk_mq_debugfs_open,
    .read       = seq_read,
    .write      = blk_mq_debugfs_write,
    .llseek     = seq_lseek,
    .release    = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
    {"state", 0400, hctx_state_show},
    {"flags", 0400, hctx_flags_show},
    {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
    {"busy", 0400, hctx_busy_show},
    {"ctx_map", 0400, hctx_ctx_map_show},
    {"tags", 0400, hctx_tags_show},
    {"tags_bitmap", 0400, hctx_tags_bitmap_show},
    {"sched_tags", 0400, hctx_sched_tags_show},
    {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
    {"run", 0600, hctx_run_show, hctx_run_write},
    {"active", 0400, hctx_active_show},
    {"dispatch_busy", 0400, hctx_dispatch_busy_show},
    {"type", 0400, hctx_type_show},
    {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
    {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
    {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
    {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
    {},
};

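/*
 * Create one debugfs file per attribute under 'parent', stashing 'data' in
 * the parent's inode so that the file operations above can retrieve it.
 */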
static void debugfs_create_files(struct dentry *parent, void *data,
                 const struct blk_mq_debugfs_attr *attr)
{
    if (IS_ERR_OR_NULL(parent))
        return;

    d_inode(parent)->i_private = data;

    for (; attr->name; attr++)
        debugfs_create_file(attr->name, attr->mode, parent,
                    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
    struct blk_mq_hw_ctx *hctx;
    unsigned long i;

    debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

    /*
     * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
     * didn't exist yet (because we don't know what to name the directory
     * until the queue is registered to a gendisk).
     */
    if (q->elevator && !q->sched_debugfs_dir)
        blk_mq_debugfs_register_sched(q);

    /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
    queue_for_each_hw_ctx(q, hctx, i) {
        if (!hctx->debugfs_dir)
            blk_mq_debugfs_register_hctx(q, hctx);
        if (q->elevator && !hctx->sched_debugfs_dir)
            blk_mq_debugfs_register_sched_hctx(q, hctx);
    }

    if (q->rq_qos) {
        struct rq_qos *rqos = q->rq_qos;

        while (rqos) {
            blk_mq_debugfs_register_rqos(rqos);
            rqos = rqos->next;
        }
    }
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                    struct blk_mq_ctx *ctx)
{
    struct dentry *ctx_dir;
    char name[20];

    snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
    ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

    debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
                  struct blk_mq_hw_ctx *hctx)
{
    struct blk_mq_ctx *ctx;
    char name[20];
    int i;

    if (!q->debugfs_dir)
        return;

    snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
    hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

    debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

    hctx_for_each_ctx(hctx, ctx, i)
        blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
    if (!hctx->queue->debugfs_dir)
        return;
    debugfs_remove_recursive(hctx->debugfs_dir);
    hctx->sched_debugfs_dir = NULL;
    hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
    struct blk_mq_hw_ctx *hctx;
    unsigned long i;

    queue_for_each_hw_ctx(q, hctx, i)
        blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
    struct blk_mq_hw_ctx *hctx;
    unsigned long i;

    queue_for_each_hw_ctx(q, hctx, i)
        blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
    struct elevator_type *e = q->elevator->type;

    lockdep_assert_held(&q->debugfs_mutex);

    /*
     * If the parent directory has not been created yet, return; we will be
     * called again later on and the directory/files will be created then.
     */
    if (!q->debugfs_dir)
        return;

    if (!e->queue_debugfs_attrs)
        return;

    q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

    debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
    lockdep_assert_held(&q->debugfs_mutex);

    debugfs_remove_recursive(q->sched_debugfs_dir);
    q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
    switch (id) {
    case RQ_QOS_WBT:
        return "wbt";
    case RQ_QOS_LATENCY:
        return "latency";
    case RQ_QOS_COST:
        return "cost";
    case RQ_QOS_IOPRIO:
        return "ioprio";
    }
    return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
    lockdep_assert_held(&rqos->q->debugfs_mutex);

    if (!rqos->q->debugfs_dir)
        return;
    debugfs_remove_recursive(rqos->debugfs_dir);
    rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
    struct request_queue *q = rqos->q;
    const char *dir_name = rq_qos_id_to_name(rqos->id);

    lockdep_assert_held(&q->debugfs_mutex);

    if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
        return;

    if (!q->rqos_debugfs_dir)
        q->rqos_debugfs_dir = debugfs_create_dir("rqos",
                             q->debugfs_dir);

    rqos->debugfs_dir = debugfs_create_dir(dir_name,
                           rqos->q->rqos_debugfs_dir);

    debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                    struct blk_mq_hw_ctx *hctx)
{
    struct elevator_type *e = q->elevator->type;

    lockdep_assert_held(&q->debugfs_mutex);

    /*
     * If the parent debugfs directory has not been created yet, return; we
     * will be called again later on with the appropriate parent debugfs
     * directory from blk_register_queue().
     */
    if (!hctx->debugfs_dir)
        return;

    if (!e->hctx_debugfs_attrs)
        return;

    hctx->sched_debugfs_dir = debugfs_create_dir("sched",
                             hctx->debugfs_dir);
    debugfs_create_files(hctx->sched_debugfs_dir, hctx,
                 e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
    lockdep_assert_held(&hctx->queue->debugfs_mutex);

    if (!hctx->queue->debugfs_dir)
        return;
    debugfs_remove_recursive(hctx->sched_debugfs_dir);
    hctx->sched_debugfs_dir = NULL;
}