// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

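/*
 * Release handler for the queue's shared "mq" kobject, which is embedded in
 * struct blk_mq_ctxs.  It runs only after every per-CPU ctx kobject has
 * dropped its reference, so the percpu ctx array can be freed along with
 * the container.
 */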
static void blk_mq_sysfs_release(struct kobject *kobj)
{
    struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

    free_percpu(ctxs->queue_ctx);
    kfree(ctxs);
}

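/*
 * Release handler for a per-CPU software context.  The ctx itself lives in
 * the percpu queue_ctx array, so all that happens here is dropping the
 * reference on the parent blk_mq_ctxs kobject.
 */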
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
    struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

    /* ctx->ctxs won't be released until all ctx are freed */
    kobject_put(&ctx->ctxs->kobj);
}

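/*
 * Final teardown of a hardware context: free the flush queue, the
 * software-context bitmap, the CPU mask, and the hctx itself.
 */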
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
    struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
                          kobj);

    blk_free_flush_queue(hctx->fq);
    sbitmap_free(&hctx->ctx_map);
    free_cpumask_var(hctx->cpumask);
    kfree(hctx->ctxs);
    kfree(hctx);
}

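/*
 * Wrapper binding a sysfs attribute to typed show/store handlers that take
 * the owning blk_mq_hw_ctx directly.
 */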
struct blk_mq_hw_ctx_sysfs_entry {
    struct attribute attr;
    ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
    ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

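/*
 * Generic ->show dispatcher: recover the entry and the hctx from the raw
 * kobject/attribute pair, then call the typed handler under q->sysfs_lock.
 */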
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                    struct attribute *attr, char *page)
{
    struct blk_mq_hw_ctx_sysfs_entry *entry;
    struct blk_mq_hw_ctx *hctx;
    struct request_queue *q;
    ssize_t res;

    entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
    hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
    q = hctx->queue;

    if (!entry->show)
        return -EIO;

    mutex_lock(&q->sysfs_lock);
    res = entry->show(hctx, page);
    mutex_unlock(&q->sysfs_lock);
    return res;
}

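/* Store-side counterpart of blk_mq_hw_sysfs_show(), same locking rules. */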
static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                     struct attribute *attr, const char *page,
                     size_t length)
{
    struct blk_mq_hw_ctx_sysfs_entry *entry;
    struct blk_mq_hw_ctx *hctx;
    struct request_queue *q;
    ssize_t res;

    entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
    hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
    q = hctx->queue;

    if (!entry->store)
        return -EIO;

    mutex_lock(&q->sysfs_lock);
    res = entry->store(hctx, page, length);
    mutex_unlock(&q->sysfs_lock);
    return res;
}

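/* Tag counts are reported straight from the hctx's tag set. */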
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
                        char *page)
{
    return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
                             char *page)
{
    return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

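/*
 * Print the CPUs mapped to this hardware queue as a comma-separated list,
 * stopping early if the output would no longer fit in one page.
 */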
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
    const size_t size = PAGE_SIZE - 1;
    unsigned int i, first = 1;
    int ret = 0, pos = 0;

    for_each_cpu(i, hctx->cpumask) {
        if (first)
            ret = snprintf(pos + page, size - pos, "%u", i);
        else
            ret = snprintf(pos + page, size - pos, ", %u", i);

        if (ret >= size - pos)
            break;

        first = 0;
        pos += ret;
    }

    ret = snprintf(pos + page, size + 1 - pos, "\n");
    return pos + ret;
}

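/* Read-only (0444) attributes exported for every hardware queue. */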
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
    .attr = {.name = "nr_tags", .mode = 0444 },
    .show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
    .attr = {.name = "nr_reserved_tags", .mode = 0444 },
    .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
    .attr = {.name = "cpu_list", .mode = 0444 },
    .show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
    &blk_mq_hw_sysfs_nr_tags.attr,
    &blk_mq_hw_sysfs_nr_reserved_tags.attr,
    &blk_mq_hw_sysfs_cpus.attr,
    NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
    .show   = blk_mq_hw_sysfs_show,
    .store  = blk_mq_hw_sysfs_store,
};

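/*
 * kobj_types tying the release handlers above (and, for the hardware
 * context, the default attribute group) to each kobject flavor.
 */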
static struct kobj_type blk_mq_ktype = {
    .release    = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
    .release    = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
    .sysfs_ops  = &blk_mq_hw_sysfs_ops,
    .default_groups = default_hw_ctx_groups,
    .release    = blk_mq_hw_sysfs_release,
};

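/*
 * Remove a hardware queue and its per-CPU child directories from sysfs.
 * Queues without mapped software contexts were never registered, so there
 * is nothing to delete for them.
 */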
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
    struct blk_mq_ctx *ctx;
    int i;

    if (!hctx->nr_ctx)
        return;

    hctx_for_each_ctx(hctx, ctx, i)
        kobject_del(&ctx->kobj);

    kobject_del(&hctx->kobj);
}

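/*
 * Register a hardware queue under the queue's "mq" directory, plus one
 * "cpuN" child per mapped software context.
 */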
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
    struct request_queue *q = hctx->queue;
    struct blk_mq_ctx *ctx;
    int i, ret;

    if (!hctx->nr_ctx)
        return 0;

    ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
    if (ret)
        return ret;

    hctx_for_each_ctx(hctx, ctx, i) {
        ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
        if (ret)
            break;
    }

    return ret;
}

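/*
 * Initialization only; the kobject is added to sysfs later, by
 * blk_mq_register_hctx().
 */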
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
    kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

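/*
 * Drop the references taken in blk_mq_sysfs_init(): one per possible CPU
 * plus the initial reference on the shared mq kobject.
 */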
void blk_mq_sysfs_deinit(struct request_queue *q)
{
    struct blk_mq_ctx *ctx;
    int cpu;

    for_each_possible_cpu(cpu) {
        ctx = per_cpu_ptr(q->queue_ctx, cpu);
        kobject_put(&ctx->kobj);
    }
    kobject_put(q->mq_kobj);
}

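/*
 * Initialize the shared mq kobject and one kobject per possible CPU.  Each
 * ctx grabs a reference on the shared kobject so that it outlives all of
 * its children (see blk_mq_ctx_sysfs_release()).
 */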
void blk_mq_sysfs_init(struct request_queue *q)
{
    struct blk_mq_ctx *ctx;
    int cpu;

    kobject_init(q->mq_kobj, &blk_mq_ktype);

    for_each_possible_cpu(cpu) {
        ctx = per_cpu_ptr(q->queue_ctx, cpu);

        kobject_get(q->mq_kobj);
        kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
    }
}

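/*
 * Create the "mq" directory under the disk device and register all hardware
 * queues beneath it.  On failure, any queues registered so far are torn
 * down again.  The caller must hold q->sysfs_dir_lock.
 */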
int blk_mq_sysfs_register(struct gendisk *disk)
{
    struct request_queue *q = disk->queue;
    struct blk_mq_hw_ctx *hctx;
    unsigned long i, j;
    int ret;

    lockdep_assert_held(&q->sysfs_dir_lock);

    ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
    if (ret < 0)
        goto out;

    kobject_uevent(q->mq_kobj, KOBJ_ADD);

    queue_for_each_hw_ctx(q, hctx, i) {
        ret = blk_mq_register_hctx(hctx);
        if (ret)
            goto unreg;
    }

    q->mq_sysfs_init_done = true;

out:
    return ret;

unreg:
    queue_for_each_hw_ctx(q, hctx, j) {
        if (j < i)
            blk_mq_unregister_hctx(hctx);
    }

    kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
    kobject_del(q->mq_kobj);
    return ret;
}

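/*
 * Tear down everything created by blk_mq_sysfs_register().  The caller
 * must hold q->sysfs_dir_lock.
 */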
void blk_mq_sysfs_unregister(struct gendisk *disk)
{
    struct request_queue *q = disk->queue;
    struct blk_mq_hw_ctx *hctx;
    unsigned long i;

    lockdep_assert_held(&q->sysfs_dir_lock);

    queue_for_each_hw_ctx(q, hctx, i)
        blk_mq_unregister_hctx(hctx);

    kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
    kobject_del(q->mq_kobj);

    q->mq_sysfs_init_done = false;
}

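/*
 * Unregister only the hardware queue directories, for example while the
 * queue's set of hardware contexts is being updated.  A no-op before the
 * initial registration has completed.
 */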
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
    struct blk_mq_hw_ctx *hctx;
    unsigned long i;

    mutex_lock(&q->sysfs_dir_lock);
    if (!q->mq_sysfs_init_done)
        goto unlock;

    queue_for_each_hw_ctx(q, hctx, i)
        blk_mq_unregister_hctx(hctx);

unlock:
    mutex_unlock(&q->sysfs_dir_lock);
}

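/* Re-register the hardware queue directories after an update. */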
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
    struct blk_mq_hw_ctx *hctx;
    unsigned long i;
    int ret = 0;

    mutex_lock(&q->sysfs_dir_lock);
    if (!q->mq_sysfs_init_done)
        goto unlock;

    queue_for_each_hw_ctx(q, hctx, i) {
        ret = blk_mq_register_hctx(hctx);
        if (ret)
            break;
    }

unlock:
    mutex_unlock(&q->sysfs_dir_lock);

    return ret;
}