/*
 * Functions related to sysfs handling for block layer request queues.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

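/*
 * One sysfs attribute under /sys/block/<disk>/queue, together with its
 * optional show/store handlers operating on the owning request_queue.
 */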
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

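/*
 * Helpers for the common case of exposing/parsing a single decimal value
 * through an attribute page; queue_var_store() rejects values above UINT_MAX.
 */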
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
		page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}

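/*
 * Generate show/store handlers for a boolean queue flag.  @neg inverts the
 * sysfs value relative to the underlying QUEUE_FLAG_* bit (e.g. the
 * "rotational" attribute is the negation of QUEUE_FLAG_NONROT).
 */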
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				      size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				      size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure the queue is idled: freeze and quiesce it before updating
	 * the minimum latency, so no requests are in flight while wbt
	 * switches over to the new value.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

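/*
 * Boilerplate for declaring read-only (0444) and read-write (0644) queue
 * attributes from a _prefix##_show / _prefix##_store handler pair.
 */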
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

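/*
 * Default attributes exposed under /sys/block/<disk>/queue.  Visibility of
 * individual entries is further filtered by queue_attr_visible() below.
 */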
static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
	    (!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

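/*
 * sysfs_ops entry points: translate a generic kobject attribute access into
 * the per-entry show/store handler, serialized by q->sysfs_lock.
 */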
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

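/* RCU callback that frees the request_queue memory itself. */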
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);

	kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q);
}

/**
 * blk_release_queue - release all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * Called once the kobject refcount of the queue drops to zero.  Tears down
 * statistics, blk-mq state and the queue id, then frees the queue itself
 * from an RCU callback.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	percpu_ref_exit(&q->q_usage_counter);

	if (q->poll_stat)
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);
	kfree(q->poll_stat);

	if (queue_is_mq(q))
		blk_mq_release(q);

	if (blk_queue_has_srcu(q))
		cleanup_srcu_struct(q->srcu);

	ida_free(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	NULL
};

struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto unlock;

	if (queue_is_mq(q))
		blk_mq_sysfs_register(disk);
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto put_dev;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto put_dev;
	}

	ret = blk_crypto_sysfs_register(q);
	if (ret)
		goto put_dev;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * The queue starts out with its q_usage_counter in atomic mode so
	 * that unregistered queues remain cheap to tear down.  Now that the
	 * queue is registered, mark init as done and switch the counter to
	 * the faster percpu mode.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

put_dev:
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);
	kobject_del(&q->kobj);

	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is
 * called after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(q);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}