/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);
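
/*
 * Illustrative sketch, not part of the upstream header: a minimal
 * bio-based map function for a hypothetical pass-through target whose
 * ti->private holds a single struct dm_dev used from sector 0. It
 * remaps the bio to the underlying device and lets dm core submit it.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct dm_dev *dev = ti->private;
 *
 *		bio_set_dev(bio, dev->bdev);
 *		bio->bi_iter.bi_sector =
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */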

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can assign to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
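
/*
 * Illustrative sketch, not part of the upstream header: a callout that
 * accumulates in *data whether every underlying device span fits inside
 * its device, in the style of the checks in drivers/md/dm-table.c. All
 * example_* names are hypothetical.
 *
 *	static int example_dev_fits(struct dm_target *ti, struct dm_dev *dev,
 *				    sector_t start, sector_t len, void *data)
 *	{
 *		bool *fits = data;
 *
 *		*fits = *fits && (start + len <= bdev_nr_sectors(dev->bdev));
 *		return 0;
 *	}
 *
 * A target's iterate_devices method then invokes fn(ti, dev, start, len,
 * data) once for each contiguous span of each underlying device.
 */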

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 * 0    : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
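
/*
 * Illustrative sketch, not part of the upstream header: a constructor
 * that opens the single underlying device named by argv[0] and stashes
 * it in ti->private; example_ctr and example_dtr are hypothetical names.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct dm_dev *dev;
 *		int r;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &dev);
 *		if (r) {
 *			ti->error = "Device lookup failed";
 *			return r;
 *		}
 *
 *		ti->private = dev;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		dm_put_device(ti, ti->private);
 *	}
 */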

/*
 * Information about a target type
 */
struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_zero_page_range_fn dax_zero_page_range;
	dm_dax_recovery_write_fn dax_recovery_write;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is a responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;

	/*
	 * Set if the target will submit IO using dm_submit_bio_remap()
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target will submit the DM bio without first calling
	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
	 */
	bool needs_bio_set_dev:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
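
/*
 * Illustrative sketch, not part of the upstream header: wiring the
 * hypothetical example_* callbacks sketched above into a target_type
 * and registering it from module init, as in-tree targets do.
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */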

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
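
/*
 * Illustrative sketch, not part of the upstream header: parsing an
 * optional feature-argument group with the helpers above, in the style
 * of several in-tree targets. The function and flag are hypothetical.
 *
 *	static int example_parse_features(struct dm_arg_set *as,
 *					  struct dm_target *ti,
 *					  bool *writethrough)
 *	{
 *		static const struct dm_arg _args[] = {
 *			{0, 1, "Invalid number of feature args"},
 *		};
 *		unsigned num_features;
 *		const char *arg;
 *		int r;
 *
 *		r = dm_read_arg_group(_args, as, &num_features, &ti->error);
 *		if (r)
 *			return r;
 *
 *		while (num_features--) {
 *			arg = dm_shift_arg(as);
 *			if (!strcasecmp(arg, "writethrough")) {
 *				*writethrough = true;
 *				continue;
 *			}
 *			ti->error = "Unrecognised feature requested";
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */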

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" target (supports both bio-based
 * and request-based).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len bytes.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);
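
/*
 * Illustrative sketch, not part of the upstream header: the usual SRCU
 * pattern for dereferencing a device's live table.
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... inspect or iterate the table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * dm_put_live_table() must be called with the srcu_idx returned by the
 * matching dm_get_live_table(), even if no table was present.
 */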

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue of a request-based mapped device asynchronously.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Free a crypto profile previously set up for a mapped device.
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

#define DMEMIT_TARGET_NAME_VERSION(y) \
		DMEMIT("target_name=%s,target_version=%u.%u.%u", \
		       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
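
/*
 * Illustrative sketch, not part of the upstream header: a status
 * callback built on DMEMIT. DMEMIT expects the local variables result,
 * maxlen and sz to be in scope, which is why status methods
 * conventionally begin with "unsigned sz = 0;". example_status and the
 * ti->private layout are hypothetical.
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned status_flags, char *result,
 *				   unsigned maxlen)
 *	{
 *		struct dm_dev *dev = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s", dev->name);
 *			break;
 *		case STATUSTYPE_IMA:
 *			DMEMIT_TARGET_NAME_VERSION(ti->type);
 *			break;
 *		}
 *	}
 */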

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
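
/*
 * Worked example, not part of the upstream header: with n = 1000 and
 * sz = 512, dm_div_up(1000, 512) == 2 and dm_round_up(1000, 512) == 1024.
 * dm_sector_div_up() computes the same ceiling on sector_t values via
 * sector_div(), which stays efficient on 32-bit configurations, and
 * dm_sector_div64() divides x by y in place while evaluating to the
 * remainder.
 */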

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */