/*
 * Internal header file _only_ for device mapper core
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS	1024

struct dm_io;

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * Requeue work context is needed for cloning one new bio
	 * to represent the dm_io to be requeued, since each
	 * dm_io may point to the original bio from the FS.
	 */
	struct work_struct requeue_work;
	struct dm_io *requeue_list;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* For blk-mq request-based DM support. */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* The number of internal suspends. */
	unsigned int internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
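
/*
 * Illustrative example (not part of this header): the DMF_* bits are used
 * with the atomic bitops on md->flags, e.g.:
 *
 *	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
 *	...
 *	if (test_bit(DMF_SUSPENDED, &md->flags))
 *		...
 */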

void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);

static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned int integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* A list of devices used by this table. */
	struct list_head devices;

	/* Events get handed up using this callback. */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};

static inline struct dm_target *dm_table_get_target(struct dm_table *t,
						    unsigned int index)
{
	BUG_ON(index >= t->num_targets);
	return t->targets + index;
}
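
/*
 * Illustrative example (not part of this header): a table's targets are
 * typically walked by index, e.g.:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < t->num_targets; i++) {
 *		struct dm_target *ti = dm_table_get_target(t, i);
 *		...
 *	}
 */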

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
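
/*
 * Illustrative sketch (not code from this header): DM core sizes the
 * front_pad of its bio_sets with the offsets above, so every bio allocated
 * from them sits at the end of its container, roughly:
 *
 *	bioset_init(&pools->bs, pool_size, DM_TARGET_IO_BIO_OFFSET, 0);
 *	bioset_init(&pools->io_bs, pool_size, DM_IO_BIO_OFFSET, 0);
 *
 * (the real callers also add per-target per-bio data to the front_pad).
 * The container is then recovered with container_of() from the embedded
 * 'clone' bio.
 */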

/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}
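
/*
 * Illustrative example (not part of this header): the tio embedded in a
 * dm_io is flagged as such, and additional clones can be marked duplicate,
 * e.g.:
 *
 *	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
 *	...
 *	if (dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO))
 *		...
 */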

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* The original bio this io was created for. */
	struct bio *orig_bio;
	unsigned int sector_offset;
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

/*
 * dm_io flags
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT
};

static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}
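
/*
 * Illustrative example (not part of this header): IO accounting is recorded
 * at most once per dm_io using the helpers above, e.g.:
 *
 *	if (!dm_io_flagged(io, DM_IO_ACCOUNTED)) {
 *		dm_io_set_flag(io, DM_IO_ACCOUNTED);
 *		...
 *	}
 *
 * (callers serialize such updates with io->lock where the io may have been
 * split; the sketch omits that locking).
 */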

void dm_io_rewind(struct dm_io *io, struct bio_set *bs);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
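
/*
 * Illustrative sketch (hypothetical function name): a kobject release
 * callback can wake waiters via the holder's completion, e.g.:
 *
 *	static void example_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */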

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
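
/*
 * Illustrative example ('reserved_bio_based_ios' and RESERVED_BIO_BASED_IOS
 * are assumed names from dm.c, not defined here): a getter clamps a writable
 * module parameter between a default and a maximum, e.g.:
 *
 *	return __dm_get_module_param(&reserved_bio_based_ios,
 *				     RESERVED_BIO_BASED_IOS,
 *				     DM_RESERVED_MAX_IOS);
 */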

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
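
/*
 * Illustrative example ('sz' and 'value' are assumptions about a caller,
 * nothing here defines them): status/message handlers append into 'result'
 * and stop emitting once the buffer is full, e.g.:
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%u ", value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		goto done;
 */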

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif