/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
    DM_TYPE_NONE         = 0,
    DM_TYPE_BIO_BASED    = 1,
    DM_TYPE_REQUEST_BASED    = 2,
    DM_TYPE_DAX_BIO_BASED    = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
    void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
              unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
                        struct request *rq,
                        union map_info *map_context,
                        struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
                         union map_info *map_context);
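
/*
 * Example: a minimal sketch of a bio-based map function for a simple
 * remapping target.  The names "my_ctx" and "my_map" are hypothetical
 * and not part of this header; the sketch assumes the target's ctr
 * stored a struct my_ctx in ti->private.  It redirects the bio to an
 * underlying device and returns DM_MAPIO_REMAPPED (defined below):
 *
 *     struct my_ctx {
 *         struct dm_dev *dev;
 *         sector_t start;
 *     };
 *
 *     static int my_map(struct dm_target *ti, struct bio *bio)
 *     {
 *         struct my_ctx *ctx = ti->private;
 *
 *         bio_set_dev(bio, ctx->dev->bdev);
 *         bio->bi_iter.bi_sector = ctx->start +
 *             dm_target_offset(ti, bio->bi_iter.bi_sector);
 *         return DM_MAPIO_REMAPPED;
 *     }
 */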

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (e.g.
 *       a multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
                struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
                    struct request *clone, blk_status_t error,
                    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
                  unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
                  char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
                   struct dm_report_zones_args *args,
                   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can set it to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled.  Otherwise each target would need
 * awkward #ifdefs in its target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                       struct dm_dev *dev,
                       sector_t start, sector_t len,
                       void *data);
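
/*
 * Example: a sketch of a callout that counts the underlying devices a
 * target uses ("count_device" here is illustrative).  State travels
 * through *data; returning 0 keeps the iteration going:
 *
 *     static int count_device(struct dm_target *ti, struct dm_dev *dev,
 *                             sector_t start, sector_t len, void *data)
 *     {
 *         unsigned *num_devices = data;
 *
 *         (*num_devices)++;
 *         return 0;
 *     }
 *
 * A caller would then invoke ti->type->iterate_devices(ti, count_device,
 * &num_devices) for each target of interest.
 */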

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                      iterate_devices_callout_fn fn,
                      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
                struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
        long nr_pages, enum dax_access_mode node, void **kaddr,
        pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
        size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 * 0    : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
        void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
    struct block_device *bdev;
    struct dax_device *dax_dev;
    fmode_t mode;
    char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
          struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
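
/*
 * Example: a sketch of a matching ctr/dtr pair using dm_get_device()
 * and dm_put_device().  "my_ctr" and "my_dtr" are hypothetical names,
 * and struct my_ctx is the context sketched earlier.  The ctr keeps
 * its state in ti->private and reports failures through ti->error:
 *
 *     static int my_ctr(struct dm_target *ti, unsigned int argc,
 *                       char **argv)
 *     {
 *         struct my_ctx *ctx;
 *
 *         if (argc != 1) {
 *             ti->error = "Invalid argument count";
 *             return -EINVAL;
 *         }
 *
 *         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *         if (!ctx)
 *             return -ENOMEM;
 *
 *         if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *                           &ctx->dev)) {
 *             ti->error = "Device lookup failed";
 *             kfree(ctx);
 *             return -EINVAL;
 *         }
 *
 *         ti->private = ctx;
 *         return 0;
 *     }
 *
 *     static void my_dtr(struct dm_target *ti)
 *     {
 *         struct my_ctx *ctx = ti->private;
 *
 *         dm_put_device(ti, ctx->dev);
 *         kfree(ctx);
 *     }
 */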

/*
 * Information about a target type
 */

struct target_type {
    uint64_t features;
    const char *name;
    struct module *module;
    unsigned version[3];
    dm_ctr_fn ctr;
    dm_dtr_fn dtr;
    dm_map_fn map;
    dm_clone_and_map_request_fn clone_and_map_rq;
    dm_release_clone_request_fn release_clone_rq;
    dm_endio_fn end_io;
    dm_request_endio_fn rq_end_io;
    dm_presuspend_fn presuspend;
    dm_presuspend_undo_fn presuspend_undo;
    dm_postsuspend_fn postsuspend;
    dm_preresume_fn preresume;
    dm_resume_fn resume;
    dm_status_fn status;
    dm_message_fn message;
    dm_prepare_ioctl_fn prepare_ioctl;
    dm_report_zones_fn report_zones;
    dm_busy_fn busy;
    dm_iterate_devices_fn iterate_devices;
    dm_io_hints_fn io_hints;
    dm_dax_direct_access_fn direct_access;
    dm_dax_zero_page_range_fn dax_zero_page_range;
    dm_dax_recovery_write_fn dax_recovery_write;

    /* For internal device-mapper use. */
    struct list_head list;
};
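
/*
 * Example: a sketch of a target_type tying together the hypothetical
 * hooks from the earlier sketches.  Hooks a target does not implement
 * are simply left NULL:
 *
 *     static struct target_type my_target = {
 *         .name    = "my-target",
 *         .version = {1, 0, 0},
 *         .module  = THIS_MODULE,
 *         .ctr     = my_ctr,
 *         .dtr     = my_dtr,
 *         .map     = my_map,
 *     };
 */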

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON     0x00000001
#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE  0x00000002
#define dm_target_always_writeable(type) \
        ((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE     0x00000004
#define dm_target_is_immutable(type)    ((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD      0x00000008
#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY     0x00000010
#define dm_target_has_integrity(type)   ((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY  0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM      0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM      0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT        0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO     0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL 0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
    ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL 0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
    struct dm_table *table;
    struct target_type *type;

    /* target limits */
    sector_t begin;
    sector_t len;

    /* If non-zero, maximum size of I/O submitted to a target. */
    uint32_t max_io_len;

    /*
     * The number of zero-length barrier bios that will be submitted
     * to the target for the purpose of flushing cache.
     *
     * The bio number can be accessed with dm_bio_get_target_bio_nr.
     * It is the responsibility of the target driver to remap these bios
     * to the real underlying devices.
     */
    unsigned num_flush_bios;

    /*
     * The number of discard bios that will be submitted to the target.
     * The bio number can be accessed with dm_bio_get_target_bio_nr.
     */
    unsigned num_discard_bios;

    /*
     * The number of secure erase bios that will be submitted to the target.
     * The bio number can be accessed with dm_bio_get_target_bio_nr.
     */
    unsigned num_secure_erase_bios;

    /*
     * The number of WRITE ZEROES bios that will be submitted to the target.
     * The bio number can be accessed with dm_bio_get_target_bio_nr.
     */
    unsigned num_write_zeroes_bios;

    /*
     * The minimum number of extra bytes allocated in each io for the
     * target to use.
     */
    unsigned per_io_data_size;

    /* target specific data */
    void *private;

    /* Used to provide an error string from the ctr */
    char *error;

    /*
     * Set if this target needs to receive flushes regardless of
     * whether or not its underlying devices have support.
     */
    bool flush_supported:1;

    /*
     * Set if this target needs to receive discards regardless of
     * whether or not its underlying devices have support.
     */
    bool discards_supported:1;

    /*
     * Set if we need to limit the number of in-flight bios when swapping.
     */
    bool limit_swap_bios:1;

    /*
     * Set if this target implements a zoned device and needs emulation of
     * zone append operations using regular writes.
     */
    bool emulate_zone_append:1;

    /*
     * Set if the target will submit IO using dm_submit_bio_remap()
     * after returning DM_MAPIO_SUBMITTED from its map function.
     */
    bool accounts_remapped_io:1;

    /*
     * Set if the target will submit the DM bio without first calling
     * bio_set_dev(). NOTE: ideally a target should _not_ need this.
     */
    bool needs_bio_set_dev:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);

u64 dm_start_time_ns_from_clone(struct bio *bio);
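
/*
 * Example: a sketch of per-bio data usage.  "struct my_io" is a
 * hypothetical per-I/O structure; the target's ctr reserves room for
 * it with ti->per_io_data_size, and the map and end_io paths retrieve
 * the same preallocated space with dm_per_bio_data():
 *
 *     struct my_io {
 *         sector_t orig_sector;
 *     };
 *
 *     In the ctr:
 *         ti->per_io_data_size = sizeof(struct my_io);
 *
 *     In the map function:
 *         struct my_io *io = dm_per_bio_data(bio, sizeof(struct my_io));
 *
 *         io->orig_sector = bio->bi_iter.bi_sector;
 *
 * The matching end_io hook can call dm_per_bio_data() with the same
 * size to read the saved state back.
 */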

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
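
/*
 * Example: a sketch of module init/exit registering the target_type
 * sketched earlier:
 *
 *     static int __init dm_my_target_init(void)
 *     {
 *         return dm_register_target(&my_target);
 *     }
 *
 *     static void __exit dm_my_target_exit(void)
 *     {
 *         dm_unregister_target(&my_target);
 *     }
 *
 *     module_init(dm_my_target_init);
 *     module_exit(dm_my_target_exit);
 */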

/*
 * Target argument parsing.
 */
struct dm_arg_set {
    unsigned argc;
    char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
    unsigned min;
    unsigned max;
    char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
        unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
              unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
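
/*
 * Example: a sketch of parsing "<dev_path> <count>" style parameters
 * inside a ctr with the helpers above (bounds and messages are purely
 * illustrative, and the error string would normally be propagated to
 * ti->error):
 *
 *     static const struct dm_arg count_arg = {0, 16, "Invalid count"};
 *     struct dm_arg_set as = { .argc = argc, .argv = argv };
 *     const char *dev_path;
 *     unsigned count;
 *     char *err;
 *
 *     dev_path = dm_shift_arg(&as);
 *     if (dm_read_arg(&count_arg, &as, &count, &err))
 *         return -EINVAL;
 */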

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
    struct dm_target *tgt;
    sector_t next_sector;

    void *orig_data;
    report_zones_cb orig_cb;
    unsigned int zone_idx;

    /* must be filled by ->report_zones before calling dm_report_zones_cb */
    sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
            struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */
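
/*
 * Example: a sketch of a report_zones method for a target that
 * linearly remaps a single zoned device (reusing the hypothetical
 * struct my_ctx).  It translates the starting sector into the
 * underlying device's address space and lets dm_report_zones() drive
 * the iteration:
 *
 *     static int my_report_zones(struct dm_target *ti,
 *                                struct dm_report_zones_args *args,
 *                                unsigned int nr_zones)
 *     {
 *         struct my_ctx *ctx = ti->private;
 *
 *         return dm_report_zones(ctx->dev->bdev, ctx->start,
 *                                ctx->start +
 *                                dm_target_offset(ti, args->next_sector),
 *                                args, nr_zones);
 *     }
 */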

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
               struct dm_target_spec **spec_array,
               char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
            unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
            sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (those supporting both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
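
/*
 * Example: a sketch of the create/add/complete/destroy sequence
 * described above, as an in-kernel caller such as early device
 * creation would use it (error handling omitted; the "linear" target
 * and its parameter string are illustrative):
 *
 *     struct dm_table *t;
 *
 *     dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *     dm_table_add_target(t, "linear", 0, num_sectors, "/dev/sda 0");
 *     dm_table_complete(t);
 *
 * Once the table is no longer needed, dm_table_destroy(t) releases it.
 */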

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
                   struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
              0 : scnprintf(result + sz, maxlen - sz, x))

#define DMEMIT_TARGET_NAME_VERSION(y) \
        DMEMIT("target_name=%s,target_version=%u.%u.%u", \
               (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
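
/*
 * Example: a sketch of a status method built on DMEMIT().  Note that
 * DMEMIT() expects local variables named "sz", "result" and "maxlen",
 * so status methods follow this shape ("my_status" and struct my_ctx
 * are hypothetical as before):
 *
 *     static void my_status(struct dm_target *ti, status_type_t type,
 *                           unsigned status_flags, char *result,
 *                           unsigned maxlen)
 *     {
 *         struct my_ctx *ctx = ti->private;
 *         unsigned sz = 0;
 *
 *         switch (type) {
 *         case STATUSTYPE_INFO:
 *         case STATUSTYPE_IMA:
 *             result[0] = '\0';
 *             break;
 *         case STATUSTYPE_TABLE:
 *             DMEMIT("%s %llu", ctx->dev->name,
 *                    (unsigned long long)ctx->start);
 *             break;
 *         }
 *     }
 */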

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE       0
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE    2
#define DM_ENDIO_DELAY_REQUEUE  3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED  0
#define DM_MAPIO_REMAPPED   1
#define DM_MAPIO_REQUEUE    DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE  DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL       4

#define dm_sector_div64(x, y)( \
{ \
    u64 _res; \
    (x) = div64_u64_rem(x, y, &_res); \
    _res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
    sector_t _r = ((n) + (sz) - 1); \
    sector_div(_r, (sz)); \
    _r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
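
/*
 * Example: with n = 10 and sz = 4,
 *     dm_div_up(10, 4)   evaluates to 3  (ceiling of 10/4),
 *     dm_round_up(10, 4) evaluates to 12 (3 * 4).
 */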

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
    return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
    return (n << SECTOR_SHIFT);
}

#endif  /* _LINUX_DEVICE_MAPPER_H */