#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
#endif

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
extern int drbd_fault_rate;
#endif

extern unsigned int drbd_minor_count;
extern char drbd_usermode_helper[];
extern int drbd_proc_details;

#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);
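
/*
 * drbd_printk() dispatches on the compile-time type of its object argument:
 * __builtin_choose_expr() picks the variant matching a drbd_device,
 * drbd_resource, drbd_connection or drbd_peer_device pointer; any other
 * type fails to link via drbd_printk_with_wrong_object_type().
 */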
#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			__drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			__drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
			__drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
			__drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/* report a failed assertion, but do not BUG(); evaluates to the (boolean)
 * value of the condition, so it can also gate fallback code paths.
 * Note: relies on a 'device' variable in the caller's scope. */
#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
				#exp, __func__); \
		_bool; \
	})
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/* meta data read */
	DRBD_FAULT_RS_WR = 2,	/* resync write */
	DRBD_FAULT_RS_RD = 3,	/* resync read */
	DRBD_FAULT_DT_WR = 4,	/* data write */
	DRBD_FAULT_DT_RD = 5,	/* data read */
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9,	/* corrupt received data */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);
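
/*
 * A fault fires only when CONFIG_DRBD_FAULT_INJECTION is enabled,
 * drbd_fault_rate is non-zero, and bit 'type' is set in the
 * drbd_enable_faults mask; _drbd_insert_fault() then rolls the dice.
 */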
static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return drbd_fault_rate &&
		(drbd_enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices;
extern struct list_head drbd_resources;

extern const char *cmdname(enum drbd_packet cmd);
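
/*
 * bm_xfer_ctx tracks the progress of a bitmap transfer to/from the peer and
 * keeps packet/byte counters, indexed by plain vs. RLE-compressed packet
 * type, so the transfer statistics can be reported (INFO_bm_xfer_stats()).
 */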
struct bm_xfer_ctx {
	/* "const" after initialization */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* current position */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: plain vs. compressed packet type */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);
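
/*
 * The bitmap is transferred in 64-bit aligned chunks regardless of the
 * host's word size.  On 32-bit hosts the word offset is therefore rounded
 * down to an even number of 32-bit words, keeping chunk boundaries at
 * multiples of 64 bits.
 */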
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}

extern unsigned int drbd_header_size(struct drbd_connection *connection);

enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);
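
/*
 * A drbd_request follows a single master bio through its life cycle:
 * optional local submission (private_bio), shipping to the peer via the
 * transfer log (tl_requests), and completion accounting.  The *_jif
 * members are jiffies timestamps of the individual phases.
 */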
struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error). */
	struct bio *private_bio;

	struct drbd_interval i;	/* sector, size: interval of this request */

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a P_BARRIER packet to be sent, starting a new epoch. */
	unsigned int epoch;

	struct list_head tl_requests;	/* ring list in the transfer log */
	struct bio *master_bio;		/* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* before actual request processing */
	unsigned long in_actlog_jif;

	/* local disk */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy the drbd_request object */
	struct kref kref;

	unsigned rq_state;
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size;	/* increased on every request added. */
	atomic_t active;	/* increased on every req. added, decreased on every finished. */
	unsigned long flags;
};

/* Prototype declaration of function defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};
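
/*
 * Peer-request ("ee", historically "epoch entry") flag bits.  The __EE_*
 * names are bit numbers; the EE_* defines below are the corresponding masks.
 */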
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_OP_DISCARD? */
	__EE_TRIM,
	/* explicit zero-out requested, or
	 * our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire protocol C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs:
	 * has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this is/was a write same request */
	__EE_WRITE_SAME,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,

	/* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
	__EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO	(1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC	(1<<__EE_MAY_SET_IN_SYNC)
#define EE_TRIM			(1<<__EE_TRIM)
#define EE_ZEROOUT		(1<<__EE_ZEROOUT)
#define EE_RESUBMITTED		(1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR		(1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST		(1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_WRITE_SAME		(1<<__EE_WRITE_SAME)
#define EE_APPLICATION		(1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ		(1<<__EE_RS_THIN_REQ)
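
/*
 * Per-device flag bits (device->flags), manipulated with set_bit()/
 * clear_bit(); the work bits at the end of the list are posted to the
 * worker via drbd_device_post_work().
 */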
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet written to meta data */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* Users wants us to not use FUA/FLUSH on meta data dev */

	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to WAS_IO_ERROR) */
	FORCE_DETACH,		/* Force-detach, to be handled with care */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */
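
/*
 * The bitmap can be locked against certain classes of modification while
 * bitmap I/O is in flight; the BM_LOCKED_* combinations below state which
 * operations (test/set/clear) remain allowed meanwhile.
 */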
enum bm_flag {
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET = 0x2,
	BM_DONT_TEST = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;	/* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of disk_conf->meta_dev_idx */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf;	/* RCU, for updates: resource->conf_update */
	sector_t known_size;		/* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[];
};
extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);

/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context. */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

enum which_state { NOW, OLD = NOW, NEW };
struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for ready-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;	/* checksums we compute */
	struct crypto_shash *peer_integrity_tfm;	/* checksums we verify */
	struct crypto_shash *csums_tfm;
	struct crypto_shash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	struct blk_plug receiver_plug;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next;
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* protected by ..->resource->req_lock */
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;	/* volume number within the connection */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;	/* partner's disk size */
	struct request_queue *rq_queue;
	struct gendisk *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;	/* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t suspend_cnt;

	/* Interval trees of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics and timeouts;
	 * [0] read, [1] write */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS	8
#define DRBD_SYNC_MARK_STEP	(3*HZ)
	/* blocks not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* marks's time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee;	/* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;	/* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;	/* need to send P_WRITE_ACK */
	struct list_head read_ee;	/* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;	/* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planer (RCU) */
	int rs_in_flight; /* resync sectors in flight */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

/*
 * function declarations of the drbd protocol parts
 */

/* drbd_main.c */

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
extern void drbd_print_uuids(struct drbd_device *device, const char *text);
extern void drbd_queue_unplug(struct drbd_device *device);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
				 int (*io_fn)(struct drbd_device *),
				 void (*done)(struct drbd_device *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
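/*
 * Meta data sizes, expressed in 512-byte sectors:
 * MD_128MB_SECT is 128 MiB (128 << 11 sectors), MD_4kB_SECT is 4 KiB,
 * MD_32kB_SECT is 32 KiB.
 */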
#define MD_128MB_SECT (128LLU << 11)
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
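
/*
 * Each on-disk 4k activity log transaction block carries up to 64 extent
 * updates plus room for 919 unchanged "context" extent numbers, so the
 * complete AL can be reconstructed from a bounded number of transactions.
 */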
#define AL_UPDATES_PER_TRANSACTION	 64	/* arbitrary */
#define AL_CONTEXT_PER_TRANSACTION	919	/* (4096 - 36 - 6*64)/4 */

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left;		/* number of bits set (out of sync) in this extent. */
	int rs_failed;		/* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES	0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED	1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY	2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)
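
/*
 * The conversion helpers below shift by (SHIFT - 9) because a sector is
 * 512 bytes: e.g. BM_SECT_TO_BIT() maps a 512-byte sector number to its
 * 4 KiB bitmap bit, and BM_BIT_TO_SECT() back to the first sector covered.
 */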
/* We do bitmap IO in units of 4k blocks.
 * We also still have a hardcoded 4k per bit relation. */
#define BM_BLOCK_SHIFT	12	/* 4k per bit */
#define BM_BLOCK_SIZE	(1<<BM_BLOCK_SHIFT)
/* mostly arbitrarily set the represented size of one bitmap extent,
 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
 * at 4k per bit resolution) */
#define BM_EXT_SHIFT	24	/* 16 MiB per resync extent */
#define BM_EXT_SIZE	(1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

/* first storage sector a bitmap extent corresponds to */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
/* how much _storage_ sectors are described by one bitmap extent */
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
/* how many bits are covered by one bitmap extent (resync extent) */
#define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)

/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT	(1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)

/* 128 MiB meta data variant: 4k "superblock" plus 32k activity log,
 * the remainder is bitmap; each bitmap sector describes 16 MiB (one
 * bitmap extent) of storage. */
#define DRBD_MAX_SECTORS_FIXED_BM \
	((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM

#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
#endif

/* Estimate max bio size as 256 * PAGE_SIZE,
 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
 * Since we may live in a mixed-platform cluster,
 * we limit us to a platform agnostic constant here for now. */
#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)	/* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15)	/* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17)	/* Protocol 95 to 99 allows bios up to 128KiB */

/* For now, don't allow more than half of what we can "activate" in one
 * activity log transaction to be discarded in one go. We may need to rework
 * drbd_al_begin_io() to allow for even larger discard ranges */
#define DRBD_MAX_BATCH_BIO_SIZE	(AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
#define DRBD_MAX_BBIO_SECTORS	(DRBD_MAX_BATCH_BIO_SIZE >> 9)

extern int drbd_bm_init(struct drbd_device *device);
extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);
/* set/clear/test only a few bits at a time */
extern int drbd_bm_set_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_clear_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
	struct drbd_device *device, const unsigned long s, const unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock,
 * may process the whole bitmap in one go */
extern void _drbd_bm_set_bits(struct drbd_device *device,
		const unsigned long s, const unsigned long e);
extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t	     drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t      drbd_bm_capacity(struct drbd_device *device);

#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap */
extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);

/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;
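/*
 * drbd_pp_pool is a chain of pages (linked through page_private(), see
 * page_chain_next() below) used for peer request payloads; drbd_pp_vacant
 * counts the currently unused pages, and drbd_pp_wait is woken when pages
 * are returned to the pool.
 */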
extern struct page *drbd_pp_pool;
extern spinlock_t   drbd_pp_lock;
extern int	    drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

/* We also need a standard (emergency-reserve backed) page pool
 * for meta data IO (activity log, bitmap).
 * We can keep it global, as long as it is used as "N pages at a time".
 * 128 should be plenty, currently we probably can get away with as few as 1.
 */
#define DRBD_MIN_POOL_PAGES	128
extern mempool_t drbd_md_io_page_pool;

/* We also need to make sure we get a bio
 * when we need it for housekeeping purposes */
extern struct bio_set drbd_md_io_bio_set;

/* And a bio_set for cloning */
extern struct bio_set drbd_io_bio_set;

extern struct mutex resources_mutex;
extern int conn_lowest_minor(struct drbd_connection *connection);
extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
extern void drbd_destroy_device(struct kref *kref);
extern void drbd_delete_device(struct drbd_device *device);

extern struct drbd_resource *drbd_create_resource(const char *name);
extern void drbd_free_resource(struct drbd_resource *resource);

extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
extern void drbd_destroy_connection(struct kref *kref);
extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					void *peer_addr, int peer_addr_len);
extern struct drbd_resource *drbd_find_resource(const char *name);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);

/* drbd_req.c */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *);
void drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);

/* drbd_nl.c */
extern struct mutex notification_mutex;

extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
	DS_ERROR_SHRINK = -3,
	DS_ERROR_SPACE_MD = -2,
	DS_ERROR = -1,
	DS_UNCHANGED = 0,
	DS_SHRUNK = 1,
	DS_GREW = 2,
	DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
			struct drbd_backing_dev *bdev, struct o_qlim *o);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
					enum drbd_role new_role,
					int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
extern int drbd_khelper(struct drbd_device *device, char *cmd);

/* drbd_worker.c */
extern void drbd_md_endio(struct bio *bio);
extern void drbd_peer_request_endio(struct bio *bio);
extern void drbd_request_endio(struct bio *bio);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
extern int drbd_resync_finished(struct drbd_device *device);

/* maybe rather drbd_main.c ? */
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
		struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
		struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_device *device);

static inline void ov_out_of_sync_print(struct drbd_device *device)
{
	if (device->ov_last_oos_size) {
		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
			(unsigned long long)device->ov_last_oos_start,
			(unsigned long)device->ov_last_oos_size);
	}
	device->ov_last_oos_size = 0;
}

extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
			 void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
extern int w_start_resync(struct drbd_work *, int);

extern void resync_timer_fn(struct timer_list *t);
extern void start_resync_timer_fn(struct timer_list *t);

extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);

/* drbd_receiver.c */
extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
		sector_t start, unsigned int nr_sectors, int flags);
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
		bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
				    struct drbd_peer_request *, blk_opf_t, int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
						     sector_t, unsigned int,
						     unsigned int,
						     gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
				 int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);

/* sets the number of 512 byte sectors of our virtual device */
void drbd_set_my_capacity(struct drbd_device *device, sector_t size);

/*
 * used to submit our private bio
 */
static inline void drbd_submit_bio_noacct(struct drbd_device *device,
					  int fault_type, struct bio *bio)
{
	__release(local);
	if (!bio->bi_bdev) {
		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	if (drbd_insert_fault(device, fault_type))
		bio_io_error(bio);
	else
		submit_bio_noacct(bio);
}

void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo);

/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
int drbd_seq_show(struct seq_file *seq, void *v);

/* drbd_actlog.c */
extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *device);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
extern void drbd_rs_failed_io(struct drbd_device *device,
		sector_t sector, int size);
extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);

enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode);
#define drbd_set_in_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
#define drbd_set_out_of_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
#define drbd_rs_failed_io(device, sector, size) \
	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_al_initialize(struct drbd_device *, void *);

/* drbd_nl.c */
/* state info broadcast */
struct sib_info {
	enum drbd_state_info_bcast_reason sib_reason;
	union {
		struct {
			char *helper_name;
			unsigned helper_exit_code;
		};
		struct {
			union drbd_state os;
			union drbd_state ns;
		};
	};
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);

extern int notify_resource_state(struct sk_buff *,
				  unsigned int,
				  struct drbd_resource *,
				  struct resource_info *,
				  enum drbd_notification_type);
extern int notify_device_state(struct sk_buff *,
				unsigned int,
				struct drbd_device *,
				struct device_info *,
				enum drbd_notification_type);
extern int notify_connection_state(struct sk_buff *,
				    unsigned int,
				    struct drbd_connection *,
				    struct connection_info *,
				    enum drbd_notification_type);
extern int notify_peer_device_state(struct sk_buff *,
				     unsigned int,
				     struct drbd_peer_device *,
				     struct peer_device_info *,
				     enum drbd_notification_type);
extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
			  struct drbd_connection *, const char *, int);
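/*
 * Pages handed out by drbd_alloc_pages() are chained into singly linked
 * lists through their page_private() field; page_chain_next() follows
 * that link.
 */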
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)

static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}

static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	union drbd_state rv;

	rv.i = device->state.i;
	rv.susp = resource->susp;
	rv.susp_nod = resource->susp_nod;
	rv.susp_fen = resource->susp_fen;

	return rv;
}

enum drbd_force_detach_flags {
	DRBD_READ_ERROR,
	DRBD_WRITE_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_device *device,
		enum drbd_force_detach_flags df,
		const char *where)
{
	enum drbd_io_error_p ep;

	rcu_read_lock();
	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
				drbd_err(device, "Local IO failed in %s.\n", where);
			if (device->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		fallthrough;	/* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
	case EP_DETACH:
	case EP_CALL_HELPER:
		/* Remember whether we saw a READ or WRITE error: WRITE
		 * failures are covered by the activity log, while READs can
		 * often be recovered from the peer, so they are handled
		 * less drastically elsewhere. */
		set_bit(WAS_IO_ERROR, &device->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &device->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &device->flags);
		if (device->state.disk > D_FAILED) {
			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
			drbd_err(device,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}

/**
 * drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers
 * @device:	 DRBD device.
 * @error:	 Error code passed to the IO completion callback
 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
 */
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_device *device,
	int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&device->resource->req_lock, flags);
		__drbd_chk_io_error_(device, forcedetach, where);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
	}
}
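/*
 * With internal meta data, the meta data lives at the end of the backing
 * device: the "super block" sector (md_offset) sits behind the bitmap and
 * activity log, and bm_offset/al_offset are signed offsets relative to it.
 * The helpers below compute the sector ranges per meta_dev_idx variant.
 */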
/**
 * drbd_md_first_sector() - Returns the first sector number of the meta data area
 * @bdev:	Meta data block device.
 *
 * BTW, for internal meta data, this happens to be the maximum capacity
 * we could agree upon with our peer node.
 */
static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

/**
 * drbd_md_last_sector() - Return the last sector number of the meta data area
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_4kB_SECT -1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect -1;
	}
}

/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	return bdev ? bdev_nr_sectors(bdev) : 0;
}
/**
 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
 * @bdev:	Meta data block device.
 *
 * We clip ourselves at the various MAX_SECTORS, because if we don't,
 * current implementation will oops sooner or later.
 */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;

	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* clip at maximum size the meta device can support */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}

/**
 * drbd_md_ss() - Return the sector number of our meta data super block
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
	const int meta_dev_idx = bdev->md.meta_dev_idx;

	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
		return 0;

	/* Since drbd08, internal meta data is always "flexible".
	 * position: last 4k aligned block of 4k size */
	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;

	/* external, some index; this is the old fixed size meta data */
	return MD_128MB_SECT * bdev->md.meta_dev_idx;
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	if (list_empty_careful(&w->list))
		list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_device_post_work(struct drbd_device *device, int work_bit)
{
	if (!test_and_set_bit(work_bit, &device->flags)) {
		struct drbd_connection *connection =
			first_peer_device(device)->connection;
		struct drbd_work_queue *q = &connection->sender_work;
		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
			wake_up(&q->q_wait);
	}
}

extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
/* To get the ack_receiver out of the blocking network stack,
 * so it can change its sk_rcvtimeo from idle- to ping-timeout,
 * and send a ping, we need to send a signal.
 * Which signal we send is irrelevant. */
static inline void wake_ack_receiver(struct drbd_connection *connection)
{
	struct task_struct *task = connection->ack_receiver.task;
	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
		send_sig(SIGXCPU, task, 1);
}

static inline void request_ping(struct drbd_connection *connection)
{
	set_bit(SEND_PING, &connection->flags);
	wake_ack_receiver(connection);
}

extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);
extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);

extern int drbd_send_ping(struct drbd_connection *connection);
extern int drbd_send_ping_ack(struct drbd_connection *connection);
extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}
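/*
 * Pending-counter conventions (see the corresponding fields in struct
 * drbd_device): ap_pending_cnt counts answer packets we still expect from
 * the peer for application requests and barriers; rs_pending_cnt counts
 * outstanding resync request/data packets; unacked_cnt counts replies we
 * still owe the peer.  misc_wait is woken when ap_pending_cnt drains to 0.
 */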
static inline void inc_ap_pending(struct drbd_device *device)
{
	atomic_inc(&device->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	if (atomic_read(&device->which) < 0)				\
		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
			func, line,					\
			atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
	if (atomic_dec_and_test(&device->ap_pending_cnt))
		wake_up(&device->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}
/* counts how many resync-related answers we still expect from the peer:
 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY),
 * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with
 * ID_SYNCER, or P_NEG_ACK with ID_SYNCER) */
static inline void inc_rs_pending(struct drbd_device *device)
{
	atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}

/* counts how many answers we still need to send to the peer:
 * increased on receive_Data (unless protocol A; we need to send a
 * P_RECV_ACK for proto B, or P_WRITE_ACK for proto C), on
 * receive_RSDataReply (we need to send a P_WRITE_ACK), on
 * receive_DataRequest (we need to send back P_DATA), and on
 * receive_Barrier (we need to send a P_BARRIER_ACK) */
static inline void inc_unacked(struct drbd_device *device)
{
	atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
	atomic_sub(n, &device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

static inline bool is_sync_target_state(enum drbd_conns connection_state)
{
	return	connection_state == C_SYNC_TARGET ||
		connection_state == C_PAUSED_SYNC_T;
}

static inline bool is_sync_source_state(enum drbd_conns connection_state)
{
	return	connection_state == C_SYNC_SOURCE ||
		connection_state == C_PAUSED_SYNC_S;
}

static inline bool is_sync_state(enum drbd_conns connection_state)
{
	return	is_sync_source_state(connection_state) ||
		is_sync_target_state(connection_state);
}
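
/*
 * get_ldev() / put_ldev(): reference counting (local_cnt) around accesses
 * to device->ldev.  get_ldev_if_state() takes a reference only while the
 * disk state is at least the given minimum; the __acquire()/__release()
 * annotations keep sparse's context tracking happy.
 */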
#define get_ldev_if_state(_device, _min_state)				\
	(_get_ldev_if_state((_device), (_min_state)) ?			\
	 ({ __acquire(x); true; }) : false)
#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)

static inline void put_ldev(struct drbd_device *device)
{
	enum drbd_disk_state disk_state = device->state.disk;
	/* We must check the state *before* the atomic_dec becomes visible,
	 * or we have a theoretical race where someone hitting zero,
	 * while state still D_FAILED, will then see D_DISKLESS in the
	 * condition below and calling into destroy, where he must not, yet. */
	int i = atomic_dec_return(&device->local_cnt);

	/* This may be called from some endio handler,
	 * so we must not sleep here. */

	__release(local);
	D_ASSERT(device, i >= 0);
	if (i == 0) {
		if (disk_state == D_DISKLESS)
			/* even internal references gone, safe to destroy */
			drbd_device_post_work(device, DESTROY_DISK);
		if (disk_state == D_FAILED)
			/* all application IO references gone. */
			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
				drbd_device_post_work(device, GO_DISKLESS);
		wake_up(&device->misc_wait);
	}
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never get a reference while D_DISKLESS */
	if (device->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed)
		put_ldev(device);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif

/* this throttles on-the-fly application requests
 * according to max_buffers settings;
 * maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
	rcu_read_unlock();

	return mxb;
}

static inline int drbd_state_is_stable(struct drbd_device *device)
{
	union drbd_dev_state s = device->state;

	/* DO NOT add a default clause, we want the compiler to warn us
	 * for any newly introduced state we may have forgotten to add here */

	switch ((enum drbd_conns)s.conn) {
	/* new io only accepted when there is no connection, ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or there is a well established connection. */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
		/* transitional states, IO allowed */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

		/* Allow IO in BM exchange states with new protocols */
	case C_WF_BITMAP_S:
		if (first_peer_device(device)->connection->agreed_pro_version < 96)
			return 0;
		break;

		/* no new io accepted in these states */
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not "stable" */
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		/* disk state is stable as well. */
		break;

	/* no new io accepted during transitional states */
	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not "stable" */
		return 0;
	}

	return 1;
}

static inline int drbd_suspended(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;

	return resource->susp || resource->susp_fen || resource->susp_nod;
}

static inline bool may_inc_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);

	if (drbd_suspended(device))
		return false;
	if (atomic_read(&device->suspend_cnt))
		return false;

	/* to avoid potential deadlock or bitmap corruption,
	 * in various places, we only allow new application io
	 * to start during "stable" states. */
	if (!drbd_state_is_stable(device))
		return false;

	/* semaphore-style throttling against max_buffers */
	if (atomic_read(&device->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &device->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
	bool rv = false;

	spin_lock_irq(&device->resource->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&device->resource->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_device *device)
{
	/* we wait here
	 *    as long as the device is suspended
	 *    until the bitmap is no longer on the fly during connection
	 *    handshake as long as we would exceed the max_buffer limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */
	wait_event(device->misc_wait, inc_ap_bio_cond(device));
}

static inline void dec_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

	D_ASSERT(device, ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->
				connection->sender_work,
				&device->bm_io_work.w);
	}

	/* this currently does wake_up for every dec_ap_bio!
	 * maybe rather introduce some type of hysteresis?
	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
	if (ap_bio < mxb)
		wake_up(&device->misc_wait);
}

static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
		first_peer_device(device)->connection->agreed_pro_version != 100;
}

static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
	int changed = device->ed_uuid != val;
	device->ed_uuid = val;
	return changed;
}

static inline int drbd_queue_order_type(struct drbd_device *device)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
	return list_first_entry_or_null(&resource->connections,
				struct drbd_connection, connections);
}

#endif