/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

/*
 * List of workarounds for devices that required behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE = (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or
	 * 1 correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS = (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * discarded logical blocks.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2),

	/*
	 * The controller needs a delay before it starts checking device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST = (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation.
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),

	/*
	 * Use only one interrupt vector for all queues.
	 */
	NVME_QUIRK_SINGLE_VECTOR = (1 << 11),

	/*
	 * Use non-standard 128 byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES = (1 << 12),

	/*
	 * Prevent tag overlap between queues.
	 */
	NVME_QUIRK_SHARED_TAGS = (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature.
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),

	/*
	 * The controller requires the command_id value be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN = (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID = (1 << 18),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as their first member in their request-private data.
 */
struct nvme_request {
	struct nvme_command *cmd;
	union nvme_result result;
	u8 genctr;
	u8 retries;
	u8 flags;
	u16 status;
	struct nvme_ctrl *ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED = (1 << 0),
	NVME_REQ_USERCMD = (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
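
/*
 * Worked example (values for illustration only): the admin queue is set up
 * without ->queuedata, so its requests report qid 0; a request on the first
 * I/O hardware context (queue_num 0) reports qid 1, matching the NVMe
 * convention that I/O queues are numbered starting at 1.
 */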

/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing took place and
 *				before ns removal and the controller deletion
 *				progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				fail all inflight I/O.
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u32 dmrsl;
	u16 oacs;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

#ifdef CONFIG_NVME_AUTH
	struct work_struct dhchap_auth_work;
	struct list_head dhchap_auth_list;
	struct mutex dhchap_auth_mutex;
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u16 transaction;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED	0
#define NVME_CTRL_ADMIN_Q_STOPPED	1
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;

	enum nvme_ctrl_type cntrltype;
	enum nvme_dctype dctype;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int instance;
	struct device dev;
	/*
	 * Because we unregister the device on release based on the
	 * count below, we need a separate refcount.
	 */
	struct kref ref;
	struct list_head entry;
	struct mutex lock;
	struct list_head ctrls;
	struct list_head nsheads;
	char subnqn[NVMF_NQN_SIZE];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u8 cmic;
	enum nvme_subsys_type subtype;
	u16 vendor_id;
	u16 awupf;	/* 0's based awupf value. */
	struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy iopolicy;
#endif
};

/*
 * Identifiers reported by the controller for a namespace (EUI-64, NGUID,
 * UUID and the command set identifier), used to recognize the same
 * namespace when it is reachable through multiple controllers.
 */
struct nvme_ns_ids {
	u8 eui64[8];
	u8 nguid[16];
	uuid_t uuid;
	u8 csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head list;
	struct srcu_struct srcu;
	struct nvme_subsystem *subsys;
	unsigned ns_id;
	struct nvme_ns_ids ids;
	struct list_head entry;
	struct kref ref;
	bool shared;
	int instance;
	struct nvme_effects_log *effects;

	struct cdev cdev;
	struct device cdev_device;

	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list requeue_list;
	spinlock_t requeue_lock;
	struct work_struct requeue_work;
	struct mutex lock;
	unsigned long flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu *current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 pi_size;
	u16 sgs;
	u32 sws;
	u8 pi_type;
	u8 guard_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4
#define NVME_NS_STOPPED		5

	struct cdev cdev;
	struct device cdev_device;

	struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == ns->pi_size;
}
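
/*
 * Example (hypothetical formats): a namespace with 8 bytes of metadata per
 * block, all of it protection information, has ms == pi_size == 8 and
 * qualifies; a format with 16 bytes of metadata but only 8 bytes of PI does
 * not, because the extra metadata cannot be generated/stripped by the
 * PI-only path.
 */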

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)		(gen & 0xf)
#define nvme_cid_install_genctr(gen)	(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)	((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)		(cid & 0xfff)

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}
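
/*
 * Worked example (hypothetical values): a request with tag 0x042 whose
 * nvme_request has genctr 0x15 yields command_id 0x5042 - only the low four
 * generation bits (0x5) are installed in bits 15:12, and the tag occupies
 * bits 11:0.  Decoding gives nvme_tag_from_cid(0x5042) == 0x042 and
 * nvme_genctr_from_cid(0x5042) == 0x5.
 */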

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n", tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

/*
 * Return the length of the string without the space padding
 */
static inline int nvme_strlen(char *s, int len)
{
	while (s[len - 1] == ' ')
		len--;
	return len;
}
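
/*
 * Example: Identify strings are space padded, so for the 8-byte firmware
 * revision "1.0     " nvme_strlen() returns 3, the length without padding.
 */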

static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* 0x4E564D65 is the ASCII string "NVMe", required to trigger NSSR */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}
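
/*
 * Worked example (hypothetical 4K-formatted namespace): lba_shift is 12, so
 * nvme_sect_to_lba() shifts right by 12 - 9 = 3 and 512B sector 16 maps to
 * logical block 2; nvme_lba_to_sect() shifts back, mapping block 2 to
 * sector 16.  For a 512B-formatted namespace both shifts are zero.
 */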

/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
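
/*
 * Example: a 16 byte buffer is 4 dwords, which the spec encodes 0-based,
 * so nvme_bytes_to_numd(16) returns 3.
 */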

static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}
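
/*
 * Bits 10:8 of the (phase-stripped) status hold the status code type; type
 * 3h is "path related status".  For example, NVME_SC_HOST_PATH_ERROR (0x370)
 * is a path error, while a media error (status code type 2h) is not.
 */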

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	/* the CQE status field carries the phase tag in bit 0; shift it out */
	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;

	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}

void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}
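
/*
 * Usage sketch (hypothetical transport code, names are illustrative only):
 * a driver that polls a batch of completions hands nvme_complete_batch() a
 * callback that undoes its own per-request state before the whole batch is
 * ended in one go:
 *
 *	static void foo_pci_unmap_rq(struct request *req)
 *	{
 *		// transport specific cleanup, e.g. DMA unmap for req
 *	}
 *
 *	nvme_complete_batch(iob, foo_pci_unmap_rq);
 */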

blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY	-1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live);
}

/*
 * An NSID will be unique for all shared namespaces, or if at least one of the
 * following conditions is met:
 *   1. Namespace Management is supported by the controller
 *   2. ANA is supported by the controller
 *   3. NVM Sets are supported by the controller
 *
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;

struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
#endif

int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
{
	ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ctrl->connect_q))
		return PTR_ERR(ctrl->connect_q);
	return 0;
}

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}
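
/*
 * Bits 1:0 of the Identify Controller SGLS field report SGL support (01b =
 * supported, 10b = supported with a dword alignment requirement), so a
 * non-zero result from the ((1 << 0) | (1 << 1)) mask means the controller
 * accepts SGLs for I/O commands.
 */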

#ifdef CONFIG_NVME_AUTH
void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_reset(struct nvme_ctrl *ctrl);
void nvme_auth_free(struct nvme_ctrl *ctrl);
#else
static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	return NVME_SC_AUTH_REQUIRED;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#ifdef CONFIG_NVME_VERBOSE_ERRORS
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
#else
static inline const unsigned char *nvme_get_error_status_str(u16 status)
{
	return "I/O Error";
}
static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
{
	return "I/O Cmd";
}
static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
	return "Admin Cmd";
}
#endif

#endif /* _NVME_H */