// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
0006 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0007 #include <linux/module.h>
0008 #include <linux/init.h>
0009 #include <linux/slab.h>
0010 #include <linux/err.h>
0011 #include <linux/nvme-tcp.h>
0012 #include <net/sock.h>
0013 #include <net/tcp.h>
0014 #include <linux/inet.h>
0015 #include <linux/llist.h>
0016 #include <crypto/hash.h>
0017
0018 #include "nvmet.h"
0019
0020 #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
0021
/*
 * Optional SO_PRIORITY value applied to nvmet tcp sockets. A non-zero
 * value lets the NIC/driver apply optimized packet processing or
 * filtering for these connections; exposing it as a module parameter
 * allows values that may be specific to a given NIC implementation.
 */
0028 static int so_priority;
0029 module_param(so_priority, int, 0644);
0030 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
0031
/*
 * Time period (in usecs) that io_work() keeps polling an active queue
 * before deeming it idle. When non-zero, the worker is rearmed until
 * the deadline expires, trading CPU for lower I/O latency.
 */
0037 static int idle_poll_period_usecs;
0038 module_param(idle_poll_period_usecs, int, 0644);
0039 MODULE_PARM_DESC(idle_poll_period_usecs,
0040 "nvmet tcp io_work poll till idle time period in usecs");
0041
0042 #define NVMET_TCP_RECV_BUDGET 8
0043 #define NVMET_TCP_SEND_BUDGET 8
0044 #define NVMET_TCP_IO_WORK_BUDGET 64
0045
0046 enum nvmet_tcp_send_state {
0047 NVMET_TCP_SEND_DATA_PDU,
0048 NVMET_TCP_SEND_DATA,
0049 NVMET_TCP_SEND_R2T,
0050 NVMET_TCP_SEND_DDGST,
0051 NVMET_TCP_SEND_RESPONSE
0052 };
0053
0054 enum nvmet_tcp_recv_state {
0055 NVMET_TCP_RECV_PDU,
0056 NVMET_TCP_RECV_DATA,
0057 NVMET_TCP_RECV_DDGST,
0058 NVMET_TCP_RECV_ERR,
0059 };
0060
0061 enum {
0062 NVMET_TCP_F_INIT_FAILED = (1 << 0),
0063 };
0064
0065 struct nvmet_tcp_cmd {
0066 struct nvmet_tcp_queue *queue;
0067 struct nvmet_req req;
0068
0069 struct nvme_tcp_cmd_pdu *cmd_pdu;
0070 struct nvme_tcp_rsp_pdu *rsp_pdu;
0071 struct nvme_tcp_data_pdu *data_pdu;
0072 struct nvme_tcp_r2t_pdu *r2t_pdu;
0073
0074 u32 rbytes_done;
0075 u32 wbytes_done;
0076
0077 u32 pdu_len;
0078 u32 pdu_recv;
0079 int sg_idx;
0080 int nr_mapped;
0081 struct msghdr recv_msg;
0082 struct kvec *iov;
0083 u32 flags;
0084
0085 struct list_head entry;
0086 struct llist_node lentry;
0087
/* send state */
0089 u32 offset;
0090 struct scatterlist *cur_sg;
0091 enum nvmet_tcp_send_state state;
0092
0093 __le32 exp_ddgst;
0094 __le32 recv_ddgst;
0095 };
0096
0097 enum nvmet_tcp_queue_state {
0098 NVMET_TCP_Q_CONNECTING,
0099 NVMET_TCP_Q_LIVE,
0100 NVMET_TCP_Q_DISCONNECTING,
0101 };
0102
0103 struct nvmet_tcp_queue {
0104 struct socket *sock;
0105 struct nvmet_tcp_port *port;
0106 struct work_struct io_work;
0107 struct nvmet_cq nvme_cq;
0108 struct nvmet_sq nvme_sq;
0109
/* send state */
0111 struct nvmet_tcp_cmd *cmds;
0112 unsigned int nr_cmds;
0113 struct list_head free_list;
0114 struct llist_head resp_list;
0115 struct list_head resp_send_list;
0116 int send_list_len;
0117 struct nvmet_tcp_cmd *snd_cmd;
0118
/* recv state */
0120 int offset;
0121 int left;
0122 enum nvmet_tcp_recv_state rcv_state;
0123 struct nvmet_tcp_cmd *cmd;
0124 union nvme_tcp_pdu pdu;
0125
/* digest state */
0127 bool hdr_digest;
0128 bool data_digest;
0129 struct ahash_request *snd_hash;
0130 struct ahash_request *rcv_hash;
0131
0132 unsigned long poll_end;
0133
0134 spinlock_t state_lock;
0135 enum nvmet_tcp_queue_state state;
0136
0137 struct sockaddr_storage sockaddr;
0138 struct sockaddr_storage sockaddr_peer;
0139 struct work_struct release_work;
0140
0141 int idx;
0142 struct list_head queue_list;
0143
0144 struct nvmet_tcp_cmd connect;
0145
0146 struct page_frag_cache pf_cache;
0147
0148 void (*data_ready)(struct sock *);
0149 void (*state_change)(struct sock *);
0150 void (*write_space)(struct sock *);
0151 };
0152
0153 struct nvmet_tcp_port {
0154 struct socket *sock;
0155 struct work_struct accept_work;
0156 struct nvmet_port *nport;
0157 struct sockaddr_storage addr;
0158 void (*data_ready)(struct sock *);
0159 };
0160
0161 static DEFINE_IDA(nvmet_tcp_queue_ida);
0162 static LIST_HEAD(nvmet_tcp_queue_list);
0163 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
0164
0165 static struct workqueue_struct *nvmet_tcp_wq;
0166 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
0167 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
0168 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
0169 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
0170 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
0171
0172 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
0173 struct nvmet_tcp_cmd *cmd)
0174 {
0175 if (unlikely(!queue->nr_cmds)) {
/* We didn't allocate cmds yet, send 0xffff */
0177 return USHRT_MAX;
0178 }
0179
0180 return cmd - queue->cmds;
0181 }
0182
0183 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
0184 {
0185 return nvme_is_write(cmd->req.cmd) &&
0186 cmd->rbytes_done < cmd->req.transfer_len;
0187 }
0188
0189 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
0190 {
0191 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
0192 }
0193
0194 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
0195 {
0196 return !nvme_is_write(cmd->req.cmd) &&
0197 cmd->req.transfer_len > 0 &&
0198 !cmd->req.cqe->status;
0199 }
0200
0201 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
0202 {
0203 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
0204 !cmd->rbytes_done;
0205 }
0206
0207 static inline struct nvmet_tcp_cmd *
0208 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
0209 {
0210 struct nvmet_tcp_cmd *cmd;
0211
0212 cmd = list_first_entry_or_null(&queue->free_list,
0213 struct nvmet_tcp_cmd, entry);
0214 if (!cmd)
0215 return NULL;
0216 list_del_init(&cmd->entry);
0217
0218 cmd->rbytes_done = cmd->wbytes_done = 0;
0219 cmd->pdu_len = 0;
0220 cmd->pdu_recv = 0;
0221 cmd->iov = NULL;
0222 cmd->flags = 0;
0223 return cmd;
0224 }
0225
0226 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
0227 {
0228 if (unlikely(cmd == &cmd->queue->connect))
0229 return;
0230
0231 list_add_tail(&cmd->entry, &cmd->queue->free_list);
0232 }
0233
0234 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
0235 {
0236 return queue->sock->sk->sk_incoming_cpu;
0237 }
0238
0239 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
0240 {
0241 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
0242 }
0243
0244 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
0245 {
0246 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
0247 }
0248
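/* Compute the CRC32C header digest over @pdu and store it right after the header bytes. */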
0249 static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
0250 void *pdu, size_t len)
0251 {
0252 struct scatterlist sg;
0253
0254 sg_init_one(&sg, pdu, len);
0255 ahash_request_set_crypt(hash, &sg, pdu + len, len);
0256 crypto_ahash_digest(hash);
0257 }
0258
0259 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
0260 void *pdu, size_t len)
0261 {
0262 struct nvme_tcp_hdr *hdr = pdu;
0263 __le32 recv_digest;
0264 __le32 exp_digest;
0265
0266 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
0267 pr_err("queue %d: header digest enabled but no header digest\n",
0268 queue->idx);
0269 return -EPROTO;
0270 }
0271
0272 recv_digest = *(__le32 *)(pdu + hdr->hlen);
0273 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
0274 exp_digest = *(__le32 *)(pdu + hdr->hlen);
0275 if (recv_digest != exp_digest) {
0276 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
0277 queue->idx, le32_to_cpu(recv_digest),
0278 le32_to_cpu(exp_digest));
0279 return -EPROTO;
0280 }
0281
0282 return 0;
0283 }
0284
0285 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
0286 {
0287 struct nvme_tcp_hdr *hdr = pdu;
0288 u8 digest_len = nvmet_tcp_hdgst_len(queue);
0289 u32 len;
0290
0291 len = le32_to_cpu(hdr->plen) - hdr->hlen -
0292 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
0293
0294 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
0295 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
0296 return -EPROTO;
0297 }
0298
0299 return 0;
0300 }
0301
0302 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
0303 {
0304 WARN_ON(unlikely(cmd->nr_mapped > 0));
0305
0306 kfree(cmd->iov);
0307 sgl_free(cmd->req.sg);
0308 cmd->iov = NULL;
0309 cmd->req.sg = NULL;
0310 }
0311
0312 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
0313 {
0314 struct scatterlist *sg;
0315 int i;
0316
0317 sg = &cmd->req.sg[cmd->sg_idx];
0318
0319 for (i = 0; i < cmd->nr_mapped; i++)
0320 kunmap(sg_page(&sg[i]));
0321
0322 cmd->nr_mapped = 0;
0323 }
0324
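/*
 * Build cmd->iov from the command's scatterlist, starting at the byte
 * offset already received (rbytes_done), and point recv_msg at it so
 * inline or H2C data is received directly into the request buffer.
 */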
0325 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
0326 {
0327 struct kvec *iov = cmd->iov;
0328 struct scatterlist *sg;
0329 u32 length, offset, sg_offset;
0330
0331 length = cmd->pdu_len;
0332 cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
0333 offset = cmd->rbytes_done;
0334 cmd->sg_idx = offset / PAGE_SIZE;
0335 sg_offset = offset % PAGE_SIZE;
0336 sg = &cmd->req.sg[cmd->sg_idx];
0337
0338 while (length) {
0339 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
0340
0341 iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
0342 iov->iov_len = iov_len;
0343
0344 length -= iov_len;
0345 sg = sg_next(sg);
0346 iov++;
0347 sg_offset = 0;
0348 }
0349
0350 iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
0351 cmd->nr_mapped, cmd->pdu_len);
0352 }
0353
0354 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
0355 {
0356 queue->rcv_state = NVMET_TCP_RECV_ERR;
0357 if (queue->nvme_sq.ctrl)
0358 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
0359 else
0360 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
0361 }
0362
0363 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
0364 {
0365 if (status == -EPIPE || status == -ECONNRESET)
0366 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
0367 else
0368 nvmet_tcp_fatal_error(queue);
0369 }
0370
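/*
 * Parse the command SGL: record the inline (in-capsule) data length for
 * writes and allocate the scatterlist (plus a receive iovec when host
 * data is expected) that backs the transfer.
 */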
0371 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
0372 {
0373 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
0374 u32 len = le32_to_cpu(sgl->length);
0375
0376 if (!len)
0377 return 0;
0378
0379 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
0380 NVME_SGL_FMT_OFFSET)) {
0381 if (!nvme_is_write(cmd->req.cmd))
0382 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
0383
0384 if (len > cmd->req.port->inline_data_size)
0385 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
0386 cmd->pdu_len = len;
0387 }
0388 cmd->req.transfer_len += len;
0389
0390 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
0391 if (!cmd->req.sg)
0392 return NVME_SC_INTERNAL;
0393 cmd->cur_sg = cmd->req.sg;
0394
0395 if (nvmet_tcp_has_data_in(cmd)) {
0396 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
0397 sizeof(*cmd->iov), GFP_KERNEL);
0398 if (!cmd->iov)
0399 goto err;
0400 }
0401
0402 return 0;
0403 err:
0404 nvmet_tcp_free_cmd_buffers(cmd);
0405 return NVME_SC_INTERNAL;
0406 }
0407
0408 static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
0409 struct nvmet_tcp_cmd *cmd)
0410 {
0411 ahash_request_set_crypt(hash, cmd->req.sg,
0412 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
0413 crypto_ahash_digest(hash);
0414 }
0415
0416 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
0417 {
0418 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
0419 struct nvmet_tcp_queue *queue = cmd->queue;
0420 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
0421 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
0422
0423 cmd->offset = 0;
0424 cmd->state = NVMET_TCP_SEND_DATA_PDU;
0425
0426 pdu->hdr.type = nvme_tcp_c2h_data;
0427 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
0428 NVME_TCP_F_DATA_SUCCESS : 0);
0429 pdu->hdr.hlen = sizeof(*pdu);
0430 pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
0431 pdu->hdr.plen =
0432 cpu_to_le32(pdu->hdr.hlen + hdgst +
0433 cmd->req.transfer_len + ddgst);
0434 pdu->command_id = cmd->req.cqe->command_id;
0435 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
0436 pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
0437
0438 if (queue->data_digest) {
0439 pdu->hdr.flags |= NVME_TCP_F_DDGST;
0440 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
0441 }
0442
0443 if (cmd->queue->hdr_digest) {
0444 pdu->hdr.flags |= NVME_TCP_F_HDGST;
0445 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
0446 }
0447 }
0448
0449 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
0450 {
0451 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
0452 struct nvmet_tcp_queue *queue = cmd->queue;
0453 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
0454
0455 cmd->offset = 0;
0456 cmd->state = NVMET_TCP_SEND_R2T;
0457
0458 pdu->hdr.type = nvme_tcp_r2t;
0459 pdu->hdr.flags = 0;
0460 pdu->hdr.hlen = sizeof(*pdu);
0461 pdu->hdr.pdo = 0;
0462 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
0463
0464 pdu->command_id = cmd->req.cmd->common.command_id;
0465 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
0466 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
0467 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
0468 if (cmd->queue->hdr_digest) {
0469 pdu->hdr.flags |= NVME_TCP_F_HDGST;
0470 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
0471 }
0472 }
0473
0474 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
0475 {
0476 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
0477 struct nvmet_tcp_queue *queue = cmd->queue;
0478 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
0479
0480 cmd->offset = 0;
0481 cmd->state = NVMET_TCP_SEND_RESPONSE;
0482
0483 pdu->hdr.type = nvme_tcp_rsp;
0484 pdu->hdr.flags = 0;
0485 pdu->hdr.hlen = sizeof(*pdu);
0486 pdu->hdr.pdo = 0;
0487 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
0488 if (cmd->queue->hdr_digest) {
0489 pdu->hdr.flags |= NVME_TCP_F_HDGST;
0490 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
0491 }
0492 }
0493
0494 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
0495 {
0496 struct llist_node *node;
0497 struct nvmet_tcp_cmd *cmd;
0498
0499 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
0500 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
0501 list_add(&cmd->entry, &queue->resp_send_list);
0502 queue->send_list_len++;
0503 }
0504 }
0505
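/*
 * Pick the next command to transmit: refill resp_send_list from the
 * lockless resp_list if needed, then set up the matching PDU (C2H data,
 * R2T or response capsule) for the selected command.
 */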
0506 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
0507 {
0508 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
0509 struct nvmet_tcp_cmd, entry);
0510 if (!queue->snd_cmd) {
0511 nvmet_tcp_process_resp_list(queue);
0512 queue->snd_cmd =
0513 list_first_entry_or_null(&queue->resp_send_list,
0514 struct nvmet_tcp_cmd, entry);
0515 if (unlikely(!queue->snd_cmd))
0516 return NULL;
0517 }
0518
0519 list_del_init(&queue->snd_cmd->entry);
0520 queue->send_list_len--;
0521
0522 if (nvmet_tcp_need_data_out(queue->snd_cmd))
0523 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
0524 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
0525 nvmet_setup_r2t_pdu(queue->snd_cmd);
0526 else
0527 nvmet_setup_response_pdu(queue->snd_cmd);
0528
0529 return queue->snd_cmd;
0530 }
0531
0532 static void nvmet_tcp_queue_response(struct nvmet_req *req)
0533 {
0534 struct nvmet_tcp_cmd *cmd =
0535 container_of(req, struct nvmet_tcp_cmd, req);
0536 struct nvmet_tcp_queue *queue = cmd->queue;
0537 struct nvme_sgl_desc *sgl;
0538 u32 len;
0539
0540 if (unlikely(cmd == queue->cmd)) {
0541 sgl = &cmd->req.cmd->common.dptr.sgl;
0542 len = le32_to_cpu(sgl->length);
0543
/*
 * Wait for inline data before processing the response.
 * Avoid using helpers, this might happen before
 * nvmet_req_init is completed.
 */
0549 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
0550 len && len <= cmd->req.port->inline_data_size &&
0551 nvme_is_write(cmd->req.cmd))
0552 return;
0553 }
0554
0555 llist_add(&cmd->lentry, &queue->resp_list);
0556 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
0557 }
0558
0559 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
0560 {
0561 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
0562 nvmet_tcp_queue_response(&cmd->req);
0563 else
0564 cmd->req.execute(&cmd->req);
0565 }
0566
0567 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
0568 {
0569 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
0570 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
0571 int ret;
0572
0573 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
0574 offset_in_page(cmd->data_pdu) + cmd->offset,
0575 left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
0576 if (ret <= 0)
0577 return ret;
0578
0579 cmd->offset += ret;
0580 left -= ret;
0581
0582 if (left)
0583 return -EAGAIN;
0584
0585 cmd->state = NVMET_TCP_SEND_DATA;
0586 cmd->offset = 0;
0587 return 1;
0588 }
0589
0590 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
0591 {
0592 struct nvmet_tcp_queue *queue = cmd->queue;
0593 int ret;
0594
0595 while (cmd->cur_sg) {
0596 struct page *page = sg_page(cmd->cur_sg);
0597 u32 left = cmd->cur_sg->length - cmd->offset;
0598 int flags = MSG_DONTWAIT;
0599
0600 if ((!last_in_batch && cmd->queue->send_list_len) ||
0601 cmd->wbytes_done + left < cmd->req.transfer_len ||
0602 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
0603 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
0604
0605 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
0606 left, flags);
0607 if (ret <= 0)
0608 return ret;
0609
0610 cmd->offset += ret;
0611 cmd->wbytes_done += ret;
0612
/* Done with this sg entry? Advance to the next one. */
0614 if (cmd->offset == cmd->cur_sg->length) {
0615 cmd->cur_sg = sg_next(cmd->cur_sg);
0616 cmd->offset = 0;
0617 }
0618 }
0619
0620 if (queue->data_digest) {
0621 cmd->state = NVMET_TCP_SEND_DDGST;
0622 cmd->offset = 0;
0623 } else {
0624 if (queue->nvme_sq.sqhd_disabled) {
0625 cmd->queue->snd_cmd = NULL;
0626 nvmet_tcp_put_cmd(cmd);
0627 } else {
0628 nvmet_setup_response_pdu(cmd);
0629 }
0630 }
0631
0632 if (queue->nvme_sq.sqhd_disabled)
0633 nvmet_tcp_free_cmd_buffers(cmd);
0634
0635 return 1;
0637 }
0638
0639 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
0640 bool last_in_batch)
0641 {
0642 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
0643 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
0644 int flags = MSG_DONTWAIT;
0645 int ret;
0646
0647 if (!last_in_batch && cmd->queue->send_list_len)
0648 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
0649 else
0650 flags |= MSG_EOR;
0651
0652 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
0653 offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
0654 if (ret <= 0)
0655 return ret;
0656 cmd->offset += ret;
0657 left -= ret;
0658
0659 if (left)
0660 return -EAGAIN;
0661
0662 nvmet_tcp_free_cmd_buffers(cmd);
0663 cmd->queue->snd_cmd = NULL;
0664 nvmet_tcp_put_cmd(cmd);
0665 return 1;
0666 }
0667
0668 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
0669 {
0670 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
0671 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
0672 int flags = MSG_DONTWAIT;
0673 int ret;
0674
0675 if (!last_in_batch && cmd->queue->send_list_len)
0676 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
0677 else
0678 flags |= MSG_EOR;
0679
0680 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
0681 offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
0682 if (ret <= 0)
0683 return ret;
0684 cmd->offset += ret;
0685 left -= ret;
0686
0687 if (left)
0688 return -EAGAIN;
0689
0690 cmd->queue->snd_cmd = NULL;
0691 return 1;
0692 }
0693
0694 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
0695 {
0696 struct nvmet_tcp_queue *queue = cmd->queue;
0697 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
0698 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
0699 struct kvec iov = {
0700 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
0701 .iov_len = left
0702 };
0703 int ret;
0704
0705 if (!last_in_batch && cmd->queue->send_list_len)
0706 msg.msg_flags |= MSG_MORE;
0707 else
0708 msg.msg_flags |= MSG_EOR;
0709
0710 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
0711 if (unlikely(ret <= 0))
0712 return ret;
0713
0714 cmd->offset += ret;
0715 left -= ret;
0716
0717 if (left)
0718 return -EAGAIN;
0719
0720 if (queue->nvme_sq.sqhd_disabled) {
0721 cmd->queue->snd_cmd = NULL;
0722 nvmet_tcp_put_cmd(cmd);
0723 } else {
0724 nvmet_setup_response_pdu(cmd);
0725 }
0726 return 1;
0727 }
0728
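/*
 * Advance the current command's send state machine by one step:
 * data PDU -> data -> (optional) data digest -> response, or R2T.
 * Returns 1 when progress was made, 0 when nothing can be sent now,
 * or a negative error.
 */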
0729 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
0730 bool last_in_batch)
0731 {
0732 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
0733 int ret = 0;
0734
0735 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
0736 cmd = nvmet_tcp_fetch_cmd(queue);
0737 if (unlikely(!cmd))
0738 return 0;
0739 }
0740
0741 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
0742 ret = nvmet_try_send_data_pdu(cmd);
0743 if (ret <= 0)
0744 goto done_send;
0745 }
0746
0747 if (cmd->state == NVMET_TCP_SEND_DATA) {
0748 ret = nvmet_try_send_data(cmd, last_in_batch);
0749 if (ret <= 0)
0750 goto done_send;
0751 }
0752
0753 if (cmd->state == NVMET_TCP_SEND_DDGST) {
0754 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
0755 if (ret <= 0)
0756 goto done_send;
0757 }
0758
0759 if (cmd->state == NVMET_TCP_SEND_R2T) {
0760 ret = nvmet_try_send_r2t(cmd, last_in_batch);
0761 if (ret <= 0)
0762 goto done_send;
0763 }
0764
0765 if (cmd->state == NVMET_TCP_SEND_RESPONSE)
0766 ret = nvmet_try_send_response(cmd, last_in_batch);
0767
0768 done_send:
0769 if (ret < 0) {
0770 if (ret == -EAGAIN)
0771 return 0;
0772 return ret;
0773 }
0774
0775 return 1;
0776 }
0777
0778 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
0779 int budget, int *sends)
0780 {
0781 int i, ret = 0;
0782
0783 for (i = 0; i < budget; i++) {
0784 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
0785 if (unlikely(ret < 0)) {
0786 nvmet_tcp_socket_error(queue, ret);
0787 goto done;
0788 } else if (ret == 0) {
0789 break;
0790 }
0791 (*sends)++;
0792 }
0793 done:
0794 return ret;
0795 }
0796
0797 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
0798 {
0799 queue->offset = 0;
0800 queue->left = sizeof(struct nvme_tcp_hdr);
0801 queue->cmd = NULL;
0802 queue->rcv_state = NVMET_TCP_RECV_PDU;
0803 }
0804
0805 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
0806 {
0807 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
0808
0809 ahash_request_free(queue->rcv_hash);
0810 ahash_request_free(queue->snd_hash);
0811 crypto_free_ahash(tfm);
0812 }
0813
0814 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
0815 {
0816 struct crypto_ahash *tfm;
0817
0818 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
0819 if (IS_ERR(tfm))
0820 return PTR_ERR(tfm);
0821
0822 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
0823 if (!queue->snd_hash)
0824 goto free_tfm;
0825 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
0826
0827 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
0828 if (!queue->rcv_hash)
0829 goto free_snd_hash;
0830 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
0831
0832 return 0;
0833 free_snd_hash:
0834 ahash_request_free(queue->snd_hash);
0835 free_tfm:
0836 crypto_free_ahash(tfm);
0837 return -ENOMEM;
0838 }
0839
0840
0841 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
0842 {
0843 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
0844 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
0845 struct msghdr msg = {};
0846 struct kvec iov;
0847 int ret;
0848
0849 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
0850 pr_err("bad nvme-tcp pdu length (%d)\n",
0851 le32_to_cpu(icreq->hdr.plen));
0852 nvmet_tcp_fatal_error(queue);
0853 }
0854
0855 if (icreq->pfv != NVME_TCP_PFV_1_0) {
0856 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
0857 return -EPROTO;
0858 }
0859
0860 if (icreq->hpda != 0) {
0861 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
0862 icreq->hpda);
0863 return -EPROTO;
0864 }
0865
0866 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
0867 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
0868 if (queue->hdr_digest || queue->data_digest) {
0869 ret = nvmet_tcp_alloc_crypto(queue);
0870 if (ret)
0871 return ret;
0872 }
0873
0874 memset(icresp, 0, sizeof(*icresp));
0875 icresp->hdr.type = nvme_tcp_icresp;
0876 icresp->hdr.hlen = sizeof(*icresp);
0877 icresp->hdr.pdo = 0;
0878 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
0879 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
0880 icresp->maxdata = cpu_to_le32(0x400000);
0881 icresp->cpda = 0;
0882 if (queue->hdr_digest)
0883 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
0884 if (queue->data_digest)
0885 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
0886
0887 iov.iov_base = icresp;
0888 iov.iov_len = sizeof(*icresp);
0889 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
0890 if (ret < 0)
0891 goto free_crypto;
0892
0893 queue->state = NVMET_TCP_Q_LIVE;
0894 nvmet_prepare_receive_pdu(queue);
0895 return 0;
0896 free_crypto:
0897 if (queue->hdr_digest || queue->data_digest)
0898 nvmet_tcp_free_crypto(queue);
0899 return ret;
0900 }
0901
0902 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
0903 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
0904 {
0905 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
0906 int ret;
0907
/*
 * This command has not been processed yet, hence we are trying to
 * figure out if there is still pending data left to receive. If
 * we don't, we can simply prepare for the next pdu and bail out,
 * otherwise we will need to prepare a buffer and receive the
 * stale data before continuing forward.
 */
0915 if (!nvme_is_write(cmd->req.cmd) || !data_len ||
0916 data_len > cmd->req.port->inline_data_size) {
0917 nvmet_prepare_receive_pdu(queue);
0918 return;
0919 }
0920
0921 ret = nvmet_tcp_map_data(cmd);
0922 if (unlikely(ret)) {
0923 pr_err("queue %d: failed to map data\n", queue->idx);
0924 nvmet_tcp_fatal_error(queue);
0925 return;
0926 }
0927
0928 queue->rcv_state = NVMET_TCP_RECV_DATA;
0929 nvmet_tcp_map_pdu_iovec(cmd);
0930 cmd->flags |= NVMET_TCP_F_INIT_FAILED;
0931 }
0932
0933 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
0934 {
0935 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
0936 struct nvmet_tcp_cmd *cmd;
0937
0938 if (likely(queue->nr_cmds))
0939 cmd = &queue->cmds[data->ttag];
0940 else
0941 cmd = &queue->connect;
0942
0943 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
0944 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
0945 data->ttag, le32_to_cpu(data->data_offset),
0946 cmd->rbytes_done);
/* Data offset mismatch from the host: fail the command. */
0948 nvmet_req_complete(&cmd->req,
0949 NVME_SC_INVALID_FIELD | NVME_SC_DNR);
0950 return -EPROTO;
0951 }
0952
0953 cmd->pdu_len = le32_to_cpu(data->data_length);
0954 cmd->pdu_recv = 0;
0955 nvmet_tcp_map_pdu_iovec(cmd);
0956 queue->cmd = cmd;
0957 queue->rcv_state = NVMET_TCP_RECV_DATA;
0958
0959 return 0;
0960 }
0961
0962 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
0963 {
0964 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
0965 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
0966 struct nvmet_req *req;
0967 int ret;
0968
0969 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
0970 if (hdr->type != nvme_tcp_icreq) {
0971 pr_err("unexpected pdu type (%d) before icreq\n",
0972 hdr->type);
0973 nvmet_tcp_fatal_error(queue);
0974 return -EPROTO;
0975 }
0976 return nvmet_tcp_handle_icreq(queue);
0977 }
0978
0979 if (hdr->type == nvme_tcp_h2c_data) {
0980 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
0981 if (unlikely(ret))
0982 return ret;
0983 return 0;
0984 }
0985
0986 queue->cmd = nvmet_tcp_get_cmd(queue);
0987 if (unlikely(!queue->cmd)) {
/* This should never happen: the host exceeded its command limit. */
0989 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
0990 queue->idx, queue->nr_cmds, queue->send_list_len,
0991 nvme_cmd->common.opcode);
0992 nvmet_tcp_fatal_error(queue);
0993 return -ENOMEM;
0994 }
0995
0996 req = &queue->cmd->req;
0997 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
0998
0999 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1000 &queue->nvme_sq, &nvmet_tcp_ops))) {
1001 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
1002 req->cmd, req->cmd->common.command_id,
1003 req->cmd->common.opcode,
1004 le32_to_cpu(req->cmd->common.dptr.sgl.length));
1005
1006 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1007 return 0;
1008 }
1009
1010 ret = nvmet_tcp_map_data(queue->cmd);
1011 if (unlikely(ret)) {
1012 pr_err("queue %d: failed to map data\n", queue->idx);
1013 if (nvmet_tcp_has_inline_data(queue->cmd))
1014 nvmet_tcp_fatal_error(queue);
1015 else
1016 nvmet_req_complete(req, ret);
1017 ret = -EAGAIN;
1018 goto out;
1019 }
1020
1021 if (nvmet_tcp_need_data_in(queue->cmd)) {
1022 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1023 queue->rcv_state = NVMET_TCP_RECV_DATA;
1024 nvmet_tcp_map_pdu_iovec(queue->cmd);
1025 return 0;
1026 }
1027
1028 nvmet_tcp_queue_response(&queue->cmd->req);
1029 goto out;
1030 }
1031
1032 queue->cmd->req.execute(&queue->cmd->req);
1033 out:
1034 nvmet_prepare_receive_pdu(queue);
1035 return ret;
1036 }
1037
1038 static const u8 nvme_tcp_pdu_sizes[] = {
1039 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
1040 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
1041 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
1042 };
1043
1044 static inline u8 nvmet_tcp_pdu_size(u8 type)
1045 {
1046 size_t idx = type;
1047
1048 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1049 nvme_tcp_pdu_sizes[idx]) ?
1050 nvme_tcp_pdu_sizes[idx] : 0;
1051 }
1052
1053 static inline bool nvmet_tcp_pdu_valid(u8 type)
1054 {
1055 switch (type) {
1056 case nvme_tcp_icreq:
1057 case nvme_tcp_cmd:
1058 case nvme_tcp_h2c_data:
/* fallthru */
1060 return true;
1061 }
1062
1063 return false;
1064 }
1065
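/*
 * Receive a PDU header: read the common header first to learn the type
 * and hlen, then the remainder (plus header digest if negotiated),
 * verify digests and hand the PDU to nvmet_tcp_done_recv_pdu().
 */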
1066 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1067 {
1068 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1069 int len;
1070 struct kvec iov;
1071 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1072
1073 recv:
1074 iov.iov_base = (void *)&queue->pdu + queue->offset;
1075 iov.iov_len = queue->left;
1076 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1077 iov.iov_len, msg.msg_flags);
1078 if (unlikely(len < 0))
1079 return len;
1080
1081 queue->offset += len;
1082 queue->left -= len;
1083 if (queue->left)
1084 return -EAGAIN;
1085
1086 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1087 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1088
1089 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1090 pr_err("unexpected pdu type %d\n", hdr->type);
1091 nvmet_tcp_fatal_error(queue);
1092 return -EIO;
1093 }
1094
1095 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1096 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1097 return -EIO;
1098 }
1099
1100 queue->left = hdr->hlen - queue->offset + hdgst;
1101 goto recv;
1102 }
1103
1104 if (queue->hdr_digest &&
1105 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1106 nvmet_tcp_fatal_error(queue);
1107 return -EPROTO;
1108 }
1109
1110 if (queue->data_digest &&
1111 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1112 nvmet_tcp_fatal_error(queue);
1113 return -EPROTO;
1114 }
1115
1116 return nvmet_tcp_done_recv_pdu(queue);
1117 }
1118
1119 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1120 {
1121 struct nvmet_tcp_queue *queue = cmd->queue;
1122
1123 nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
1124 queue->offset = 0;
1125 queue->left = NVME_TCP_DIGEST_LENGTH;
1126 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1127 }
1128
1129 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1130 {
1131 struct nvmet_tcp_cmd *cmd = queue->cmd;
1132 int ret;
1133
1134 while (msg_data_left(&cmd->recv_msg)) {
1135 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1136 cmd->recv_msg.msg_flags);
1137 if (ret <= 0)
1138 return ret;
1139
1140 cmd->pdu_recv += ret;
1141 cmd->rbytes_done += ret;
1142 }
1143
1144 nvmet_tcp_unmap_pdu_iovec(cmd);
1145 if (queue->data_digest) {
1146 nvmet_tcp_prep_recv_ddgst(cmd);
1147 return 0;
1148 }
1149
1150 if (cmd->rbytes_done == cmd->req.transfer_len)
1151 nvmet_tcp_execute_request(cmd);
1152
1153 nvmet_prepare_receive_pdu(queue);
1154 return 0;
1155 }
1156
1157 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1158 {
1159 struct nvmet_tcp_cmd *cmd = queue->cmd;
1160 int ret;
1161 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1162 struct kvec iov = {
1163 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1164 .iov_len = queue->left
1165 };
1166
1167 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1168 iov.iov_len, msg.msg_flags);
1169 if (unlikely(ret < 0))
1170 return ret;
1171
1172 queue->offset += ret;
1173 queue->left -= ret;
1174 if (queue->left)
1175 return -EAGAIN;
1176
1177 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1178 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1179 queue->idx, cmd->req.cmd->common.command_id,
1180 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1181 le32_to_cpu(cmd->exp_ddgst));
1182 nvmet_tcp_finish_cmd(cmd);
1183 nvmet_tcp_fatal_error(queue);
1184 ret = -EPROTO;
1185 goto out;
1186 }
1187
1188 if (cmd->rbytes_done == cmd->req.transfer_len)
1189 nvmet_tcp_execute_request(cmd);
1190
1191 ret = 0;
1192 out:
1193 nvmet_prepare_receive_pdu(queue);
1194 return ret;
1195 }
1196
1197 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1198 {
1199 int result = 0;
1200
1201 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1202 return 0;
1203
1204 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1205 result = nvmet_tcp_try_recv_pdu(queue);
1206 if (result != 0)
1207 goto done_recv;
1208 }
1209
1210 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1211 result = nvmet_tcp_try_recv_data(queue);
1212 if (result != 0)
1213 goto done_recv;
1214 }
1215
1216 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1217 result = nvmet_tcp_try_recv_ddgst(queue);
1218 if (result != 0)
1219 goto done_recv;
1220 }
1221
1222 done_recv:
1223 if (result < 0) {
1224 if (result == -EAGAIN)
1225 return 0;
1226 return result;
1227 }
1228 return 1;
1229 }
1230
1231 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1232 int budget, int *recvs)
1233 {
1234 int i, ret = 0;
1235
1236 for (i = 0; i < budget; i++) {
1237 ret = nvmet_tcp_try_recv_one(queue);
1238 if (unlikely(ret < 0)) {
1239 nvmet_tcp_socket_error(queue, ret);
1240 goto done;
1241 } else if (ret == 0) {
1242 break;
1243 }
1244 (*recvs)++;
1245 }
1246 done:
1247 return ret;
1248 }
1249
1250 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1251 {
1252 spin_lock(&queue->state_lock);
1253 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1254 queue->state = NVMET_TCP_Q_DISCONNECTING;
1255 queue_work(nvmet_wq, &queue->release_work);
1256 }
1257 spin_unlock(&queue->state_lock);
1258 }
1259
1260 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1261 {
1262 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1263 }
1264
1265 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1266 int ops)
1267 {
1268 if (!idle_poll_period_usecs)
1269 return false;
1270
1271 if (ops)
1272 nvmet_tcp_arm_queue_deadline(queue);
1273
1274 return !time_after(jiffies, queue->poll_end);
1275 }
1276
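/*
 * Per-queue worker: alternate bounded receive and send passes until
 * nothing is pending or the overall budget is exhausted, then decide
 * whether to requeue itself.
 */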
1277 static void nvmet_tcp_io_work(struct work_struct *w)
1278 {
1279 struct nvmet_tcp_queue *queue =
1280 container_of(w, struct nvmet_tcp_queue, io_work);
1281 bool pending;
1282 int ret, ops = 0;
1283
1284 do {
1285 pending = false;
1286
1287 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1288 if (ret > 0)
1289 pending = true;
1290 else if (ret < 0)
1291 return;
1292
1293 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1294 if (ret > 0)
1295 pending = true;
1296 else if (ret < 0)
1297 return;
1298
1299 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1300
/*
 * Requeue the worker if idle deadline period is in progress or any
 * ops activity was recorded during the loop above.
 */
1305 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1306 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1307 }
1308
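/*
 * Allocate the per-command PDU buffers (command, response, C2H data and
 * R2T) from the queue's page_frag cache, each sized with room for a
 * header digest.
 */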
1309 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1310 struct nvmet_tcp_cmd *c)
1311 {
1312 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1313
1314 c->queue = queue;
1315 c->req.port = queue->port->nport;
1316
1317 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1318 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1319 if (!c->cmd_pdu)
1320 return -ENOMEM;
1321 c->req.cmd = &c->cmd_pdu->cmd;
1322
1323 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1324 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1325 if (!c->rsp_pdu)
1326 goto out_free_cmd;
1327 c->req.cqe = &c->rsp_pdu->cqe;
1328
1329 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1330 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1331 if (!c->data_pdu)
1332 goto out_free_rsp;
1333
1334 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1335 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1336 if (!c->r2t_pdu)
1337 goto out_free_data;
1338
1339 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1340
1341 list_add_tail(&c->entry, &queue->free_list);
1342
1343 return 0;
1344 out_free_data:
1345 page_frag_free(c->data_pdu);
1346 out_free_rsp:
1347 page_frag_free(c->rsp_pdu);
1348 out_free_cmd:
1349 page_frag_free(c->cmd_pdu);
1350 return -ENOMEM;
1351 }
1352
1353 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1354 {
1355 page_frag_free(c->r2t_pdu);
1356 page_frag_free(c->data_pdu);
1357 page_frag_free(c->rsp_pdu);
1358 page_frag_free(c->cmd_pdu);
1359 }
1360
1361 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1362 {
1363 struct nvmet_tcp_cmd *cmds;
1364 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1365
1366 cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1367 if (!cmds)
1368 goto out;
1369
1370 for (i = 0; i < nr_cmds; i++) {
1371 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1372 if (ret)
1373 goto out_free;
1374 }
1375
1376 queue->cmds = cmds;
1377
1378 return 0;
1379 out_free:
1380 while (--i >= 0)
1381 nvmet_tcp_free_cmd(cmds + i);
1382 kfree(cmds);
1383 out:
1384 return ret;
1385 }
1386
1387 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1388 {
1389 struct nvmet_tcp_cmd *cmds = queue->cmds;
1390 int i;
1391
1392 for (i = 0; i < queue->nr_cmds; i++)
1393 nvmet_tcp_free_cmd(cmds + i);
1394
1395 nvmet_tcp_free_cmd(&queue->connect);
1396 kfree(cmds);
1397 }
1398
1399 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1400 {
1401 struct socket *sock = queue->sock;
1402
1403 write_lock_bh(&sock->sk->sk_callback_lock);
1404 sock->sk->sk_data_ready = queue->data_ready;
1405 sock->sk->sk_state_change = queue->state_change;
1406 sock->sk->sk_write_space = queue->write_space;
1407 sock->sk->sk_user_data = NULL;
1408 write_unlock_bh(&sock->sk->sk_callback_lock);
1409 }
1410
1411 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
1412 {
1413 nvmet_req_uninit(&cmd->req);
1414 nvmet_tcp_unmap_pdu_iovec(cmd);
1415 nvmet_tcp_free_cmd_buffers(cmd);
1416 }
1417
1418 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1419 {
1420 struct nvmet_tcp_cmd *cmd = queue->cmds;
1421 int i;
1422
1423 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1424 if (nvmet_tcp_need_data_in(cmd))
1425 nvmet_req_uninit(&cmd->req);
1426
1427 nvmet_tcp_unmap_pdu_iovec(cmd);
1428 nvmet_tcp_free_cmd_buffers(cmd);
1429 }
1430
1431 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
/* failed in connect */
1433 nvmet_tcp_finish_cmd(&queue->connect);
1434 }
1435 }
1436
1437 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1438 {
1439 struct page *page;
1440 struct nvmet_tcp_queue *queue =
1441 container_of(w, struct nvmet_tcp_queue, release_work);
1442
1443 mutex_lock(&nvmet_tcp_queue_mutex);
1444 list_del_init(&queue->queue_list);
1445 mutex_unlock(&nvmet_tcp_queue_mutex);
1446
1447 nvmet_tcp_restore_socket_callbacks(queue);
1448 cancel_work_sync(&queue->io_work);
1449
1450 queue->rcv_state = NVMET_TCP_RECV_ERR;
1451
1452 nvmet_tcp_uninit_data_in_cmds(queue);
1453 nvmet_sq_destroy(&queue->nvme_sq);
1454 cancel_work_sync(&queue->io_work);
1455 sock_release(queue->sock);
1456 nvmet_tcp_free_cmds(queue);
1457 if (queue->hdr_digest || queue->data_digest)
1458 nvmet_tcp_free_crypto(queue);
1459 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1460
1461 page = virt_to_head_page(queue->pf_cache.va);
1462 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1463 kfree(queue);
1464 }
1465
1466 static void nvmet_tcp_data_ready(struct sock *sk)
1467 {
1468 struct nvmet_tcp_queue *queue;
1469
1470 read_lock_bh(&sk->sk_callback_lock);
1471 queue = sk->sk_user_data;
1472 if (likely(queue))
1473 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1474 read_unlock_bh(&sk->sk_callback_lock);
1475 }
1476
1477 static void nvmet_tcp_write_space(struct sock *sk)
1478 {
1479 struct nvmet_tcp_queue *queue;
1480
1481 read_lock_bh(&sk->sk_callback_lock);
1482 queue = sk->sk_user_data;
1483 if (unlikely(!queue))
1484 goto out;
1485
1486 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1487 queue->write_space(sk);
1488 goto out;
1489 }
1490
1491 if (sk_stream_is_writeable(sk)) {
1492 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1493 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1494 }
1495 out:
1496 read_unlock_bh(&sk->sk_callback_lock);
1497 }
1498
1499 static void nvmet_tcp_state_change(struct sock *sk)
1500 {
1501 struct nvmet_tcp_queue *queue;
1502
1503 read_lock_bh(&sk->sk_callback_lock);
1504 queue = sk->sk_user_data;
1505 if (!queue)
1506 goto done;
1507
1508 switch (sk->sk_state) {
1509 case TCP_FIN_WAIT2:
1510 case TCP_LAST_ACK:
1511 break;
1512 case TCP_FIN_WAIT1:
1513 case TCP_CLOSE_WAIT:
1514 case TCP_CLOSE:
/* socket is closing: tear the queue down */
1516 nvmet_tcp_schedule_release_queue(queue);
1517 break;
1518 default:
1519 pr_warn("queue %d unhandled state %d\n",
1520 queue->idx, sk->sk_state);
1521 }
1522 done:
1523 read_unlock_bh(&sk->sk_callback_lock);
1524 }
1525
1526 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1527 {
1528 struct socket *sock = queue->sock;
1529 struct inet_sock *inet = inet_sk(sock->sk);
1530 int ret;
1531
1532 ret = kernel_getsockname(sock,
1533 (struct sockaddr *)&queue->sockaddr);
1534 if (ret < 0)
1535 return ret;
1536
1537 ret = kernel_getpeername(sock,
1538 (struct sockaddr *)&queue->sockaddr_peer);
1539 if (ret < 0)
1540 return ret;
1541
/*
 * Cleanup whatever is sitting in the TCP transmit queue on socket
 * close. This is done to prevent stale data from being sent should
 * the network connection be restored before TCP times out.
 */
1547 sock_no_linger(sock->sk);
1548
1549 if (so_priority > 0)
1550 sock_set_priority(sock->sk, so_priority);
1551
/* Set socket type of service */
1553 if (inet->rcv_tos > 0)
1554 ip_sock_set_tos(sock->sk, inet->rcv_tos);
1555
1556 ret = 0;
1557 write_lock_bh(&sock->sk->sk_callback_lock);
1558 if (sock->sk->sk_state != TCP_ESTABLISHED) {
/*
 * If the socket is already closing, don't even start
 * consuming it
 */
1563 ret = -ENOTCONN;
1564 } else {
1565 sock->sk->sk_user_data = queue;
1566 queue->data_ready = sock->sk->sk_data_ready;
1567 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1568 queue->state_change = sock->sk->sk_state_change;
1569 sock->sk->sk_state_change = nvmet_tcp_state_change;
1570 queue->write_space = sock->sk->sk_write_space;
1571 sock->sk->sk_write_space = nvmet_tcp_write_space;
1572 if (idle_poll_period_usecs)
1573 nvmet_tcp_arm_queue_deadline(queue);
1574 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1575 }
1576 write_unlock_bh(&sock->sk->sk_callback_lock);
1577
1578 return ret;
1579 }
1580
1581 static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1582 struct socket *newsock)
1583 {
1584 struct nvmet_tcp_queue *queue;
1585 int ret;
1586
1587 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1588 if (!queue)
1589 return -ENOMEM;
1590
1591 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1592 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1593 queue->sock = newsock;
1594 queue->port = port;
1595 queue->nr_cmds = 0;
1596 spin_lock_init(&queue->state_lock);
1597 queue->state = NVMET_TCP_Q_CONNECTING;
1598 INIT_LIST_HEAD(&queue->free_list);
1599 init_llist_head(&queue->resp_list);
1600 INIT_LIST_HEAD(&queue->resp_send_list);
1601
1602 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1603 if (queue->idx < 0) {
1604 ret = queue->idx;
1605 goto out_free_queue;
1606 }
1607
1608 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1609 if (ret)
1610 goto out_ida_remove;
1611
1612 ret = nvmet_sq_init(&queue->nvme_sq);
1613 if (ret)
1614 goto out_free_connect;
1615
1616 nvmet_prepare_receive_pdu(queue);
1617
1618 mutex_lock(&nvmet_tcp_queue_mutex);
1619 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1620 mutex_unlock(&nvmet_tcp_queue_mutex);
1621
1622 ret = nvmet_tcp_set_queue_sock(queue);
1623 if (ret)
1624 goto out_destroy_sq;
1625
1626 return 0;
1627 out_destroy_sq:
1628 mutex_lock(&nvmet_tcp_queue_mutex);
1629 list_del_init(&queue->queue_list);
1630 mutex_unlock(&nvmet_tcp_queue_mutex);
1631 nvmet_sq_destroy(&queue->nvme_sq);
1632 out_free_connect:
1633 nvmet_tcp_free_cmd(&queue->connect);
1634 out_ida_remove:
1635 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1636 out_free_queue:
1637 kfree(queue);
1638 return ret;
1639 }
1640
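/* Accept all pending connections on the listening socket and set up a queue for each. */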
1641 static void nvmet_tcp_accept_work(struct work_struct *w)
1642 {
1643 struct nvmet_tcp_port *port =
1644 container_of(w, struct nvmet_tcp_port, accept_work);
1645 struct socket *newsock;
1646 int ret;
1647
1648 while (true) {
1649 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1650 if (ret < 0) {
1651 if (ret != -EAGAIN)
1652 pr_warn("failed to accept err=%d\n", ret);
1653 return;
1654 }
1655 ret = nvmet_tcp_alloc_queue(port, newsock);
1656 if (ret) {
1657 pr_err("failed to allocate queue\n");
1658 sock_release(newsock);
1659 }
1660 }
1661 }
1662
1663 static void nvmet_tcp_listen_data_ready(struct sock *sk)
1664 {
1665 struct nvmet_tcp_port *port;
1666
1667 read_lock_bh(&sk->sk_callback_lock);
1668 port = sk->sk_user_data;
1669 if (!port)
1670 goto out;
1671
1672 if (sk->sk_state == TCP_LISTEN)
1673 queue_work(nvmet_wq, &port->accept_work);
1674 out:
1675 read_unlock_bh(&sk->sk_callback_lock);
1676 }
1677
1678 static int nvmet_tcp_add_port(struct nvmet_port *nport)
1679 {
1680 struct nvmet_tcp_port *port;
1681 __kernel_sa_family_t af;
1682 int ret;
1683
1684 port = kzalloc(sizeof(*port), GFP_KERNEL);
1685 if (!port)
1686 return -ENOMEM;
1687
1688 switch (nport->disc_addr.adrfam) {
1689 case NVMF_ADDR_FAMILY_IP4:
1690 af = AF_INET;
1691 break;
1692 case NVMF_ADDR_FAMILY_IP6:
1693 af = AF_INET6;
1694 break;
1695 default:
1696 pr_err("address family %d not supported\n",
1697 nport->disc_addr.adrfam);
1698 ret = -EINVAL;
1699 goto err_port;
1700 }
1701
1702 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1703 nport->disc_addr.trsvcid, &port->addr);
1704 if (ret) {
1705 pr_err("malformed ip/port passed: %s:%s\n",
1706 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1707 goto err_port;
1708 }
1709
1710 port->nport = nport;
1711 INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
1712 if (port->nport->inline_data_size < 0)
1713 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
1714
1715 ret = sock_create(port->addr.ss_family, SOCK_STREAM,
1716 IPPROTO_TCP, &port->sock);
1717 if (ret) {
1718 pr_err("failed to create a socket\n");
1719 goto err_port;
1720 }
1721
1722 port->sock->sk->sk_user_data = port;
1723 port->data_ready = port->sock->sk->sk_data_ready;
1724 port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
1725 sock_set_reuseaddr(port->sock->sk);
1726 tcp_sock_set_nodelay(port->sock->sk);
1727 if (so_priority > 0)
1728 sock_set_priority(port->sock->sk, so_priority);
1729
1730 ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
1731 sizeof(port->addr));
1732 if (ret) {
1733 pr_err("failed to bind port socket %d\n", ret);
1734 goto err_sock;
1735 }
1736
1737 ret = kernel_listen(port->sock, 128);
1738 if (ret) {
1739 pr_err("failed to listen %d on port sock\n", ret);
1740 goto err_sock;
1741 }
1742
1743 nport->priv = port;
1744 pr_info("enabling port %d (%pISpc)\n",
1745 le16_to_cpu(nport->disc_addr.portid), &port->addr);
1746
1747 return 0;
1748
1749 err_sock:
1750 sock_release(port->sock);
1751 err_port:
1752 kfree(port);
1753 return ret;
1754 }
1755
1756 static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
1757 {
1758 struct nvmet_tcp_queue *queue;
1759
1760 mutex_lock(&nvmet_tcp_queue_mutex);
1761 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1762 if (queue->port == port)
1763 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1764 mutex_unlock(&nvmet_tcp_queue_mutex);
1765 }
1766
1767 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
1768 {
1769 struct nvmet_tcp_port *port = nport->priv;
1770
1771 write_lock_bh(&port->sock->sk->sk_callback_lock);
1772 port->sock->sk->sk_data_ready = port->data_ready;
1773 port->sock->sk->sk_user_data = NULL;
1774 write_unlock_bh(&port->sock->sk->sk_callback_lock);
1775 cancel_work_sync(&port->accept_work);
/*
 * Destroy the remaining queues, which do not belong to any
 * controller yet.
 */
1780 nvmet_tcp_destroy_port_queues(port);
1781
1782 sock_release(port->sock);
1783 kfree(port);
1784 }
1785
1786 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
1787 {
1788 struct nvmet_tcp_queue *queue;
1789
1790 mutex_lock(&nvmet_tcp_queue_mutex);
1791 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1792 if (queue->nvme_sq.ctrl == ctrl)
1793 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1794 mutex_unlock(&nvmet_tcp_queue_mutex);
1795 }
1796
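/* Size and allocate the queue's command pool once the SQ depth is known at connect time. */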
1797 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
1798 {
1799 struct nvmet_tcp_queue *queue =
1800 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
1801
1802 if (sq->qid == 0) {
/* Let inflight controller teardown complete */
1804 flush_workqueue(nvmet_wq);
1805 }
1806
1807 queue->nr_cmds = sq->size * 2;
1808 if (nvmet_tcp_alloc_cmds(queue))
1809 return NVME_SC_INTERNAL;
1810 return 0;
1811 }
1812
1813 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1814 struct nvmet_port *nport, char *traddr)
1815 {
1816 struct nvmet_tcp_port *port = nport->priv;
1817
1818 if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
1819 struct nvmet_tcp_cmd *cmd =
1820 container_of(req, struct nvmet_tcp_cmd, req);
1821 struct nvmet_tcp_queue *queue = cmd->queue;
1822
1823 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1824 } else {
1825 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1826 }
1827 }
1828
1829 static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
1830 .owner = THIS_MODULE,
1831 .type = NVMF_TRTYPE_TCP,
1832 .msdbd = 1,
1833 .add_port = nvmet_tcp_add_port,
1834 .remove_port = nvmet_tcp_remove_port,
1835 .queue_response = nvmet_tcp_queue_response,
1836 .delete_ctrl = nvmet_tcp_delete_ctrl,
1837 .install_queue = nvmet_tcp_install_queue,
1838 .disc_traddr = nvmet_tcp_disc_port_addr,
1839 };
1840
1841 static int __init nvmet_tcp_init(void)
1842 {
1843 int ret;
1844
1845 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
1846 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1847 if (!nvmet_tcp_wq)
1848 return -ENOMEM;
1849
1850 ret = nvmet_register_transport(&nvmet_tcp_ops);
1851 if (ret)
1852 goto err;
1853
1854 return 0;
1855 err:
1856 destroy_workqueue(nvmet_tcp_wq);
1857 return ret;
1858 }
1859
1860 static void __exit nvmet_tcp_exit(void)
1861 {
1862 struct nvmet_tcp_queue *queue;
1863
1864 nvmet_unregister_transport(&nvmet_tcp_ops);
1865
1866 flush_workqueue(nvmet_wq);
1867 mutex_lock(&nvmet_tcp_queue_mutex);
1868 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1869 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1870 mutex_unlock(&nvmet_tcp_queue_mutex);
1871 flush_workqueue(nvmet_wq);
1872
1873 destroy_workqueue(nvmet_tcp_wq);
1874 }
1875
1876 module_init(nvmet_tcp_init);
1877 module_exit(nvmet_tcp_exit);
1878
1879 MODULE_LICENSE("GPL v2");
1880 MODULE_ALIAS("nvmet-transport-3");