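/*
 * NVMe-over-Fabrics in-band authentication (DH-HMAC-CHAP) command
 * handling for the NVMe target: processing of the Authentication Send
 * and Authentication Receive fabrics commands.
 */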
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include "nvmet.h"

static void nvmet_auth_expired_work(struct work_struct *work)
{
	struct nvmet_sq *sq = container_of(to_delayed_work(work),
			struct nvmet_sq, auth_expired_work);

	pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
		 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	sq->dhchap_tid = -1;
}

void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	u32 result = le32_to_cpu(req->cqe->result.u32);

	/* Initialize in-band authentication */
	INIT_DELAYED_WORK(&req->sq->auth_expired_work,
			  nvmet_auth_expired_work);
	req->sq->authenticated = false;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
	req->cqe->result.u32 = cpu_to_le32(result);
}

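/*
 * Handle an AUTH_Negotiate message from the host.
 *
 * The host advertises the hash functions and DH groups it supports via
 * the id list of the DH-HMAC-CHAP protocol descriptor; as parsed below,
 * the hash identifiers occupy the start of ->idlist and the DH group
 * identifiers begin at offset 30 (hence the 'i + 30' indexing):
 *
 *	idlist[0 .. halen - 1]       : hash function identifiers
 *	idlist[30 .. 30 + dhlen - 1] : DH group identifiers
 *
 * The target prefers its configured hash and DH group and falls back to
 * the first host-offered alternative for which a shash/kpp
 * implementation is available.
 */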
static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_negotiate_data *data = d;
	int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;

	pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
		 data->auth_protocol[0].dhchap.halen,
		 data->auth_protocol[0].dhchap.dhlen);
	req->sq->dhchap_tid = le16_to_cpu(data->t_id);
	if (data->sc_c)
		return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;

	if (data->napd != 1)
		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;

	if (data->auth_protocol[0].dhchap.authid !=
	    NVME_AUTH_DHCHAP_AUTH_ID)
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;

	for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
		u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];

		if (!fallback_hash_id &&
		    crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
			fallback_hash_id = host_hmac_id;
		if (ctrl->shash_id != host_hmac_id)
			continue;
		hash_id = ctrl->shash_id;
		break;
	}
	if (hash_id == 0) {
		if (fallback_hash_id == 0) {
			pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_hmac_name(fallback_hash_id));
		ctrl->shash_id = fallback_hash_id;
	}

	dhgid = -1;
	fallback_dhgid = -1;
	for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
		int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];

		/* Prefer the DH group configured on the controller */
		if (tmp_dhgid == ctrl->dh_gid) {
			dhgid = tmp_dhgid;
			break;
		}
		if (fallback_dhgid < 0) {
			const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);

			if (crypto_has_kpp(kpp, 0, 0))
				fallback_dhgid = tmp_dhgid;
		}
	}
	if (dhgid < 0) {
		if (fallback_dhgid < 0) {
			pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: configured DH group not usable, falling back to %s\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_dhgroup_name(fallback_dhgid));
		ctrl->dh_gid = fallback_dhgid;
	}
	pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
	return 0;
}

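/*
 * Handle a DH-HMAC-CHAP_Reply message from the host.
 *
 * As accessed below, the variable-length ->rval area carries, in order:
 *
 *	rval[0 .. hl - 1]            : host response
 *	rval[hl .. 2*hl - 1]         : host challenge C2 (if ->cvalid)
 *	rval[2*hl .. 2*hl + dhvlen-1]: host DH public value (if dhvlen)
 *
 * The host response is recomputed and compared; C2 and the sequence
 * number are stashed for the later DH-HMAC-CHAP_Success1 reply when the
 * host requests bidirectional authentication.
 */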
static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_reply_data *data = d;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	u8 *response;

	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->hl, data->cvalid, dhvlen);

	if (dhvlen) {
		if (!ctrl->dh_tfm)
			return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
					    dhvlen) < 0)
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
	}

	response = kmalloc(data->hl, GFP_KERNEL);
	if (!response)
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;

	if (!ctrl->host_key) {
		pr_warn("ctrl %d qid %d no host key\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
		pr_debug("ctrl %d qid %d host hash failed\n",
			 ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}

	if (memcmp(data->rval, response, data->hl)) {
		pr_info("ctrl %d qid %d host response mismatch\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	kfree(response);
	pr_debug("%s: ctrl %d qid %d host authenticated\n",
		 __func__, ctrl->cntlid, req->sq->qid);
	if (data->cvalid) {
		req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
					     GFP_KERNEL);
		if (!req->sq->dhchap_c2)
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;

		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
			 req->sq->dhchap_c2);
		req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
	} else {
		req->sq->authenticated = true;
		req->sq->dhchap_c2 = NULL;
	}

	return 0;
}

static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	return data->rescode_exp;
}

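/*
 * Authentication Send: the host pushes the next DH-HMAC-CHAP message
 * (negotiate, reply, success2 or failure2) to the target. The per-queue
 * dhchap_step tracks the message expected next; on errors the step is
 * moved to a failure state so that the following Authentication Receive
 * returns the corresponding failure message to the host.
 */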
void nvmet_execute_auth_send(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_success2_data *data;
	void *d;
	u32 tl;
	u16 status = 0;

	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, secp);
		goto done;
	}
	if (req->cmd->auth_send.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_send.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp1);
		goto done;
	}
	tl = le32_to_cpu(req->cmd->auth_send.tl);
	if (!tl) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, tl);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, tl)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
		return;
	}

	d = kmalloc(tl, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}

	status = nvmet_copy_from_sgl(req, 0, d, tl);
	if (status) {
		kfree(d);
		goto done;
	}

	data = d;
	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
		 req->sq->dhchap_step);
	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
		goto done_failure1;
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
			/* Restart negotiation */
			pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
				 ctrl->cntlid, req->sq->qid);
			if (!req->sq->qid) {
				if (nvmet_setup_auth(ctrl) < 0) {
					status = NVME_SC_INTERNAL;
					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
					       ctrl->cntlid);
					goto done_failure1;
				}
			}
			req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		} else if (data->auth_id != req->sq->dhchap_step)
			goto done_failure1;
		/* Validate negotiation parameters */
		status = nvmet_auth_negotiate(req, d);
		if (status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = status;
			status = 0;
		}
		goto done_kfree;
	}
	if (data->auth_id != req->sq->dhchap_step) {
		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 data->auth_id, req->sq->dhchap_step);
		goto done_failure1;
	}
	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 le16_to_cpu(data->t_id),
			 req->sq->dhchap_tid);
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		goto done_kfree;
	}

	switch (data->auth_id) {
	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
		status = nvmet_auth_reply(req, d);
		if (status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = status;
			status = 0;
		}
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
		req->sq->authenticated = true;
		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
		status = nvmet_auth_failure2(req, d);
		if (status) {
			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			status = 0;
		}
		goto done_kfree;
	default:
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
		req->sq->authenticated = false;
		goto done_kfree;
	}
done_failure1:
	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;

done_kfree:
	kfree(d);
done:
	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid,
		 req->sq->dhchap_status, req->sq->dhchap_step);
	if (status)
		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 status, req->error_loc);
	req->cqe->result.u64 = 0;
	nvmet_req_complete(req, status);
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;

		mod_delayed_work(system_wq, &req->sq->auth_expired_work,
				 auth_expire_secs * HZ);
		return;
	}
	/* Final states, clean up the per-queue authentication data */
	nvmet_auth_sq_free(req->sq);
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
		nvmet_ctrl_fatal_error(ctrl);
}

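/*
 * Build a DH-HMAC-CHAP_Challenge message.
 *
 * The payload carries the random challenge C1 (hl bytes in ->cval)
 * followed, when a DH transform is configured, by the target's DH
 * public value (dh_keysize bytes appended directly after the
 * challenge).
 */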
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_challenge_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int ret = 0;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
	int data_size = sizeof(*data) + hash_len;

	if (ctrl->dh_tfm)
		data_size += ctrl->dh_keysize;
	if (al < data_size) {
		pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
			 al, data_size);
		return -EINVAL;
	}
	memset(data, 0, data_size);
	req->sq->dhchap_s1 = nvme_auth_get_seqnum();
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hashid = ctrl->shash_id;
	data->hl = hash_len;
	data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
	req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
	if (!req->sq->dhchap_c1)
		return -ENOMEM;
	get_random_bytes(req->sq->dhchap_c1, data->hl);
	memcpy(data->cval, req->sq->dhchap_c1, data->hl);
	if (ctrl->dh_tfm) {
		data->dhgid = ctrl->dh_gid;
		data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
		ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
						  ctrl->dh_keysize);
	}
	pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
		 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
		 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
	return ret;
}

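/*
 * Build a DH-HMAC-CHAP_Success1 message. If the host supplied a
 * challenge C2, the target response is calculated and returned with
 * ->rvalid set, completing bidirectional authentication.
 */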
static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_success1_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);

	WARN_ON(al < sizeof(*data));
	memset(data, 0, sizeof(*data));
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hl = hash_len;
	if (req->sq->dhchap_c2) {
		if (!ctrl->ctrl_key) {
			pr_warn("ctrl %d qid %d no ctrl key\n",
				ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;
		}
		if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		data->rvalid = 1;
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
	}
	return 0;
}

static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	WARN_ON(al < sizeof(*data));
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = req->sq->dhchap_status;
}

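/*
 * Authentication Receive: the host fetches the next DH-HMAC-CHAP
 * message from the target (challenge, success1 or failure1), selected
 * by the current dhchap_step of the queue.
 */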
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	void *d;
	u32 al;
	u16 status = 0;

	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, secp);
		goto done;
	}
	if (req->cmd->auth_receive.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_receive.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp1);
		goto done;
	}
	al = le32_to_cpu(req->cmd->auth_receive.al);
	if (!al) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, al);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, al)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
		return;
	}

	d = kmalloc(al, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}
	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
	switch (req->sq->dhchap_step) {
	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
		if (nvmet_auth_challenge(req, d, al) < 0) {
			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			status = NVME_SC_INTERNAL;
			break;
		}
		if (status) {
			req->sq->dhchap_status = status;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: challenge status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			status = 0;
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
		status = nvmet_auth_success1(req, d, al);
		if (status) {
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
		req->sq->authenticated = false;
		nvmet_auth_failure1(req, d, al);
		pr_warn("ctrl %d qid %d failure1 (%x)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
		break;
	default:
		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		nvmet_auth_failure1(req, d, al);
		status = 0;
		break;
	}

	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
done:
	req->cqe->result.u64 = 0;
	nvmet_req_complete(req, status);
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
		nvmet_auth_sq_free(req->sq);
	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}
}