// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
 */

#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

struct nvme_dhchap_queue_context {
    struct list_head entry;
    struct work_struct auth_work;
    struct nvme_ctrl *ctrl;
    struct crypto_shash *shash_tfm;
    struct crypto_kpp *dh_tfm;
    void *buf;
    size_t buf_size;
    int qid;
    int error;
    u32 s1;
    u32 s2;
    u16 transaction;
    u8 status;
    u8 hash_id;
    size_t hash_len;
    u8 dhgroup_id;
    u8 c1[64];
    u8 c2[64];
    u8 response[64];
    u8 *host_response;
    u8 *ctrl_key;
    int ctrl_key_len;
    u8 *host_key;
    int host_key_len;
    u8 *sess_key;
    int sess_key_len;
};

#define nvme_auth_flags_from_qid(qid) \
    (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
#define nvme_auth_queue_from_qid(ctrl, qid) \
    (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q

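/*
 * Transfer one DH-HMAC-CHAP message to or from the controller using a
 * fabrics Authentication Send/Receive command. Admin queue (qid 0)
 * transfers go over the fabrics queue; I/O queue transfers use the
 * connect queue with reserved, non-blocking tags (see the macros above).
 * Returns 0 on success, a positive NVMe status code or a negative errno.
 */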
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
                void *data, size_t data_len, bool auth_send)
{
    struct nvme_command cmd = {};
    blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
    struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
    int ret;

    cmd.auth_common.opcode = nvme_fabrics_command;
    cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
    cmd.auth_common.spsp0 = 0x01;
    cmd.auth_common.spsp1 = 0x01;
    if (auth_send) {
        cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
        cmd.auth_send.tl = cpu_to_le32(data_len);
    } else {
        cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
        cmd.auth_receive.al = cpu_to_le32(data_len);
    }

    ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
                     qid == 0 ? NVME_QID_ANY : qid,
                     0, flags);
    if (ret > 0)
        dev_warn(ctrl->device,
            "qid %d auth_send failed with status %d\n", qid, ret);
    else if (ret < 0)
        dev_err(ctrl->device,
            "qid %d auth_send failed with error %d\n", qid, ret);
    return ret;
}

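/*
 * Sanity-check a received DH-HMAC-CHAP message: an explicit failure1
 * message returns its reason code, otherwise message type, message id
 * and transaction id must match what this exchange expects. Returns 0
 * or a DH-HMAC-CHAP failure reason code.
 */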
static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
        struct nvmf_auth_dhchap_failure_data *data,
        u16 transaction, u8 expected_msg)
{
    dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
        __func__, qid, data->auth_type, data->auth_id);

    if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
        data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
        return data->rescode_exp;
    }
    if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
        data->auth_id != expected_msg) {
        dev_warn(ctrl->device,
             "qid %d invalid message %02x/%02x\n",
             qid, data->auth_type, data->auth_id);
        return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
    }
    if (le16_to_cpu(data->t_id) != transaction) {
        dev_warn(ctrl->device,
             "qid %d invalid transaction ID %d\n",
             qid, le16_to_cpu(data->t_id));
        return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
    }
    return 0;
}

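/*
 * Build the AUTH_Negotiate payload: advertise the DH-HMAC-CHAP protocol
 * with the three supported hashes (SHA-256/384/512) and the supported
 * DH groups (NULL plus ffdhe2048 through ffdhe8192). Returns the payload
 * size, or a negative errno if the buffer is too small.
 */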
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
        struct nvme_dhchap_queue_context *chap)
{
    struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
    size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

    if (chap->buf_size < size) {
        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
        return -EINVAL;
    }
    memset((u8 *)chap->buf, 0, size);
    data->auth_type = NVME_AUTH_COMMON_MESSAGES;
    data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
    data->t_id = cpu_to_le16(chap->transaction);
    data->sc_c = 0; /* No secure channel concatenation */
    data->napd = 1;
    data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
    data->auth_protocol[0].dhchap.halen = 3;
    data->auth_protocol[0].dhchap.dhlen = 6;
    data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
    data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
    data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
    data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
    data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
    data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
    data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
    data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
    data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

    return size;
}

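/*
 * Parse the DH-HMAC-CHAP challenge: select (or reuse) the HMAC transform
 * and the DH group requested by the controller, then stash the controller
 * challenge, the sequence number and, for non-NULL DH groups, the
 * controller public key for the later key exchange. On error chap->status
 * is set and an NVMe status code is returned.
 */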
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
        struct nvme_dhchap_queue_context *chap)
{
    struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
    u16 dhvlen = le16_to_cpu(data->dhvlen);
    size_t size = sizeof(*data) + data->hl + dhvlen;
    const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
    const char *hmac_name, *kpp_name;

    if (chap->buf_size < size) {
        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
        return NVME_SC_INVALID_FIELD;
    }

    hmac_name = nvme_auth_hmac_name(data->hashid);
    if (!hmac_name) {
        dev_warn(ctrl->device,
             "qid %d: invalid HASH ID %d\n",
             chap->qid, data->hashid);
        chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
        return NVME_SC_INVALID_FIELD;
    }

    if (chap->hash_id == data->hashid && chap->shash_tfm &&
        !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
        crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
        dev_dbg(ctrl->device,
            "qid %d: reuse existing hash %s\n",
            chap->qid, hmac_name);
        goto select_kpp;
    }

    /* Reset if hash cannot be reused */
    if (chap->shash_tfm) {
        crypto_free_shash(chap->shash_tfm);
        chap->hash_id = 0;
        chap->hash_len = 0;
    }
    chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
                         CRYPTO_ALG_ALLOCATES_MEMORY);
    if (IS_ERR(chap->shash_tfm)) {
        dev_warn(ctrl->device,
             "qid %d: failed to allocate hash %s, error %ld\n",
             chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
        chap->shash_tfm = NULL;
        chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
        return NVME_SC_AUTH_REQUIRED;
    }

    if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
        dev_warn(ctrl->device,
             "qid %d: invalid hash length %d\n",
             chap->qid, data->hl);
        crypto_free_shash(chap->shash_tfm);
        chap->shash_tfm = NULL;
        chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
        return NVME_SC_AUTH_REQUIRED;
    }

    /* Reset host response if the hash had been changed */
    if (chap->hash_id != data->hashid) {
        kfree(chap->host_response);
        chap->host_response = NULL;
    }

    chap->hash_id = data->hashid;
    chap->hash_len = data->hl;
    dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
        chap->qid, hmac_name);

select_kpp:
    kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
    if (!kpp_name) {
        dev_warn(ctrl->device,
             "qid %d: invalid DH group id %d\n",
             chap->qid, data->dhgid);
        chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
        /* Leave previous dh_tfm intact */
        return NVME_SC_AUTH_REQUIRED;
    }

    /* Clear host and controller key to avoid accidental reuse */
    kfree_sensitive(chap->host_key);
    chap->host_key = NULL;
    chap->host_key_len = 0;
    kfree_sensitive(chap->ctrl_key);
    chap->ctrl_key = NULL;
    chap->ctrl_key_len = 0;

    if (chap->dhgroup_id == data->dhgid &&
        (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
        dev_dbg(ctrl->device,
            "qid %d: reuse existing DH group %s\n",
            chap->qid, gid_name);
        goto skip_kpp;
    }

    /* Reset dh_tfm if it can't be reused */
    if (chap->dh_tfm) {
        crypto_free_kpp(chap->dh_tfm);
        chap->dh_tfm = NULL;
    }

    if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
        if (dhvlen == 0) {
            dev_warn(ctrl->device,
                 "qid %d: empty DH value\n",
                 chap->qid);
            chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
            return NVME_SC_INVALID_FIELD;
        }

        chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
        if (IS_ERR(chap->dh_tfm)) {
            int ret = PTR_ERR(chap->dh_tfm);

            dev_warn(ctrl->device,
                 "qid %d: error %d initializing DH group %s\n",
                 chap->qid, ret, gid_name);
            chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
            chap->dh_tfm = NULL;
            return NVME_SC_AUTH_REQUIRED;
        }
        dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
            chap->qid, gid_name);
    } else if (dhvlen != 0) {
        dev_warn(ctrl->device,
             "qid %d: invalid DH value for NULL DH\n",
             chap->qid);
        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
        return NVME_SC_INVALID_FIELD;
    }
    chap->dhgroup_id = data->dhgid;

skip_kpp:
    chap->s1 = le32_to_cpu(data->seqnum);
    memcpy(chap->c1, data->cval, chap->hash_len);
    if (dhvlen) {
        chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
        if (!chap->ctrl_key) {
            chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
            return NVME_SC_AUTH_REQUIRED;
        }
        chap->ctrl_key_len = dhvlen;
        memcpy(chap->ctrl_key, data->cval + chap->hash_len,
               dhvlen);
        dev_dbg(ctrl->device, "ctrl public key %*ph\n",
             (int)chap->ctrl_key_len, chap->ctrl_key);
    }

    return 0;
}

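/*
 * Build the AUTH_Reply payload: the host response, an optional host
 * challenge (when bidirectional authentication is configured via a
 * controller key) and, for non-NULL DH groups, the host public key.
 * Returns the payload size, or a negative errno if the buffer is too
 * small.
 */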
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
        struct nvme_dhchap_queue_context *chap)
{
    struct nvmf_auth_dhchap_reply_data *data = chap->buf;
    size_t size = sizeof(*data);

    size += 2 * chap->hash_len;

    if (chap->host_key_len)
        size += chap->host_key_len;

    if (chap->buf_size < size) {
        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
        return -EINVAL;
    }

    memset(chap->buf, 0, size);
    data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
    data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
    data->t_id = cpu_to_le16(chap->transaction);
    data->hl = chap->hash_len;
    data->dhvlen = cpu_to_le16(chap->host_key_len);
    memcpy(data->rval, chap->response, chap->hash_len);
    if (ctrl->ctrl_key) {
        get_random_bytes(chap->c2, chap->hash_len);
        data->cvalid = 1;
        chap->s2 = nvme_auth_get_seqnum();
        memcpy(data->rval + chap->hash_len, chap->c2,
               chap->hash_len);
        dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
            __func__, chap->qid, (int)chap->hash_len, chap->c2);
    } else {
        memset(chap->c2, 0, chap->hash_len);
        chap->s2 = 0;
    }
    data->seqnum = cpu_to_le32(chap->s2);
    if (chap->host_key_len) {
        dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
            __func__, chap->qid,
            chap->host_key_len, chap->host_key);
        memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
               chap->host_key_len);
    }

    return size;
}

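/*
 * Handle the Success1 message. If the controller sent a response value
 * (bidirectional authentication), compare it against the expected
 * response computed in nvme_auth_dhchap_setup_ctrl_response().
 */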
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
        struct nvme_dhchap_queue_context *chap)
{
    struct nvmf_auth_dhchap_success1_data *data = chap->buf;
    size_t size = sizeof(*data);

    if (ctrl->ctrl_key)
        size += chap->hash_len;

    if (chap->buf_size < size) {
        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
        return NVME_SC_INVALID_FIELD;
    }

    if (data->hl != chap->hash_len) {
        dev_warn(ctrl->device,
             "qid %d: invalid hash length %u\n",
             chap->qid, data->hl);
        chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
        return NVME_SC_INVALID_FIELD;
    }

    /* Just print out information for the admin queue */
    if (chap->qid == 0)
        dev_info(ctrl->device,
             "qid 0: authenticated with hash %s dhgroup %s\n",
             nvme_auth_hmac_name(chap->hash_id),
             nvme_auth_dhgroup_name(chap->dhgroup_id));

    if (!data->rvalid)
        return 0;

    /* Validate controller response */
    if (memcmp(chap->response, data->rval, data->hl)) {
        dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
            __func__, chap->qid, (int)chap->hash_len, data->rval);
        dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
            __func__, chap->qid, (int)chap->hash_len,
            chap->response);
        dev_warn(ctrl->device,
             "qid %d: controller authentication failed\n",
             chap->qid);
        chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
        return NVME_SC_AUTH_REQUIRED;
    }

    /* Just print out information for the admin queue */
    if (chap->qid == 0)
        dev_info(ctrl->device,
             "qid 0: controller authenticated\n");
    return 0;
}

static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
        struct nvme_dhchap_queue_context *chap)
{
    struct nvmf_auth_dhchap_success2_data *data = chap->buf;
    size_t size = sizeof(*data);

    memset(chap->buf, 0, size);
    data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
    data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
    data->t_id = cpu_to_le16(chap->transaction);

    return size;
}

static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
        struct nvme_dhchap_queue_context *chap)
{
    struct nvmf_auth_dhchap_failure_data *data = chap->buf;
    size_t size = sizeof(*data);

    memset(chap->buf, 0, size);
    data->auth_type = NVME_AUTH_COMMON_MESSAGES;
    data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
    data->t_id = cpu_to_le16(chap->transaction);
    data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
    data->rescode_exp = chap->status;

    return size;
}

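/*
 * Compute the host response: an HMAC, keyed with the transformed host
 * secret, over the (possibly augmented) controller challenge, sequence
 * number, transaction id, the string "HostHost", the host NQN and the
 * subsystem NQN.
 */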
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
        struct nvme_dhchap_queue_context *chap)
{
    SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
    u8 buf[4], *challenge = chap->c1;
    int ret;

    dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
        __func__, chap->qid, chap->s1, chap->transaction);

    if (!chap->host_response) {
        chap->host_response = nvme_auth_transform_key(ctrl->host_key,
                        ctrl->opts->host->nqn);
        if (IS_ERR(chap->host_response)) {
            ret = PTR_ERR(chap->host_response);
            chap->host_response = NULL;
            return ret;
        }
    } else {
        dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
            __func__, chap->qid);
    }

    ret = crypto_shash_setkey(chap->shash_tfm,
            chap->host_response, ctrl->host_key->len);
    if (ret) {
        dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
             chap->qid, ret);
        goto out;
    }

    if (chap->dh_tfm) {
        challenge = kmalloc(chap->hash_len, GFP_KERNEL);
        if (!challenge) {
            ret = -ENOMEM;
            goto out;
        }
        ret = nvme_auth_augmented_challenge(chap->hash_id,
                            chap->sess_key,
                            chap->sess_key_len,
                            chap->c1, challenge,
                            chap->hash_len);
        if (ret)
            goto out;
    }

    shash->tfm = chap->shash_tfm;
    ret = crypto_shash_init(shash);
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, challenge, chap->hash_len);
    if (ret)
        goto out;
    put_unaligned_le32(chap->s1, buf);
    ret = crypto_shash_update(shash, buf, 4);
    if (ret)
        goto out;
    put_unaligned_le16(chap->transaction, buf);
    ret = crypto_shash_update(shash, buf, 2);
    if (ret)
        goto out;
    memset(buf, 0, sizeof(buf));
    ret = crypto_shash_update(shash, buf, 1);
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, "HostHost", 8);
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
                  strlen(ctrl->opts->host->nqn));
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, buf, 1);
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
                strlen(ctrl->opts->subsysnqn));
    if (ret)
        goto out;
    ret = crypto_shash_final(shash, chap->response);
out:
    if (challenge != chap->c1)
        kfree(challenge);
    return ret;
}

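/*
 * Compute the expected controller response for bidirectional
 * authentication: same construction as the host response, but over the
 * host challenge, the controller sequence number, the string
 * "Controller", the subsystem NQN and the host NQN, keyed with the
 * transformed controller secret.
 */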
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
        struct nvme_dhchap_queue_context *chap)
{
    SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
    u8 *ctrl_response;
    u8 buf[4], *challenge = chap->c2;
    int ret;

    ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
                ctrl->opts->subsysnqn);
    if (IS_ERR(ctrl_response)) {
        ret = PTR_ERR(ctrl_response);
        return ret;
    }
    ret = crypto_shash_setkey(chap->shash_tfm,
            ctrl_response, ctrl->ctrl_key->len);
    if (ret) {
        dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
             chap->qid, ret);
        goto out;
    }

    if (chap->dh_tfm) {
        challenge = kmalloc(chap->hash_len, GFP_KERNEL);
        if (!challenge) {
            ret = -ENOMEM;
            goto out;
        }
        ret = nvme_auth_augmented_challenge(chap->hash_id,
                            chap->sess_key,
                            chap->sess_key_len,
                            chap->c2, challenge,
                            chap->hash_len);
        if (ret)
            goto out;
    }
    dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
        __func__, chap->qid, chap->s2, chap->transaction);
    dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
        __func__, chap->qid, (int)chap->hash_len, challenge);
    dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
        __func__, chap->qid, ctrl->opts->subsysnqn);
    dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
        __func__, chap->qid, ctrl->opts->host->nqn);
    shash->tfm = chap->shash_tfm;
    ret = crypto_shash_init(shash);
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, challenge, chap->hash_len);
    if (ret)
        goto out;
    put_unaligned_le32(chap->s2, buf);
    ret = crypto_shash_update(shash, buf, 4);
    if (ret)
        goto out;
    put_unaligned_le16(chap->transaction, buf);
    ret = crypto_shash_update(shash, buf, 2);
    if (ret)
        goto out;
    memset(buf, 0, 4);
    ret = crypto_shash_update(shash, buf, 1);
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, "Controller", 10);
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
                  strlen(ctrl->opts->subsysnqn));
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, buf, 1);
    if (ret)
        goto out;
    ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
                  strlen(ctrl->opts->host->nqn));
    if (ret)
        goto out;
    ret = crypto_shash_final(shash, chap->response);
out:
    if (challenge != chap->c2)
        kfree(challenge);
    kfree(ctrl_response);
    return ret;
}

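/*
 * Perform the DH key exchange for non-NULL DH groups: generate (or reuse)
 * the host key pair and derive the shared session key from the controller
 * public key. The session key is later used to augment both challenges.
 */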
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
        struct nvme_dhchap_queue_context *chap)
{
    int ret;

    if (chap->host_key && chap->host_key_len) {
        dev_dbg(ctrl->device,
            "qid %d: reusing host key\n", chap->qid);
        goto gen_sesskey;
    }
    ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
    if (ret < 0) {
        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
        return ret;
    }

    chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

    chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
    if (!chap->host_key) {
        chap->host_key_len = 0;
        chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
        return -ENOMEM;
    }
    ret = nvme_auth_gen_pubkey(chap->dh_tfm,
                   chap->host_key, chap->host_key_len);
    if (ret) {
        dev_dbg(ctrl->device,
            "failed to generate public key, error %d\n", ret);
        kfree(chap->host_key);
        chap->host_key = NULL;
        chap->host_key_len = 0;
        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
        return ret;
    }

gen_sesskey:
    chap->sess_key_len = chap->host_key_len;
    chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
    if (!chap->sess_key) {
        chap->sess_key_len = 0;
        chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
        return -ENOMEM;
    }

    ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
                      chap->ctrl_key, chap->ctrl_key_len,
                      chap->sess_key, chap->sess_key_len);
    if (ret) {
        dev_dbg(ctrl->device,
            "failed to generate shared secret, error %d\n", ret);
        kfree_sensitive(chap->sess_key);
        chap->sess_key = NULL;
        chap->sess_key_len = 0;
        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
        return ret;
    }
    dev_dbg(ctrl->device, "shared secret %*ph\n",
        (int)chap->sess_key_len, chap->sess_key);
    return 0;
}

static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
{
    kfree_sensitive(chap->host_response);
    chap->host_response = NULL;
    kfree_sensitive(chap->host_key);
    chap->host_key = NULL;
    chap->host_key_len = 0;
    kfree_sensitive(chap->ctrl_key);
    chap->ctrl_key = NULL;
    chap->ctrl_key_len = 0;
    kfree_sensitive(chap->sess_key);
    chap->sess_key = NULL;
    chap->sess_key_len = 0;
    chap->status = 0;
    chap->error = 0;
    chap->s1 = 0;
    chap->s2 = 0;
    chap->transaction = 0;
    memset(chap->c1, 0, sizeof(chap->c1));
    memset(chap->c2, 0, sizeof(chap->c2));
}

static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
{
    __nvme_auth_reset(chap);
    if (chap->shash_tfm)
        crypto_free_shash(chap->shash_tfm);
    if (chap->dh_tfm)
        crypto_free_kpp(chap->dh_tfm);
    kfree_sensitive(chap->ctrl_key);
    kfree_sensitive(chap->host_key);
    kfree_sensitive(chap->sess_key);
    kfree_sensitive(chap->host_response);
    kfree(chap->buf);
    kfree(chap);
}

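/*
 * Per-queue authentication state machine, run from the nvme workqueue.
 * Walks the DH-HMAC-CHAP exchange: negotiate, challenge, reply, success1
 * and (for bidirectional authentication) success2, sending a failure2
 * message if a step fails. The result is left in chap->error for
 * nvme_auth_wait() to pick up.
 */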
static void __nvme_auth_work(struct work_struct *work)
{
    struct nvme_dhchap_queue_context *chap =
        container_of(work, struct nvme_dhchap_queue_context, auth_work);
    struct nvme_ctrl *ctrl = chap->ctrl;
    size_t tl;
    int ret = 0;

    chap->transaction = ctrl->transaction++;

    /* DH-HMAC-CHAP Step 1: send negotiate */
    dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
        __func__, chap->qid);
    ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
    if (ret < 0) {
        chap->error = ret;
        return;
    }
    tl = ret;
    ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
    if (ret) {
        chap->error = ret;
        return;
    }

    /* DH-HMAC-CHAP Step 2: receive challenge */
    dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
        __func__, chap->qid);

    memset(chap->buf, 0, chap->buf_size);
    ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
    if (ret) {
        dev_warn(ctrl->device,
             "qid %d failed to receive challenge, %s %d\n",
             chap->qid, ret < 0 ? "error" : "nvme status", ret);
        chap->error = ret;
        return;
    }
    ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
                     NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
    if (ret) {
        chap->status = ret;
        chap->error = NVME_SC_AUTH_REQUIRED;
        return;
    }

    ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
    if (ret) {
        /* Invalid challenge parameters */
        chap->error = ret;
        goto fail2;
    }

    if (chap->ctrl_key_len) {
        dev_dbg(ctrl->device,
            "%s: qid %d DH exponential\n",
            __func__, chap->qid);
        ret = nvme_auth_dhchap_exponential(ctrl, chap);
        if (ret) {
            chap->error = ret;
            goto fail2;
        }
    }

    dev_dbg(ctrl->device, "%s: qid %d host response\n",
        __func__, chap->qid);
    ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
    if (ret) {
        chap->error = ret;
        goto fail2;
    }

    /* DH-HMAC-CHAP Step 3: send reply */
    dev_dbg(ctrl->device, "%s: qid %d send reply\n",
        __func__, chap->qid);
    ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
    if (ret < 0) {
        chap->error = ret;
        goto fail2;
    }

    tl = ret;
    ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
    if (ret) {
        chap->error = ret;
        goto fail2;
    }

    /* DH-HMAC-CHAP Step 4: receive success1 */
    dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
        __func__, chap->qid);

    memset(chap->buf, 0, chap->buf_size);
    ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
    if (ret) {
        dev_warn(ctrl->device,
             "qid %d failed to receive success1, %s %d\n",
             chap->qid, ret < 0 ? "error" : "nvme status", ret);
        chap->error = ret;
        return;
    }
    ret = nvme_auth_receive_validate(ctrl, chap->qid,
                     chap->buf, chap->transaction,
                     NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
    if (ret) {
        chap->status = ret;
        chap->error = NVME_SC_AUTH_REQUIRED;
        return;
    }

    if (ctrl->ctrl_key) {
        dev_dbg(ctrl->device,
            "%s: qid %d controller response\n",
            __func__, chap->qid);
        ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
        if (ret) {
            chap->error = ret;
            goto fail2;
        }
    }

    ret = nvme_auth_process_dhchap_success1(ctrl, chap);
    if (ret) {
        /* Controller authentication failed */
        chap->error = NVME_SC_AUTH_REQUIRED;
        goto fail2;
    }

    if (ctrl->ctrl_key) {
        /* DH-HMAC-CHAP Step 5: send success2 */
        dev_dbg(ctrl->device, "%s: qid %d send success2\n",
            __func__, chap->qid);
        tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
        ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
        if (ret)
            chap->error = ret;
    }
    if (!ret) {
        chap->error = 0;
        return;
    }

fail2:
    dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
        __func__, chap->qid, chap->status);
    tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
    ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
    /*
     * Only update the error if sending failure2 failed and no other
     * error had been set during authentication.
     */
    if (ret && !chap->error)
        chap->error = ret;
}

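/*
 * Kick off authentication for a single queue: reuse a queued context for
 * this qid if one exists, otherwise allocate a new one, and schedule its
 * work item. The caller collects the result via nvme_auth_wait().
 */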
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
    struct nvme_dhchap_queue_context *chap;

    if (!ctrl->host_key) {
        dev_warn(ctrl->device, "qid %d: no key\n", qid);
        return -ENOKEY;
    }

    if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
        dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
        return -ENOKEY;
    }

    mutex_lock(&ctrl->dhchap_auth_mutex);
    /* Check if the context is already queued */
    list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
        WARN_ON(!chap->buf);
        if (chap->qid == qid) {
            dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
            mutex_unlock(&ctrl->dhchap_auth_mutex);
            flush_work(&chap->auth_work);
            __nvme_auth_reset(chap);
            queue_work(nvme_wq, &chap->auth_work);
            return 0;
        }
    }
    chap = kzalloc(sizeof(*chap), GFP_KERNEL);
    if (!chap) {
        mutex_unlock(&ctrl->dhchap_auth_mutex);
        return -ENOMEM;
    }
    chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
    chap->ctrl = ctrl;

    /*
     * Allocate a large enough buffer for the entire negotiation:
     * 4k should be enough for ffdhe8192.
     */
    chap->buf_size = 4096;
    chap->buf = kzalloc(chap->buf_size, GFP_KERNEL);
    if (!chap->buf) {
        mutex_unlock(&ctrl->dhchap_auth_mutex);
        kfree(chap);
        return -ENOMEM;
    }

    INIT_WORK(&chap->auth_work, __nvme_auth_work);
    list_add(&chap->entry, &ctrl->dhchap_auth_list);
    mutex_unlock(&ctrl->dhchap_auth_mutex);
    queue_work(nvme_wq, &chap->auth_work);
    return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);

int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
    struct nvme_dhchap_queue_context *chap;
    int ret;

    mutex_lock(&ctrl->dhchap_auth_mutex);
    list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
        if (chap->qid != qid)
            continue;
        mutex_unlock(&ctrl->dhchap_auth_mutex);
        flush_work(&chap->auth_work);
        ret = chap->error;
        return ret;
    }
    mutex_unlock(&ctrl->dhchap_auth_mutex);
    return -ENXIO;
}
EXPORT_SYMBOL_GPL(nvme_auth_wait);

void nvme_auth_reset(struct nvme_ctrl *ctrl)
{
    struct nvme_dhchap_queue_context *chap;

    mutex_lock(&ctrl->dhchap_auth_mutex);
    list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
        mutex_unlock(&ctrl->dhchap_auth_mutex);
        flush_work(&chap->auth_work);
        __nvme_auth_reset(chap);
    }
    mutex_unlock(&ctrl->dhchap_auth_mutex);
}
EXPORT_SYMBOL_GPL(nvme_auth_reset);

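/*
 * Controller-wide (re)authentication: authenticate the admin queue first
 * and only then the I/O queues. Note that the I/O queue results are not
 * waited for here; failure is treated as a soft state (see below).
 */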
static void nvme_dhchap_auth_work(struct work_struct *work)
{
    struct nvme_ctrl *ctrl =
        container_of(work, struct nvme_ctrl, dhchap_auth_work);
    int ret, q;

    /* Authenticate admin queue first */
    ret = nvme_auth_negotiate(ctrl, 0);
    if (ret) {
        dev_warn(ctrl->device,
             "qid 0: error %d setting up authentication\n", ret);
        return;
    }
    ret = nvme_auth_wait(ctrl, 0);
    if (ret) {
        dev_warn(ctrl->device,
             "qid 0: authentication failed\n");
        return;
    }

    for (q = 1; q < ctrl->queue_count; q++) {
        ret = nvme_auth_negotiate(ctrl, q);
        if (ret) {
            dev_warn(ctrl->device,
                 "qid %d: error %d setting up authentication\n",
                 q, ret);
            break;
        }
    }

    /*
     * Failure is a soft-state; credentials remain valid until
     * the controller terminates the connection.
     */
}

void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
    INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
    INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
    mutex_init(&ctrl->dhchap_auth_mutex);
    if (!ctrl->opts)
        return;
    nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
    nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key);
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);

void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
    struct nvme_dhchap_queue_context *chap = NULL, *tmp;

    cancel_work_sync(&ctrl->dhchap_auth_work);
    mutex_lock(&ctrl->dhchap_auth_mutex);
    list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
        cancel_work_sync(&chap->auth_work);
    mutex_unlock(&ctrl->dhchap_auth_mutex);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);

void nvme_auth_free(struct nvme_ctrl *ctrl)
{
    struct nvme_dhchap_queue_context *chap = NULL, *tmp;

    mutex_lock(&ctrl->dhchap_auth_mutex);
    list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
        list_del_init(&chap->entry);
        flush_work(&chap->auth_work);
        __nvme_auth_free(chap);
    }
    mutex_unlock(&ctrl->dhchap_auth_mutex);
    if (ctrl->host_key) {
        nvme_auth_free_key(ctrl->host_key);
        ctrl->host_key = NULL;
    }
    if (ctrl->ctrl_key) {
        nvme_auth_free_key(ctrl->ctrl_key);
        ctrl->ctrl_key = NULL;
    }
}
EXPORT_SYMBOL_GPL(nvme_auth_free);