// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/of.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

#include <linux/soc/ixp4xx/cpu.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

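/*
 * NPE operation and mode words: the NPE_OP_* bits are OR'ed into
 * crypt_ctl->mode (and each direction's npe_mode byte), while the MOD_*,
 * KEYLEN_* and CIPH_* values form the cipher config word placed at the
 * start of the NPE context. The encodings below are fixed by the NPE
 * firmware interface.
 */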
#define NPE_OP_HASH_VERIFY 0x01
#define NPE_OP_CCM_ENABLE 0x04
#define NPE_OP_CRYPT_ENABLE 0x08
#define NPE_OP_HASH_ENABLE 0x10
#define NPE_OP_NOT_IN_PLACE 0x20
#define NPE_OP_HMAC_DISABLE 0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC 0xcc
#define NPE_OP_HASH_GEN_ICV 0x50
#define NPE_OP_ENC_GEN_KEY 0xc9

#define MOD_ECB 0x0000
#define MOD_CTR 0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128 4
#define KEYLEN_192 6
#define KEYLEN_256 8

#define CIPH_DECR 0x0000
#define CIPH_ENCR 0x0400

#define MOD_DES 0x0000
#define MOD_TDEA2 0x0100
#define MOD_3DES 0x0200
#define MOD_AES 0x0800
#define MOD_AES128 (0x0800 | KEYLEN_128)
#define MOD_AES192 (0x0900 | KEYLEN_192)
#define MOD_AES256 (0x0a00 | KEYLEN_256)

#define MAX_IVLEN 16
#define NPE_QLEN 16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV 0x0002
#define CTL_FLAG_GEN_REVAES 0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK 0x000f

#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE 16

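/*
 * One link in the source/destination buffer chain handed to the NPE.
 * The leading fields (phys_next, the length pair and phys_addr) appear
 * to be the hardware-visible part -- hence the endian-dependent field
 * order -- while 'next' and 'dir' are host-side bookkeeping used when
 * unmapping and freeing the chain.
 */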
struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	dma_addr_t phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

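/*
 * Job descriptor exchanged with the NPE through the queue manager. The
 * layout up to and including 'crypto_ctx' is presumably fixed by the
 * firmware (again with endian-dependent ordering); the fields from
 * 'ctl_flags' onward are used only by the host to track the request.
 */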
struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;
#endif
	u8 iv[MAX_IVLEN];
	dma_addr_t icv_rev_aes;
	dma_addr_t src_buf;
	dma_addr_t dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;
	u16 auth_len;
	u16 crypt_offs;
	u16 crypt_len;
#else
	u16 auth_len;
	u16 auth_offs;
	u16 crypt_len;
	u16 crypt_offs;
#endif
	u32 aadAddr;
	u32 crypto_ctx;

	/* used by the host only */
	unsigned int ctl_flags;
	union {
		struct skcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	u8 iv[MAX_IVLEN];
	bool encrypt;
	struct skcipher_request fallback_req;	/* keep at the end */
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned int salted;
	atomic_t configuring;
	struct completion completion;
	struct crypto_skcipher *fallback_tfm;
};

struct ixp_alg {
	struct skcipher_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword = 0xAA010004,
	.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
	       "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};

static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword = 0x00000005,
	.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
	       "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;

static unsigned int send_qid;
static unsigned int recv_qid;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static struct platform_device *pdev;

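/*
 * All crypt_ctl descriptors live in a single coherent DMA allocation,
 * so host and NPE addresses convert by plain pointer arithmetic.
 */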
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	return 0;
}

static DEFINE_SPINLOCK(desc_lock);
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

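/*
 * Descriptors NPE_QLEN..NPE_QLEN_TOTAL-1 are an emergency reserve for
 * configuration jobs (HMAC pad hashing, reverse AES key generation), so
 * key setup cannot be starved by in-flight data requests.
 */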
static DEFINE_SPINLOCK(emerg_lock);
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   dma_addr_t phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
					 decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt)
			finish_scattered_hmac(crypt);

		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct skcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		unsigned int ivsize = crypto_skcipher_ivsize(tfm);
		unsigned int offset;

		if (ivsize > 0) {
			offset = req->cryptlen - ivsize;
			if (req_ctx->encrypt) {
				scatterwalk_map_and_copy(req->iv, req->dst,
							 offset, ivsize, 0);
			} else {
				memcpy(req->iv, req_ctx->iv, ivsize);
				memzero_explicit(req_ctx->iv, ivsize);
			}
		}

		if (req_ctx->dst)
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
			      crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

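/*
 * Bottom half: drain up to four completions per run, then reschedule
 * the tasklet so a busy receive queue cannot monopolize the CPU.
 */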
static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(recv_qid);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 msg[2] = { 0, 0 };
	int ret = -ENODEV;
	u32 npe_id;

	dev_info(dev, "probing...\n");

	/* Locate the NPE and queue manager to use from device tree */
	if (IS_ENABLED(CONFIG_OF) && np) {
		struct of_phandle_args queue_spec;
		struct of_phandle_args npe_spec;

		ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
						       1, 0, &npe_spec);
		if (ret) {
			dev_err(dev, "no NPE engine specified\n");
			return -ENODEV;
		}
		npe_id = npe_spec.args[0];

		ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
						       &queue_spec);
		if (ret) {
			dev_err(dev, "no rx queue phandle\n");
			return -ENODEV;
		}
		recv_qid = queue_spec.args[0];

		ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
						       &queue_spec);
		if (ret) {
			dev_err(dev, "no txready queue phandle\n");
			return -ENODEV;
		}
		send_qid = queue_spec.args[0];
	} else {
		/*
		 * Hardcoded engine when using platform data, this goes
		 * away when we switch to using DT only.
		 */
		npe_id = 2;
		send_qid = 29;
		recv_qid = 30;
	}

	npe_c = npe_request(npe_id);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}

	/* buffer_pool is also used to hold a scattered ICV, so make
	 * sure one entry can store a whole digest
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
				      32, 0);
	ret = -ENOMEM;
	if (!buffer_pool)
		goto err;

	ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
	if (!ctx_pool)
		goto err;

	ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(send_qid);
		goto err;
	}
	qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(recv_qid);
	return 0;

npe_error:
	dev_err(dev, "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(recv_qid);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(send_qid);
	qmgr_release_queue(recv_qid);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt)
		dma_free_coherent(dev, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
				  crypt_virt, crypt_phys);
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx)
		return -ENOMEM;

	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret)
		free_sa_dir(&ctx->encrypt);

	return ret;
}

static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
	struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
	struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
	const char *name = crypto_tfm_alg_name(ctfm);

	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm)) {
		pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
		       name, PTR_ERR(ctx->fallback_tfm));
		return PTR_ERR(ctx->fallback_tfm);
	}

	pr_info("Fallback for %s is %s\n",
		crypto_tfm_alg_driver_name(&tfm->base),
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
		);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
	return init_tfm(crypto_skcipher_tfm(tfm));
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
	struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
	struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);

	crypto_free_skcipher(ctx->fallback_tfm);
	exit_tfm(crypto_skcipher_tfm(tfm));
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

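/*
 * Precompute one half of the HMAC state: hash a single block of
 * key XOR ipad/opad on the NPE and let it deposit the intermediate
 * digest at 'target' inside the per-direction context, from which
 * later requests chain.
 */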
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
			      int init_len, u32 ctx_addr, const u8 *key,
			      int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	dma_addr_t pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
		pad[i] ^= xpad;

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(send_qid));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
		      const u8 *key, int key_len, unsigned int digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
		  + sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
				 init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
				  init_len, npe_ctx_addr, key, key_len);
}

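/*
 * AES decryption needs the reverse (decryption) key schedule. As the
 * opcode name suggests, this queues a one-block "encrypt and generate
 * key" job; the firmware then apparently writes the reverse key back
 * into the decrypt context just past the config word (icv_rev_aes),
 * and the completion handler clears the temporary CIPH_ENCR bit again.
 */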
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt)
		return -EAGAIN;

	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(send_qid));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
			int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16:
			keylen_cfg = MOD_AES128;
			break;
		case 24:
			keylen_cfg = MOD_AES192;
			break;
		case 32:
			keylen_cfg = MOD_AES256;
			break;
		default:
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else {
		err = crypto_des_verify_key(tfm, key);
		if (err)
			return err;
	}

	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt)
		return gen_rev_aes_key(tfm);

	return 0;
}

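/*
 * Walk the scatterlist, DMA-map each segment and link it into a chain
 * of buffer_desc entries. The caller passes a throwaway "hook"
 * descriptor on its stack; the real head of the chain is found in
 * hook.next / hook.phys_next afterwards. Returns the last descriptor,
 * or NULL on allocation failure.
 */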
static struct buffer_desc *chainup_buffers(struct device *dev,
					   struct scatterlist *sg, unsigned int nbytes,
					   struct buffer_desc *buf, gfp_t flags,
					   enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned int len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		dma_addr_t next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = sg_virt(sg);
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			buf = NULL;
			break;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	if (buf) {
		buf->next = NULL;
		buf->phys_next = 0;
	}
	return buf;
}

static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
		       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	if (ret)
		return ret;
	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
}

static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int key_len)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       ablk_setkey(tfm, key, key_len);
}

static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *rctx = skcipher_request_ctx(areq);
	int err;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (encrypt)
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	return err;
}

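/*
 * Queue one skcipher request to the NPE. Requests with more than one
 * src or dst segment are handed to the software fallback instead of
 * the hardware path. For decryption, the last input block is saved
 * up front so the completion handler can return the chaining IV.
 */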
static int ablk_perform(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->cryptlen;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	unsigned int offset;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		      GFP_KERNEL : GFP_ATOMIC;

	if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
		return ixp4xx_cipher_fallback(req, encrypt);

	if (qmgr_stat_full(send_qid))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	req_ctx->encrypt = encrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);
	if (ivsize > 0 && !encrypt) {
		offset = req->cryptlen - ivsize;
		scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
	}
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
				     flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
			     src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(send_qid));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst)
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->iv;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->iv = iv;
	ret = ablk_perform(req, 1);
	req->iv = info;
	return ret;
}

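/*
 * Queue one AEAD request. The ICV normally sits at the tail of the
 * last buffer (crypt->icv_rev_aes points at it); if it straddles
 * segment boundaries it is bounced through a small buffer_pool
 * allocation (hmac_virt), which finish_scattered_hmac() copies in or
 * out on completion.
 */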
static int aead_perform(struct aead_request *req, int encrypt,
			int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		      GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(send_qid))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The ICV is scattered across sg entries:
		 * copy it through a contiguous pool buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
						    &crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
						 req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(send_qid));
	return -EINPROGRESS;

free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
	if (err)
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

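/*
 * Algorithm tables. The NPE context always carries a 24-byte key
 * field, so the DES entries use KEYLEN_192 and setup_cipher()
 * zero-pads shorter keys; AES entries get their KEYLEN_* bits patched
 * in at setkey time. CTR mode runs the encrypt engine (CIPH_ENCR) in
 * both directions.
 */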
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto = {
		.base.cra_name = "cbc(des)",
		.base.cra_blocksize = DES_BLOCK_SIZE,

		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto = {
		.base.cra_name = "ecb(des)",
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.base.cra_name = "cbc(des3_ede)",
		.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,

		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.setkey = ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base.cra_name = "ecb(des3_ede)",
		.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,

		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.setkey = ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.base.cra_name = "cbc(aes)",
		.base.cra_blocksize = AES_BLOCK_SIZE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.base.cra_name = "ecb(aes)",
		.base.cra_blocksize = AES_BLOCK_SIZE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto = {
		.base.cra_name = "ctr(aes)",
		.base.cra_blocksize = 1,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto = {
		.base.cra_name = "rfc3686(ctr(aes))",
		.base.cra_blocksize = 1,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = ablk_rfc3686_setkey,
		.encrypt = ablk_rfc3686_crypt,
		.decrypt = ablk_rfc3686_crypt,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des))",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
		.setkey = des3_aead_setkey,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
		.setkey = des3_aead_setkey,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize = AES_BLOCK_SIZE,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize = AES_BLOCK_SIZE,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static int ixp_crypto_probe(struct platform_device *_pdev)
{
	struct device *dev = &_pdev->dev;
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = _pdev;

	err = init_ixp_crypto(dev);
	if (err)
		return err;

	for (i = 0; i < num; i++) {
		struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
			continue;

		/* block ciphers */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY |
				      CRYPTO_ALG_NEED_FALLBACK;
		if (!cra->setkey)
			cra->setkey = ablk_setkey;
		if (!cra->encrypt)
			cra->encrypt = ablk_encrypt;
		if (!cra->decrypt)
			cra->decrypt = ablk_decrypt;
		cra->init = init_tfm_ablk;
		cra->exit = exit_tfm_ablk;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;
		if (crypto_register_skcipher(cra))
			dev_err(&pdev->dev, "Failed to register '%s'\n",
				cra->base.cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY;
		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			dev_err(&pdev->dev, "Failed to register '%s'\n",
				cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static int ixp_crypto_remove(struct platform_device *pdev)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);

	return 0;
}

static const struct of_device_id ixp4xx_crypto_of_match[] = {
	{
		.compatible = "intel,ixp4xx-crypto",
	},
	{},
};

static struct platform_driver ixp_crypto_driver = {
	.probe = ixp_crypto_probe,
	.remove = ixp_crypto_remove,
	.driver = {
		.name = "ixp4xx_crypto",
		.of_match_table = ixp4xx_crypto_of_match,
	},
};
module_platform_driver(ixp_crypto_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");