// SPDX-License-Identifier: GPL-2.0
//
// Cryptographic API.
//
// Support for Samsung S5PV210 and Exynos SSS (Security SubSystem) hardware
// acceleration of AES (ECB/CBC/CTR) and HASH (MD5/SHA1/SHA256) operations.

0012 #include <linux/clk.h>
0013 #include <linux/crypto.h>
0014 #include <linux/dma-mapping.h>
0015 #include <linux/err.h>
0016 #include <linux/errno.h>
0017 #include <linux/init.h>
0018 #include <linux/interrupt.h>
0019 #include <linux/io.h>
0020 #include <linux/kernel.h>
0021 #include <linux/module.h>
0022 #include <linux/of.h>
0023 #include <linux/of_device.h>
0024 #include <linux/platform_device.h>
0025 #include <linux/scatterlist.h>
0026
0027 #include <crypto/ctr.h>
0028 #include <crypto/aes.h>
0029 #include <crypto/algapi.h>
0030 #include <crypto/scatterwalk.h>
0031
0032 #include <crypto/hash.h>
0033 #include <crypto/md5.h>
0034 #include <crypto/sha1.h>
0035 #include <crypto/sha2.h>
0036 #include <crypto/internal/hash.h>
0037
0038 #define _SBF(s, v) ((v) << (s))

/* Feed control registers */
0041 #define SSS_REG_FCINTSTAT 0x0000
0042 #define SSS_FCINTSTAT_HPARTINT BIT(7)
0043 #define SSS_FCINTSTAT_HDONEINT BIT(5)
0044 #define SSS_FCINTSTAT_BRDMAINT BIT(3)
0045 #define SSS_FCINTSTAT_BTDMAINT BIT(2)
0046 #define SSS_FCINTSTAT_HRDMAINT BIT(1)
0047 #define SSS_FCINTSTAT_PKDMAINT BIT(0)
0048
0049 #define SSS_REG_FCINTENSET 0x0004
0050 #define SSS_FCINTENSET_HPARTINTENSET BIT(7)
0051 #define SSS_FCINTENSET_HDONEINTENSET BIT(5)
0052 #define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
0053 #define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
0054 #define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
0055 #define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
0056
0057 #define SSS_REG_FCINTENCLR 0x0008
0058 #define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
0059 #define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
0060 #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
0061 #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
0062 #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
0063 #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
0064
0065 #define SSS_REG_FCINTPEND 0x000C
0066 #define SSS_FCINTPEND_HPARTINTP BIT(7)
0067 #define SSS_FCINTPEND_HDONEINTP BIT(5)
0068 #define SSS_FCINTPEND_BRDMAINTP BIT(3)
0069 #define SSS_FCINTPEND_BTDMAINTP BIT(2)
0070 #define SSS_FCINTPEND_HRDMAINTP BIT(1)
0071 #define SSS_FCINTPEND_PKDMAINTP BIT(0)
0072
0073 #define SSS_REG_FCFIFOSTAT 0x0010
0074 #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
0075 #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
0076 #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
0077 #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
0078 #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
0079 #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
0080 #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
0081 #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
0082
0083 #define SSS_REG_FCFIFOCTRL 0x0014
0084 #define SSS_FCFIFOCTRL_DESSEL BIT(2)
0085 #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
0086 #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
0087 #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
0088 #define SSS_HASHIN_MASK _SBF(0, 0x03)
0089
0090 #define SSS_REG_FCBRDMAS 0x0020
0091 #define SSS_REG_FCBRDMAL 0x0024
0092 #define SSS_REG_FCBRDMAC 0x0028
0093 #define SSS_FCBRDMAC_BYTESWAP BIT(1)
0094 #define SSS_FCBRDMAC_FLUSH BIT(0)
0095
0096 #define SSS_REG_FCBTDMAS 0x0030
0097 #define SSS_REG_FCBTDMAL 0x0034
0098 #define SSS_REG_FCBTDMAC 0x0038
0099 #define SSS_FCBTDMAC_BYTESWAP BIT(1)
0100 #define SSS_FCBTDMAC_FLUSH BIT(0)
0101
0102 #define SSS_REG_FCHRDMAS 0x0040
0103 #define SSS_REG_FCHRDMAL 0x0044
0104 #define SSS_REG_FCHRDMAC 0x0048
0105 #define SSS_FCHRDMAC_BYTESWAP BIT(1)
0106 #define SSS_FCHRDMAC_FLUSH BIT(0)
0107
0108 #define SSS_REG_FCPKDMAS 0x0050
0109 #define SSS_REG_FCPKDMAL 0x0054
0110 #define SSS_REG_FCPKDMAC 0x0058
0111 #define SSS_FCPKDMAC_BYTESWAP BIT(3)
0112 #define SSS_FCPKDMAC_DESCEND BIT(2)
0113 #define SSS_FCPKDMAC_TRANSMIT BIT(1)
0114 #define SSS_FCPKDMAC_FLUSH BIT(0)
0115
0116 #define SSS_REG_FCPKDMAO 0x005C

/* AES registers */
0119 #define SSS_REG_AES_CONTROL 0x00
0120 #define SSS_AES_BYTESWAP_DI BIT(11)
0121 #define SSS_AES_BYTESWAP_DO BIT(10)
0122 #define SSS_AES_BYTESWAP_IV BIT(9)
0123 #define SSS_AES_BYTESWAP_CNT BIT(8)
0124 #define SSS_AES_BYTESWAP_KEY BIT(7)
0125 #define SSS_AES_KEY_CHANGE_MODE BIT(6)
0126 #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
0127 #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
0128 #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
0129 #define SSS_AES_FIFO_MODE BIT(3)
0130 #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
0131 #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
0132 #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
0133 #define SSS_AES_MODE_DECRYPT BIT(0)
0134
0135 #define SSS_REG_AES_STATUS 0x04
0136 #define SSS_AES_BUSY BIT(2)
0137 #define SSS_AES_INPUT_READY BIT(1)
0138 #define SSS_AES_OUTPUT_READY BIT(0)
0139
0140 #define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
0141 #define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
0142 #define SSS_REG_AES_IV_DATA(s) (0x30 + (s << 2))
0143 #define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
0144 #define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))
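
/*
 * The AES data registers are banks of 32-bit words addressed by index 's':
 * IN/OUT/IV/CNT each hold one 16-byte block (words 0..3) and KEY_DATA holds
 * up to a 256-bit key (words 0..7); (s << 2) turns the index into a byte
 * offset.
 */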
0145
0146 #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
0147 #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
0148 #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
0149
0150 #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
0151 #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
0152 SSS_AES_REG(dev, reg))

/* Driver-side request mode flags, kept in struct s5p_aes_reqctx->mode */
0155 #define FLAGS_AES_DECRYPT BIT(0)
0156 #define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
0157 #define FLAGS_AES_CBC _SBF(1, 0x01)
0158 #define FLAGS_AES_CTR _SBF(1, 0x02)
0159
0160 #define AES_KEY_LEN 16
0161 #define CRYPTO_QUEUE_LEN 1

/* HASH registers */
0164 #define SSS_REG_HASH_CTRL 0x00
0165
0166 #define SSS_HASH_USER_IV_EN BIT(5)
0167 #define SSS_HASH_INIT_BIT BIT(4)
0168 #define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
0169 #define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
0170 #define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
0171
0172 #define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
0173
0174 #define SSS_REG_HASH_CTRL_PAUSE 0x04
0175
0176 #define SSS_HASH_PAUSE BIT(0)
0177
0178 #define SSS_REG_HASH_CTRL_FIFO 0x08
0179
0180 #define SSS_HASH_FIFO_MODE_DMA BIT(0)
0181 #define SSS_HASH_FIFO_MODE_CPU 0
0182
0183 #define SSS_REG_HASH_CTRL_SWAP 0x0C
0184
0185 #define SSS_HASH_BYTESWAP_DI BIT(3)
0186 #define SSS_HASH_BYTESWAP_DO BIT(2)
0187 #define SSS_HASH_BYTESWAP_IV BIT(1)
0188 #define SSS_HASH_BYTESWAP_KEY BIT(0)
0189
0190 #define SSS_REG_HASH_STATUS 0x10
0191
0192 #define SSS_HASH_STATUS_MSG_DONE BIT(6)
0193 #define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
0194 #define SSS_HASH_STATUS_BUFFER_READY BIT(0)
0195
0196 #define SSS_REG_HASH_MSG_SIZE_LOW 0x20
0197 #define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
0198
0199 #define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
0200 #define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
0201
0202 #define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
0203 #define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
0204
0205 #define HASH_BLOCK_SIZE 64
0206 #define HASH_REG_SIZEOF 4
0207 #define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
0208 #define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
0209 #define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
0210
/*
 * HASH state bit numbers, set, cleared and tested in dev->hash_flags to
 * track whether the HASH part is busy, whether the current operation is
 * final, DMA activity and output readiness, and whether scatterlists were
 * copied or allocated and must be freed when the request finishes.
 */
0217 #define HASH_FLAGS_BUSY 0
0218 #define HASH_FLAGS_FINAL 1
0219 #define HASH_FLAGS_DMA_ACTIVE 2
0220 #define HASH_FLAGS_OUTPUT_READY 3
0221 #define HASH_FLAGS_DMA_READY 4
0222 #define HASH_FLAGS_SGS_COPIED 5
0223 #define HASH_FLAGS_SGS_ALLOCED 6

/* HASH HW constants */
0226 #define BUFLEN HASH_BLOCK_SIZE
0227
0228 #define SSS_HASH_DMA_LEN_ALIGN 8
0229 #define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
0230
0231 #define SSS_HASH_QUEUE_LENGTH 10
0232
/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from the SSS module's base
 * @hash_offset: HASH register offset from the SSS module's base
 * @clk_names: names of the clocks needed to run the SSS IP
 */
0243 struct samsung_aes_variant {
0244 unsigned int aes_offset;
0245 unsigned int hash_offset;
0246 const char *clk_names[2];
0247 };
0248
0249 struct s5p_aes_reqctx {
0250 unsigned long mode;
0251 };
0252
0253 struct s5p_aes_ctx {
0254 struct s5p_aes_dev *dev;
0255
0256 u8 aes_key[AES_MAX_KEY_SIZE];
0257 u8 nonce[CTR_RFC3686_NONCE_SIZE];
0258 int keylen;
0259 };
0260
/**
 * struct s5p_aes_dev - crypto device state container
 * @dev:	associated platform device
 * @clk:	operating clock of the SSS module
 * @pclk:	optional bus clock, used only by variants that list two clocks
 * @ioaddr:	mapped IO memory region of the feed control block
 * @aes_ioaddr:	variant specific offset of the AES register block
 * @irq_fc:	feed control interrupt line
 * @req:	skcipher request currently handled by the device
 * @ctx:	transformation context of the current request
 * @sg_src:	DMA-mapped scatterlist entry with source data
 * @sg_dst:	DMA-mapped scatterlist entry with destination data
 * @sg_src_cpy:	bounce scatterlist used when the source is unaligned
 * @sg_dst_cpy:	bounce scatterlist used when the destination is unaligned
 * @tasklet:	tasklet that dequeues and starts new AES requests
 * @queue:	queue of pending skcipher requests
 * @busy:	true while the device handles a request; protects the
 *		request related fields above
 * @lock:	protects the hardware registers and the current request state
 * @res:	memory resource of the SSS block, resized for the HASH part
 * @io_hash_base: variant specific offset of the HASH register block
 * @hash_lock:	protects hash_req, hash_queue and hash_flags
 * @hash_flags:	HASH_FLAGS_* state bits of the current hash operation
 * @hash_queue:	queue of pending ahash requests
 * @hash_tasklet: tasklet that finishes and restarts HASH requests
 * @xmit_buf:	buffer for data transmitted to the HASH block
 * @hash_req:	ahash request currently handled by the HASH block
 * @hash_sg_iter: scatterlist entry currently fed to the HASH DMA
 * @hash_sg_cnt: number of scatterlist entries left for the HASH DMA
 * @use_hash:	true if the HASH algorithms are registered
 */
0301 struct s5p_aes_dev {
0302 struct device *dev;
0303 struct clk *clk;
0304 struct clk *pclk;
0305 void __iomem *ioaddr;
0306 void __iomem *aes_ioaddr;
0307 int irq_fc;
0308
0309 struct skcipher_request *req;
0310 struct s5p_aes_ctx *ctx;
0311 struct scatterlist *sg_src;
0312 struct scatterlist *sg_dst;
0313
0314 struct scatterlist *sg_src_cpy;
0315 struct scatterlist *sg_dst_cpy;
0316
0317 struct tasklet_struct tasklet;
0318 struct crypto_queue queue;
0319 bool busy;
0320 spinlock_t lock;
0321
0322 struct resource *res;
0323 void __iomem *io_hash_base;
0324
0325 spinlock_t hash_lock;
0326 unsigned long hash_flags;
0327 struct crypto_queue hash_queue;
0328 struct tasklet_struct hash_tasklet;
0329
0330 u8 xmit_buf[BUFLEN];
0331 struct ahash_request *hash_req;
0332 struct scatterlist *hash_sg_iter;
0333 unsigned int hash_sg_cnt;
0334
0335 bool use_hash;
0336 };
0337
/**
 * struct s5p_hash_reqctx - HASH request context
 * @dd:		associated s5p_aes_dev
 * @op_update:	true for update operations, false for final/digest
 * @digcnt:	number of bytes already processed by the hardware
 * @digest:	intermediate or final digest value
 * @nregs:	number of 32-bit digest registers for the chosen algorithm
 * @engine:	SSS_HASH_ENGINE_* selector for the chosen algorithm
 * @sg:		scatterlist used for the DMA transfer
 * @sg_len:	number of entries in @sg
 * @sgl:	local scatterlist for joining xmit_buf and req->src
 * @skip:	bytes of req->src already consumed into the buffer
 * @total:	total number of bytes to process in the current transfer
 * @finup:	true when the request finishes the message (finup/final)
 * @error:	true if an error occurred while handling the request
 * @bufcnt:	number of bytes currently held in @buffer
 * @buffer:	bytes that do not yet form a full block, kept for later
 */
0356 struct s5p_hash_reqctx {
0357 struct s5p_aes_dev *dd;
0358 bool op_update;
0359
0360 u64 digcnt;
0361 u8 digest[SHA256_DIGEST_SIZE];
0362
0363 unsigned int nregs;
0364 u32 engine;
0365
0366 struct scatterlist *sg;
0367 unsigned int sg_len;
0368 struct scatterlist sgl[2];
0369 unsigned int skip;
0370 unsigned int total;
0371 bool finup;
0372 bool error;
0373
0374 u32 bufcnt;
0375 u8 buffer[];
0376 };
0377
/**
 * struct s5p_hash_ctx - HASH transformation context
 * @dd:		associated s5p_aes_dev
 * @flags:	transformation flags
 * @fallback:	software shash used for messages shorter than one block
 */
0384 struct s5p_hash_ctx {
0385 struct s5p_aes_dev *dd;
0386 unsigned long flags;
0387 struct crypto_shash *fallback;
0388 };
0389
0390 static const struct samsung_aes_variant s5p_aes_data = {
0391 .aes_offset = 0x4000,
0392 .hash_offset = 0x6000,
0393 .clk_names = { "secss", },
0394 };
0395
0396 static const struct samsung_aes_variant exynos_aes_data = {
0397 .aes_offset = 0x200,
0398 .hash_offset = 0x400,
0399 .clk_names = { "secss", },
0400 };
0401
0402 static const struct samsung_aes_variant exynos5433_slim_aes_data = {
0403 .aes_offset = 0x400,
0404 .hash_offset = 0x800,
0405 .clk_names = { "aclk", "pclk", },
0406 };
0407
0408 static const struct of_device_id s5p_sss_dt_match[] = {
0409 {
0410 .compatible = "samsung,s5pv210-secss",
0411 .data = &s5p_aes_data,
0412 },
0413 {
0414 .compatible = "samsung,exynos4210-secss",
0415 .data = &exynos_aes_data,
0416 },
0417 {
0418 .compatible = "samsung,exynos5433-slim-sss",
0419 .data = &exynos5433_slim_aes_data,
0420 },
0421 { },
0422 };
0423 MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
0424
0425 static inline const struct samsung_aes_variant *find_s5p_sss_version
0426 (const struct platform_device *pdev)
0427 {
0428 if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
0429 return of_device_get_match_data(&pdev->dev);
0430
0431 return (const struct samsung_aes_variant *)
0432 platform_get_device_id(pdev)->driver_data;
0433 }
0434
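/*
 * Only a single SSS instance is supported; probe() stores it here so the
 * transformation init callbacks can find their device.
 */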
0435 static struct s5p_aes_dev *s5p_dev;
0436
0437 static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
0438 const struct scatterlist *sg)
0439 {
0440 SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
0441 SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
0442 }
0443
0444 static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
0445 const struct scatterlist *sg)
0446 {
0447 SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
0448 SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
0449 }
0450
0451 static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
0452 {
0453 int len;
0454
0455 if (!*sg)
0456 return;
0457
0458 len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
0459 free_pages((unsigned long)sg_virt(*sg), get_order(len));
0460
0461 kfree(*sg);
0462 *sg = NULL;
0463 }
0464
0465 static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
0466 unsigned int nbytes, int out)
0467 {
0468 struct scatter_walk walk;
0469
0470 if (!nbytes)
0471 return;
0472
0473 scatterwalk_start(&walk, sg);
0474 scatterwalk_copychunks(buf, &walk, nbytes, out);
0475 scatterwalk_done(&walk, out, 0);
0476 }
0477
0478 static void s5p_sg_done(struct s5p_aes_dev *dev)
0479 {
0480 struct skcipher_request *req = dev->req;
0481 struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
0482
0483 if (dev->sg_dst_cpy) {
0484 dev_dbg(dev->dev,
0485 "Copying %d bytes of output data back to original place\n",
0486 dev->req->cryptlen);
0487 s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
0488 dev->req->cryptlen, 1);
0489 }
0490 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
0491 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
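/* read back the updated IV or counter so that chained requests can continue */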
0492 if (reqctx->mode & FLAGS_AES_CBC)
0493 memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
0494
0495 else if (reqctx->mode & FLAGS_AES_CTR)
0496 memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
0497 }
0498
/* Calls the completion callback; must not be called with dev->lock held. */
0500 static void s5p_aes_complete(struct skcipher_request *req, int err)
0501 {
0502 req->base.complete(&req->base, err);
0503 }
0504
0505 static void s5p_unset_outdata(struct s5p_aes_dev *dev)
0506 {
0507 dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
0508 }
0509
0510 static void s5p_unset_indata(struct s5p_aes_dev *dev)
0511 {
0512 dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
0513 }
0514
0515 static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
0516 struct scatterlist **dst)
0517 {
0518 void *pages;
0519 int len;
0520
0521 *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
0522 if (!*dst)
0523 return -ENOMEM;
0524
0525 len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
0526 pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
0527 if (!pages) {
0528 kfree(*dst);
0529 *dst = NULL;
0530 return -ENOMEM;
0531 }
0532
0533 s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
0534
0535 sg_init_table(*dst, 1);
0536 sg_set_buf(*dst, pages, len);
0537
0538 return 0;
0539 }
0540
0541 static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
0542 {
0543 if (!sg->length)
0544 return -EINVAL;
0545
0546 if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
0547 return -ENOMEM;
0548
0549 dev->sg_dst = sg;
0550
0551 return 0;
0552 }
0553
0554 static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
0555 {
0556 if (!sg->length)
0557 return -EINVAL;
0558
0559 if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
0560 return -ENOMEM;
0561
0562 dev->sg_src = sg;
0563
0564 return 0;
0565 }
0566
/*
 * s5p_aes_tx() - handle completion of an output (transmit) DMA block.
 *
 * Returns a negative errno if mapping of the next scatterlist entry failed,
 * 1 if a new output block was mapped and its address and length still have
 * to be written to the device (via s5p_set_dma_outdata()), or 0 if there is
 * no more output data.
 */
0574 static int s5p_aes_tx(struct s5p_aes_dev *dev)
0575 {
0576 int ret = 0;
0577
0578 s5p_unset_outdata(dev);
0579
0580 if (!sg_is_last(dev->sg_dst)) {
0581 ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
0582 if (!ret)
0583 ret = 1;
0584 }
0585
0586 return ret;
0587 }
0588
/*
 * s5p_aes_rx() - handle completion of an input (receive) DMA block.
 *
 * Returns a negative errno if mapping of the next scatterlist entry failed,
 * 1 if a new input block was mapped and its address and length still have
 * to be written to the device (via s5p_set_dma_indata()), or 0 if there is
 * no more input data.
 */
0596 static int s5p_aes_rx(struct s5p_aes_dev *dev)
0597 {
0598 int ret = 0;
0599
0600 s5p_unset_indata(dev);
0601
0602 if (!sg_is_last(dev->sg_src)) {
0603 ret = s5p_set_indata(dev, sg_next(dev->sg_src));
0604 if (!ret)
0605 ret = 1;
0606 }
0607
0608 return ret;
0609 }
0610
0611 static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
0612 {
0613 return __raw_readl(dd->io_hash_base + offset);
0614 }
0615
0616 static inline void s5p_hash_write(struct s5p_aes_dev *dd,
0617 u32 offset, u32 value)
0618 {
0619 __raw_writel(value, dd->io_hash_base + offset);
0620 }
0621
/**
 * s5p_set_dma_hashdata() - start a HASH DMA transfer for one sg entry
 * @dev:	secss device
 * @sg:		DMA-mapped scatterlist entry to transmit
 */
0627 static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
0628 const struct scatterlist *sg)
0629 {
0630 dev->hash_sg_cnt--;
0631 SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
0632 SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg));
0633 }
0634
/**
 * s5p_hash_rx() - advance hash_sg_iter after a HASH read-DMA interrupt
 * @dev:	secss device
 *
 * Return:
 * 1	if another scatterlist entry is ready to be written to the device
 * 0	if there is no more data and this is a final operation
 * 2	if there is no more data and this is an update operation
 */
0644 static int s5p_hash_rx(struct s5p_aes_dev *dev)
0645 {
0646 if (dev->hash_sg_cnt > 0) {
0647 dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
0648 return 1;
0649 }
0650
0651 set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
0652 if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
0653 return 0;
0654
0655 return 2;
0656 }
0657
0658 static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
0659 {
0660 struct platform_device *pdev = dev_id;
0661 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
0662 struct skcipher_request *req;
0663 int err_dma_tx = 0;
0664 int err_dma_rx = 0;
0665 int err_dma_hx = 0;
0666 bool tx_end = false;
0667 bool hx_end = false;
0668 unsigned long flags;
0669 u32 status, st_bits;
0670 int err;
0671
0672 spin_lock_irqsave(&dev->lock, flags);
0673
/*
 * Handle an rx or tx interrupt: if the scatterlist has not reached its end,
 * map the next entry; a mapping error completes the request with that error.
 *
 * When the last tx block is done, complete the request and schedule the
 * tasklet so the next queued request can start.
 *
 * Handle a hash rx interrupt: if there is more data, program the next entry,
 * otherwise signal the hash tasklet.
 */
0684 status = SSS_READ(dev, FCINTSTAT);
0685 if (status & SSS_FCINTSTAT_BRDMAINT)
0686 err_dma_rx = s5p_aes_rx(dev);
0687
0688 if (status & SSS_FCINTSTAT_BTDMAINT) {
0689 if (sg_is_last(dev->sg_dst))
0690 tx_end = true;
0691 err_dma_tx = s5p_aes_tx(dev);
0692 }
0693
0694 if (status & SSS_FCINTSTAT_HRDMAINT)
0695 err_dma_hx = s5p_hash_rx(dev);
0696
0697 st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
0698 SSS_FCINTSTAT_HRDMAINT);
0699
0700 SSS_WRITE(dev, FCINTPEND, st_bits);
0701
/* clear the HASH interrupt bits */
0703 if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
/* partial-done and message-done are never reported together */
0705 if (status & SSS_FCINTSTAT_HPARTINT)
0706 st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
0707
0708 if (status & SSS_FCINTSTAT_HDONEINT)
0709 st_bits = SSS_HASH_STATUS_MSG_DONE;
0710
0711 set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
0712 s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
0713 hx_end = true;
/* on DONE or PART do not handle further HASH DMA */
0715 err_dma_hx = 0;
0716 }
0717
0718 if (err_dma_rx < 0) {
0719 err = err_dma_rx;
0720 goto error;
0721 }
0722 if (err_dma_tx < 0) {
0723 err = err_dma_tx;
0724 goto error;
0725 }
0726
0727 if (tx_end) {
0728 s5p_sg_done(dev);
0729 if (err_dma_hx == 1)
0730 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
0731
0732 spin_unlock_irqrestore(&dev->lock, flags);
0733
0734 s5p_aes_complete(dev->req, 0);
0735
0736 tasklet_schedule(&dev->tasklet);
0737 } else {
/*
 * Writing the length of a DMA block (either receiving or transmitting)
 * starts the operation immediately, so this must be done last, after the
 * pending interrupts were cleared, in order not to miss an interrupt.
 */
0744 if (err_dma_tx == 1)
0745 s5p_set_dma_outdata(dev, dev->sg_dst);
0746 if (err_dma_rx == 1)
0747 s5p_set_dma_indata(dev, dev->sg_src);
0748 if (err_dma_hx == 1)
0749 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
0750
0751 spin_unlock_irqrestore(&dev->lock, flags);
0752 }
0753
0754 goto hash_irq_end;
0755
0756 error:
0757 s5p_sg_done(dev);
0758 dev->busy = false;
0759 req = dev->req;
0760 if (err_dma_hx == 1)
0761 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
0762
0763 spin_unlock_irqrestore(&dev->lock, flags);
0764 s5p_aes_complete(req, err);
0765
0766 hash_irq_end:
/*
 * Note about the "else if" below: when hash_sg_iter reaches its end during
 * an update operation, issue SSS_HASH_PAUSE and wait for the HPART irq.
 */
0772 if (hx_end)
0773 tasklet_schedule(&dev->hash_tasklet);
0774 else if (err_dma_hx == 2)
0775 s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
0776 SSS_HASH_PAUSE);
0777
0778 return IRQ_HANDLED;
0779 }
0780
/**
 * s5p_hash_read_msg() - read the digest from the HASH_OUT registers
 * @req:	ahash request
 */
0785 static void s5p_hash_read_msg(struct ahash_request *req)
0786 {
0787 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
0788 struct s5p_aes_dev *dd = ctx->dd;
0789 u32 *hash = (u32 *)ctx->digest;
0790 unsigned int i;
0791
0792 for (i = 0; i < ctx->nregs; i++)
0793 hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
0794 }
0795
/**
 * s5p_hash_write_ctx_iv() - write the intermediate digest as IV to the device
 * @dd:		secss device
 * @ctx:	hash request context
 */
0801 static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
0802 const struct s5p_hash_reqctx *ctx)
0803 {
0804 const u32 *hash = (const u32 *)ctx->digest;
0805 unsigned int i;
0806
0807 for (i = 0; i < ctx->nregs; i++)
0808 s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
0809 }
0810
/**
 * s5p_hash_write_iv() - write the request's intermediate digest to the device
 * @req:	ahash request
 */
0815 static void s5p_hash_write_iv(struct ahash_request *req)
0816 {
0817 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
0818
0819 s5p_hash_write_ctx_iv(ctx->dd, ctx);
0820 }
0821
/**
 * s5p_hash_copy_result() - copy the calculated digest to req->result
 * @req:	ahash request
 */
0826 static void s5p_hash_copy_result(struct ahash_request *req)
0827 {
0828 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
0829
0830 if (!req->result)
0831 return;
0832
0833 memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
0834 }
0835
/**
 * s5p_hash_dma_flush() - flush the HASH DMA FIFO
 * @dev:	secss device
 */
0840 static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
0841 {
0842 SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
0843 }
0844
/**
 * s5p_hash_dma_enable() - switch the HASH block to DMA (FIFO) mode
 * @dev:	secss device
 */
0851 static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
0852 {
0853 s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
0854 }
0855
/**
 * s5p_hash_irq_disable() - disable HASH related feed control interrupts
 * @dev:	secss device
 * @flags:	SSS_FCINTENCLR_* bits to clear
 */
0861 static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
0862 {
0863 SSS_WRITE(dev, FCINTENCLR, flags);
0864 }
0865
/**
 * s5p_hash_irq_enable() - enable HASH related feed control interrupts
 * @dev:	secss device
 * @flags:	SSS_FCINTENSET_* bits to set
 */
0871 static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
0872 {
0873 SSS_WRITE(dev, FCINTENSET, flags);
0874 }
0875
/**
 * s5p_hash_set_flow() - select the input source of the HASH block
 * @dev:	secss device
 * @hashflow:	SSS_HASHIN_* value (independent, cipher input or output)
 */
0881 static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
0882 {
0883 unsigned long flags;
0884 u32 flow;
0885
0886 spin_lock_irqsave(&dev->lock, flags);
0887
0888 flow = SSS_READ(dev, FCFIFOCTRL);
0889 flow &= ~SSS_HASHIN_MASK;
0890 flow |= hashflow;
0891 SSS_WRITE(dev, FCFIFOCTRL, flow);
0892
0893 spin_unlock_irqrestore(&dev->lock, flags);
0894 }
0895
/**
 * s5p_ahash_dma_init() - prepare the HASH block for a new DMA transfer
 * @dev:	secss device
 * @hashflow:	SSS_HASHIN_* input source to use
 *
 * Disables and flushes the HASH DMA, enables FIFO (DMA) mode, sets the
 * input flow and re-enables the HASH interrupts.
 */
0904 static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
0905 {
0906 s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
0907 SSS_FCINTENCLR_HDONEINTENCLR |
0908 SSS_FCINTENCLR_HPARTINTENCLR);
0909 s5p_hash_dma_flush(dev);
0910
0911 s5p_hash_dma_enable(dev);
0912 s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
0913 s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
0914 SSS_FCINTENSET_HDONEINTENSET |
0915 SSS_FCINTENSET_HPARTINTENSET);
0916 }
0917
/**
 * s5p_hash_write_ctrl() - prepare the HASH block control registers
 * @dd:		secss device
 * @length:	number of bytes processed by this transfer
 * @final:	true if this is the final part of the message
 *
 * Writes the byte swap configuration, the message and pre-message sizes and
 * the control word: engine selection, the INIT bit and, when an intermediate
 * digest already exists, the user IV enable bit (with the digest loaded as
 * IV).
 */
0932 static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
0933 bool final)
0934 {
0935 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
0936 u32 prelow, prehigh, low, high;
0937 u32 configflags, swapflags;
0938 u64 tmplen;
0939
0940 configflags = ctx->engine | SSS_HASH_INIT_BIT;
0941
0942 if (likely(ctx->digcnt)) {
0943 s5p_hash_write_ctx_iv(dd, ctx);
0944 configflags |= SSS_HASH_USER_IV_EN;
0945 }
0946
0947 if (final) {
/* number of bytes in this final chunk */
0949 low = length;
0950 high = 0;
/* number of bits already processed before this chunk (pre-message size) */
0952 tmplen = ctx->digcnt * 8;
0953 prelow = (u32)tmplen;
0954 prehigh = (u32)(tmplen >> 32);
0955 } else {
0956 prelow = 0;
0957 prehigh = 0;
0958 low = 0;
0959 high = BIT(31);
0960 }
0961
0962 swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
0963 SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
0964
0965 s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
0966 s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
0967 s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
0968 s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
0969
0970 s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
0971 s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
0972 }
0973
/**
 * s5p_hash_xmit_dma() - DMA-map the prepared scatterlist and start the HASH
 * @dd:		secss device
 * @length:	number of bytes to process
 * @final:	true if this transfer finishes the message
 *
 * Return: -EINPROGRESS when the transfer was started, -EINVAL if the
 * scatterlist could not be DMA-mapped.
 */
0982 static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
0983 bool final)
0984 {
0985 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
0986 unsigned int cnt;
0987
0988 cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
0989 if (!cnt) {
0990 dev_err(dd->dev, "dma_map_sg error\n");
0991 ctx->error = true;
0992 return -EINVAL;
0993 }
0994
0995 set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
0996 dd->hash_sg_iter = ctx->sg;
0997 dd->hash_sg_cnt = cnt;
0998 s5p_hash_write_ctrl(dd, length, final);
0999 ctx->digcnt += length;
1000 ctx->total -= length;
1001
/* mark the final transfer so the last interrupt finishes the request */
1003 if (final)
1004 set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
1005
1006 s5p_set_dma_hashdata(dd, dd->hash_sg_iter);
1007
1008 return -EINPROGRESS;
1009 }
1010
/**
 * s5p_hash_copy_sgs() - copy request data into one linear buffer
 * @ctx:	hash request context
 * @sg:		source scatterlist
 * @new_len:	number of bytes to take from @sg
 *
 * Used when the source scatterlist is not suitable for DMA: the buffered
 * bytes (if any) and @new_len bytes of @sg are copied into freshly allocated
 * pages and a one-entry scatterlist is built on top of them.
 *
 * Return: 0 on success, -ENOMEM if the pages could not be allocated.
 */
1023 static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
1024 struct scatterlist *sg, unsigned int new_len)
1025 {
1026 unsigned int pages, len;
1027 void *buf;
1028
1029 len = new_len + ctx->bufcnt;
1030 pages = get_order(len);
1031
1032 buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
1033 if (!buf) {
1034 dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
1035 ctx->error = true;
1036 return -ENOMEM;
1037 }
1038
1039 if (ctx->bufcnt)
1040 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
1041
1042 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
1043 new_len, 0);
1044 sg_init_table(ctx->sgl, 1);
1045 sg_set_buf(ctx->sgl, buf, len);
1046 ctx->sg = ctx->sgl;
1047 ctx->sg_len = 1;
1048 ctx->bufcnt = 0;
1049 ctx->skip = 0;
1050 set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
1051
1052 return 0;
1053 }
1054
/**
 * s5p_hash_copy_sg_lists() - build a new scatterlist over the request pages
 * @ctx:	hash request context
 * @sg:		source scatterlist
 * @new_len:	number of bytes to take from @sg
 *
 * Allocates a new scatterlist and populates it with the buffered bytes (if
 * any) followed by up to @new_len bytes of @sg, skipping ctx->skip bytes at
 * the start. The data itself is not copied.
 *
 * Return: 0 on success, -ENOMEM if the scatterlist could not be allocated.
 */
1069 static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
1070 struct scatterlist *sg, unsigned int new_len)
1071 {
1072 unsigned int skip = ctx->skip, n = sg_nents(sg);
1073 struct scatterlist *tmp;
1074 unsigned int len;
1075
1076 if (ctx->bufcnt)
1077 n++;
1078
1079 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
1080 if (!ctx->sg) {
1081 ctx->error = true;
1082 return -ENOMEM;
1083 }
1084
1085 sg_init_table(ctx->sg, n);
1086
1087 tmp = ctx->sg;
1088
1089 ctx->sg_len = 0;
1090
1091 if (ctx->bufcnt) {
1092 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
1093 tmp = sg_next(tmp);
1094 ctx->sg_len++;
1095 }
1096
1097 while (sg && skip >= sg->length) {
1098 skip -= sg->length;
1099 sg = sg_next(sg);
1100 }
1101
1102 while (sg && new_len) {
1103 len = sg->length - skip;
1104 if (new_len < len)
1105 len = new_len;
1106
1107 new_len -= len;
1108 sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
1109 skip = 0;
1110 if (new_len <= 0)
1111 sg_mark_end(tmp);
1112
1113 tmp = sg_next(tmp);
1114 ctx->sg_len++;
1115 sg = sg_next(sg);
1116 }
1117
1118 set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
1119
1120 return 0;
1121 }
1122
/**
 * s5p_hash_prepare_sgs() - prepare the scatterlist for a DMA transfer
 * @ctx:	hash request context
 * @sg:		source scatterlist (req->src)
 * @new_len:	number of bytes to take from @sg
 * @final:	true if this transfer finishes the message
 *
 * Checks whether @sg can be used directly for DMA: every used entry must be
 * a multiple of BUFLEN long and the list must not contain bytes that should
 * not be sent. If not, falls back to copying the data (unaligned entries) or
 * to building a new scatterlist. Bytes buffered by the previous operation
 * are prepended through ctx->sgl.
 *
 * Return: 0 on success, or a negative errno from the fallback helpers.
 */
1139 static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
1140 struct scatterlist *sg,
1141 unsigned int new_len, bool final)
1142 {
1143 unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
1144 bool aligned = true, list_ok = true;
1145 struct scatterlist *sg_tmp = sg;
1146
1147 if (!sg || !sg->length || !new_len)
1148 return 0;
1149
1150 if (skip || !final)
1151 list_ok = false;
1152
1153 while (nbytes > 0 && sg_tmp) {
1154 n++;
1155 if (skip >= sg_tmp->length) {
1156 skip -= sg_tmp->length;
1157 if (!sg_tmp->length) {
1158 aligned = false;
1159 break;
1160 }
1161 } else {
1162 if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
1163 aligned = false;
1164 break;
1165 }
1166
1167 if (nbytes < sg_tmp->length - skip) {
1168 list_ok = false;
1169 break;
1170 }
1171
1172 nbytes -= sg_tmp->length - skip;
1173 skip = 0;
1174 }
1175
1176 sg_tmp = sg_next(sg_tmp);
1177 }
1178
1179 if (!aligned)
1180 return s5p_hash_copy_sgs(ctx, sg, new_len);
1181 else if (!list_ok)
1182 return s5p_hash_copy_sg_lists(ctx, sg, new_len);
1183
/*
 * The data is aligned: prepend the bytes buffered by the previous operation,
 * if any, by chaining ctx->sgl in front of req->src.
 */
1188 if (ctx->bufcnt) {
1189 ctx->sg_len = n;
1190 sg_init_table(ctx->sgl, 2);
1191 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
1192 sg_chain(ctx->sgl, 2, sg);
1193 ctx->sg = ctx->sgl;
1194 ctx->sg_len++;
1195 } else {
1196 ctx->sg = sg;
1197 ctx->sg_len = n;
1198 }
1199
1200 return 0;
1201 }
1202
/**
 * s5p_hash_prepare_request() - prepare the request for a DMA transfer
 * @req:	ahash request
 * @update:	true for an update operation, false for final
 *
 * Fills the internal buffer up to a full block, decides how many bytes are
 * sent to the hardware and keeps the remaining bytes buffered for the next
 * operation.
 *
 * Return: 0 on success, or a negative errno from the scatterlist helpers.
 */
1213 static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
1214 {
1215 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1216 bool final = ctx->finup;
1217 int xmit_len, hash_later, nbytes;
1218 int ret;
1219
1220 if (update)
1221 nbytes = req->nbytes;
1222 else
1223 nbytes = 0;
1224
1225 ctx->total = nbytes + ctx->bufcnt;
1226 if (!ctx->total)
1227 return 0;
1228
1229 if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
/* bytes left over from the previous request: fill the buffer up to BUFLEN */
1231 int len = BUFLEN - ctx->bufcnt % BUFLEN;
1232
1233 if (len > nbytes)
1234 len = nbytes;
1235
1236 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1237 0, len, 0);
1238 ctx->bufcnt += len;
1239 nbytes -= len;
1240 ctx->skip = len;
1241 } else {
1242 ctx->skip = 0;
1243 }
1244
1245 if (ctx->bufcnt)
1246 memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
1247
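/*
 * For a non-final operation only whole blocks are sent to the hardware; the
 * trailing bytes (a full block if the total is already block aligned) stay
 * in the buffer for the next update or final operation.
 */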
1248 xmit_len = ctx->total;
1249 if (final) {
1250 hash_later = 0;
1251 } else {
1252 if (IS_ALIGNED(xmit_len, BUFLEN))
1253 xmit_len -= BUFLEN;
1254 else
1255 xmit_len -= xmit_len & (BUFLEN - 1);
1256
1257 hash_later = ctx->total - xmit_len;
/*
 * Save the trailing hash_later bytes of req->src into ctx->buffer; the
 * previously buffered bytes were already copied to xmit_buf above.
 */
1260 scatterwalk_map_and_copy(ctx->buffer, req->src,
1261 req->nbytes - hash_later,
1262 hash_later, 0);
1263 }
1264
1265 if (xmit_len > BUFLEN) {
1266 ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
1267 final);
1268 if (ret)
1269 return ret;
1270 } else {
/* everything (at most one block) is sent from xmit_buf */
1272 if (unlikely(!ctx->bufcnt)) {
/* the buffer is empty, so take the bytes directly from req->src */
1274 scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
1275 0, xmit_len, 0);
1276 }
1277
1278 sg_init_table(ctx->sgl, 1);
1279 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
1280
1281 ctx->sg = ctx->sgl;
1282 ctx->sg_len = 1;
1283 }
1284
1285 ctx->bufcnt = hash_later;
1286 if (!final)
1287 ctx->total = xmit_len;
1288
1289 return 0;
1290 }
1291
/**
 * s5p_hash_update_dma_stop() - unmap the scatterlist after a DMA transfer
 * @dd:		secss device
 */
1298 static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
1299 {
1300 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
1301
1302 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
1303 clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
1304 }
1305
/**
 * s5p_hash_finish() - copy the calculated digest to req->result
 * @req:	ahash request
 */
1310 static void s5p_hash_finish(struct ahash_request *req)
1311 {
1312 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1313 struct s5p_aes_dev *dd = ctx->dd;
1314
1315 if (ctx->digcnt)
1316 s5p_hash_copy_result(req);
1317
1318 dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
1319 }
1320
/**
 * s5p_hash_finish_req() - finish the request and release its resources
 * @req:	ahash request
 * @err:	error code passed to the completion callback
 */
1326 static void s5p_hash_finish_req(struct ahash_request *req, int err)
1327 {
1328 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1329 struct s5p_aes_dev *dd = ctx->dd;
1330 unsigned long flags;
1331
1332 if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
1333 free_pages((unsigned long)sg_virt(ctx->sg),
1334 get_order(ctx->sg->length));
1335
1336 if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
1337 kfree(ctx->sg);
1338
1339 ctx->sg = NULL;
1340 dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
1341 BIT(HASH_FLAGS_SGS_COPIED));
1342
1343 if (!err && !ctx->error) {
1344 s5p_hash_read_msg(req);
1345 if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
1346 s5p_hash_finish(req);
1347 } else {
1348 ctx->error = true;
1349 }
1350
1351 spin_lock_irqsave(&dd->hash_lock, flags);
1352 dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
1353 BIT(HASH_FLAGS_DMA_READY) |
1354 BIT(HASH_FLAGS_OUTPUT_READY));
1355 spin_unlock_irqrestore(&dd->hash_lock, flags);
1356
1357 if (req->base.complete)
1358 req->base.complete(&req->base, err);
1359 }
1360
/**
 * s5p_hash_handle_queue() - enqueue a request and start the next one
 * @dd:		secss device
 * @req:	ahash request to enqueue, or NULL to only process the queue
 *
 * If the device is not busy, dequeues the next request, prepares it and
 * starts the DMA transfer. Requests that need no hardware work, or that fail
 * during preparation, are finished immediately and the queue is retried.
 *
 * Return: the enqueue status of @req (0, -EINPROGRESS or -EBUSY).
 */
1371 static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
1372 struct ahash_request *req)
1373 {
1374 struct crypto_async_request *async_req, *backlog;
1375 struct s5p_hash_reqctx *ctx;
1376 unsigned long flags;
1377 int err = 0, ret = 0;
1378
1379 retry:
1380 spin_lock_irqsave(&dd->hash_lock, flags);
1381 if (req)
1382 ret = ahash_enqueue_request(&dd->hash_queue, req);
1383
1384 if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
1385 spin_unlock_irqrestore(&dd->hash_lock, flags);
1386 return ret;
1387 }
1388
1389 backlog = crypto_get_backlog(&dd->hash_queue);
1390 async_req = crypto_dequeue_request(&dd->hash_queue);
1391 if (async_req)
1392 set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
1393
1394 spin_unlock_irqrestore(&dd->hash_lock, flags);
1395
1396 if (!async_req)
1397 return ret;
1398
1399 if (backlog)
1400 backlog->complete(backlog, -EINPROGRESS);
1401
1402 req = ahash_request_cast(async_req);
1403 dd->hash_req = req;
1404 ctx = ahash_request_ctx(req);
1405
1406 err = s5p_hash_prepare_request(req, ctx->op_update);
1407 if (err || !ctx->total)
1408 goto out;
1409
1410 dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
1411 ctx->op_update, req->nbytes);
1412
1413 s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
1414 if (ctx->digcnt)
1415 s5p_hash_write_iv(req);
1416
1417 if (ctx->op_update) {
1418 err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
1419 if (err != -EINPROGRESS && ctx->finup && !ctx->error)
/* finup() gets no separate final() call, so issue the final transfer now */
1421 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1422 } else {
1423 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1424 }
1425 out:
1426 if (err != -EINPROGRESS) {
/* the hash tasklet will not finish this request, so do it here */
1428 s5p_hash_finish_req(req, err);
1429 req = NULL;

/*
 * Execute the next request immediately if there is anything in the
 * queue.
 */
1435 goto retry;
1436 }
1437
1438 return ret;
1439 }
1440
/**
 * s5p_hash_tasklet_cb() - hash tasklet: finish requests, restart the queue
 * @data:	secss device (struct s5p_aes_dev *)
 */
1445 static void s5p_hash_tasklet_cb(unsigned long data)
1446 {
1447 struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
1448
1449 if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
1450 s5p_hash_handle_queue(dd, NULL);
1451 return;
1452 }
1453
1454 if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
1455 if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
1456 &dd->hash_flags)) {
1457 s5p_hash_update_dma_stop(dd);
1458 }
1459
1460 if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
1461 &dd->hash_flags)) {
/* the digest (or an intermediate digest) is ready */
1463 clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
1464 goto finish;
1465 }
1466 }
1467
1468 return;
1469
1470 finish:
/* finish the current request */
1472 s5p_hash_finish_req(dd->hash_req, 0);
1473
/* if the device is no longer busy, process the next queued request */
1475 if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
1476 s5p_hash_handle_queue(dd, NULL);
1477 }
1478
/**
 * s5p_hash_enqueue() - enqueue an ahash request
 * @req:	ahash request
 * @op:		true for update operations, false for final
 *
 * Return: see s5p_hash_handle_queue().
 */
1486 static int s5p_hash_enqueue(struct ahash_request *req, bool op)
1487 {
1488 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1489 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1490
1491 ctx->op_update = op;
1492
1493 return s5p_hash_handle_queue(tctx->dd, req);
1494 }
1495
/**
 * s5p_hash_update() - process more input data
 * @req:	ahash request with the new data in req->src
 *
 * Data that still fits into the internal buffer is only copied; the hardware
 * is involved once more than BUFLEN bytes are pending.
 *
 * Return: 0, or the enqueue status from s5p_hash_enqueue().
 */
1505 static int s5p_hash_update(struct ahash_request *req)
1506 {
1507 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1508
1509 if (!req->nbytes)
1510 return 0;
1511
1512 if (ctx->bufcnt + req->nbytes <= BUFLEN) {
1513 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1514 0, req->nbytes, 0);
1515 ctx->bufcnt += req->nbytes;
1516 return 0;
1517 }
1518
1519 return s5p_hash_enqueue(req, true);
1520 }
1521
/**
 * s5p_hash_final() - close the hash and produce the digest
 * @req:	ahash request
 *
 * If no data has been sent to the hardware yet and less than one block is
 * buffered, the digest is computed with the software fallback instead of the
 * device. Otherwise the buffered bytes are sent to the hardware as the final
 * part of the message.
 *
 * Return: 0 on synchronous completion, -EINVAL after a previous error,
 * or -EINPROGRESS/-EBUSY when the request was queued.
 */
1545 static int s5p_hash_final(struct ahash_request *req)
1546 {
1547 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1548
1549 ctx->finup = true;
1550 if (ctx->error)
1551 return -EINVAL;
1552
1553 if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
1554 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1555
1556 return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
1557 ctx->bufcnt, req->result);
1558 }
1559
1560 return s5p_hash_enqueue(req, false);
1561 }
1562
/**
 * s5p_hash_finup() - process the remaining data and close the hash
 * @req:	ahash request
 *
 * Return: status from s5p_hash_update() or s5p_hash_final().
 */
1569 static int s5p_hash_finup(struct ahash_request *req)
1570 {
1571 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1572 int err1, err2;
1573
1574 ctx->finup = true;
1575
1576 err1 = s5p_hash_update(req);
1577 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1578 return err1;
1579
/*
 * final() must always be called to release resources, even when update()
 * failed (unless it returned -EINPROGRESS or -EBUSY), and it also covers
 * the fallback digest for short messages.
 */
1585 err2 = s5p_hash_final(req);
1586
1587 return err1 ?: err2;
1588 }
1589
/**
 * s5p_hash_init() - initialize the request context
 * @req:	ahash request
 *
 * Selects the hash engine and digest width from the tfm's digest size.
 */
1596 static int s5p_hash_init(struct ahash_request *req)
1597 {
1598 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1599 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1600 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1601
1602 ctx->dd = tctx->dd;
1603 ctx->error = false;
1604 ctx->finup = false;
1605 ctx->bufcnt = 0;
1606 ctx->digcnt = 0;
1607 ctx->total = 0;
1608 ctx->skip = 0;
1609
1610 dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
1611 crypto_ahash_digestsize(tfm));
1612
1613 switch (crypto_ahash_digestsize(tfm)) {
1614 case MD5_DIGEST_SIZE:
1615 ctx->engine = SSS_HASH_ENGINE_MD5;
1616 ctx->nregs = HASH_MD5_MAX_REG;
1617 break;
1618 case SHA1_DIGEST_SIZE:
1619 ctx->engine = SSS_HASH_ENGINE_SHA1;
1620 ctx->nregs = HASH_SHA1_MAX_REG;
1621 break;
1622 case SHA256_DIGEST_SIZE:
1623 ctx->engine = SSS_HASH_ENGINE_SHA256;
1624 ctx->nregs = HASH_SHA256_MAX_REG;
1625 break;
1626 default:
1627 ctx->error = true;
1628 return -EINVAL;
1629 }
1630
1631 return 0;
1632 }
1633
/**
 * s5p_hash_digest() - compute the digest of req->src in a single call
 * @req:	ahash request
 */
1640 static int s5p_hash_digest(struct ahash_request *req)
1641 {
1642 return s5p_hash_init(req) ?: s5p_hash_finup(req);
1643 }
1644
/**
 * s5p_hash_cra_init_alg() - allocate the software fallback transformation
 * @tfm:	crypto transformation
 */
1649 static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
1650 {
1651 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
1652 const char *alg_name = crypto_tfm_alg_name(tfm);
1653
1654 tctx->dd = s5p_dev;
1655
1656 tctx->fallback = crypto_alloc_shash(alg_name, 0,
1657 CRYPTO_ALG_NEED_FALLBACK);
1658 if (IS_ERR(tctx->fallback)) {
1659 pr_err("fallback alloc fails for '%s'\n", alg_name);
1660 return PTR_ERR(tctx->fallback);
1661 }
1662
1663 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1664 sizeof(struct s5p_hash_reqctx) + BUFLEN);
1665
1666 return 0;
1667 }
1668
/**
 * s5p_hash_cra_init() - crypto_tfm init callback
 * @tfm:	crypto transformation
 */
1673 static int s5p_hash_cra_init(struct crypto_tfm *tfm)
1674 {
1675 return s5p_hash_cra_init_alg(tfm);
1676 }
1677
/**
 * s5p_hash_cra_exit() - crypto_tfm exit callback, frees the fallback
 * @tfm:	crypto transformation
 */
1684 static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
1685 {
1686 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
1687
1688 crypto_free_shash(tctx->fallback);
1689 tctx->fallback = NULL;
1690 }
1691
/**
 * s5p_hash_export() - export the request state (context plus buffered bytes)
 * @req:	ahash request
 * @out:	buffer to export to
 */
1697 static int s5p_hash_export(struct ahash_request *req, void *out)
1698 {
1699 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1700
1701 memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
1702
1703 return 0;
1704 }
1705
/**
 * s5p_hash_import() - import a previously exported request state
 * @req:	ahash request
 * @in:		buffer to import from
 */
1711 static int s5p_hash_import(struct ahash_request *req, const void *in)
1712 {
1713 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1714 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1715 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1716 const struct s5p_hash_reqctx *ctx_in = in;
1717
1718 memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
1719 if (ctx_in->bufcnt > BUFLEN) {
1720 ctx->error = true;
1721 return -EINVAL;
1722 }
1723
1724 ctx->dd = tctx->dd;
1725 ctx->error = false;
1726
1727 return 0;
1728 }
1729
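/*
 * exynos-sha1/md5/sha256: asynchronous hardware hash algorithms with a
 * software fallback for short messages. The exported state is the request
 * context followed by the BUFLEN-byte buffer.
 */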
1730 static struct ahash_alg algs_sha1_md5_sha256[] = {
1731 {
1732 .init = s5p_hash_init,
1733 .update = s5p_hash_update,
1734 .final = s5p_hash_final,
1735 .finup = s5p_hash_finup,
1736 .digest = s5p_hash_digest,
1737 .export = s5p_hash_export,
1738 .import = s5p_hash_import,
1739 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1740 .halg.digestsize = SHA1_DIGEST_SIZE,
1741 .halg.base = {
1742 .cra_name = "sha1",
1743 .cra_driver_name = "exynos-sha1",
1744 .cra_priority = 100,
1745 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1746 CRYPTO_ALG_ASYNC |
1747 CRYPTO_ALG_NEED_FALLBACK,
1748 .cra_blocksize = HASH_BLOCK_SIZE,
1749 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1750 .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
1751 .cra_module = THIS_MODULE,
1752 .cra_init = s5p_hash_cra_init,
1753 .cra_exit = s5p_hash_cra_exit,
1754 }
1755 },
1756 {
1757 .init = s5p_hash_init,
1758 .update = s5p_hash_update,
1759 .final = s5p_hash_final,
1760 .finup = s5p_hash_finup,
1761 .digest = s5p_hash_digest,
1762 .export = s5p_hash_export,
1763 .import = s5p_hash_import,
1764 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1765 .halg.digestsize = MD5_DIGEST_SIZE,
1766 .halg.base = {
1767 .cra_name = "md5",
1768 .cra_driver_name = "exynos-md5",
1769 .cra_priority = 100,
1770 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1771 CRYPTO_ALG_ASYNC |
1772 CRYPTO_ALG_NEED_FALLBACK,
1773 .cra_blocksize = HASH_BLOCK_SIZE,
1774 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1775 .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
1776 .cra_module = THIS_MODULE,
1777 .cra_init = s5p_hash_cra_init,
1778 .cra_exit = s5p_hash_cra_exit,
1779 }
1780 },
1781 {
1782 .init = s5p_hash_init,
1783 .update = s5p_hash_update,
1784 .final = s5p_hash_final,
1785 .finup = s5p_hash_finup,
1786 .digest = s5p_hash_digest,
1787 .export = s5p_hash_export,
1788 .import = s5p_hash_import,
1789 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1790 .halg.digestsize = SHA256_DIGEST_SIZE,
1791 .halg.base = {
1792 .cra_name = "sha256",
1793 .cra_driver_name = "exynos-sha256",
1794 .cra_priority = 100,
1795 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1796 CRYPTO_ALG_ASYNC |
1797 CRYPTO_ALG_NEED_FALLBACK,
1798 .cra_blocksize = HASH_BLOCK_SIZE,
1799 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1800 .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
1801 .cra_module = THIS_MODULE,
1802 .cra_init = s5p_hash_cra_init,
1803 .cra_exit = s5p_hash_cra_exit,
1804 }
1805 }
1806
1807 };
1808
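/*
 * Program the IV or counter and the key. Keys are written so that they end
 * at the top of the key register bank: a 256-bit key starts at word 0, a
 * 192-bit key at word 2 and a 128-bit key at word 4.
 */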
1809 static void s5p_set_aes(struct s5p_aes_dev *dev,
1810 const u8 *key, const u8 *iv, const u8 *ctr,
1811 unsigned int keylen)
1812 {
1813 void __iomem *keystart;
1814
1815 if (iv)
1816 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
1817 AES_BLOCK_SIZE);
1818
1819 if (ctr)
1820 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
1821 AES_BLOCK_SIZE);
1822
1823 if (keylen == AES_KEYSIZE_256)
1824 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
1825 else if (keylen == AES_KEYSIZE_192)
1826 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
1827 else
1828 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
1829
1830 memcpy_toio(keystart, key, keylen);
1831 }
1832
1833 static bool s5p_is_sg_aligned(struct scatterlist *sg)
1834 {
1835 while (sg) {
1836 if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
1837 return false;
1838 sg = sg_next(sg);
1839 }
1840
1841 return true;
1842 }
1843
1844 static int s5p_set_indata_start(struct s5p_aes_dev *dev,
1845 struct skcipher_request *req)
1846 {
1847 struct scatterlist *sg;
1848 int err;
1849
1850 dev->sg_src_cpy = NULL;
1851 sg = req->src;
1852 if (!s5p_is_sg_aligned(sg)) {
1853 dev_dbg(dev->dev,
1854 "At least one unaligned source scatter list, making a copy\n");
1855 err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
1856 if (err)
1857 return err;
1858
1859 sg = dev->sg_src_cpy;
1860 }
1861
1862 err = s5p_set_indata(dev, sg);
1863 if (err) {
1864 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
1865 return err;
1866 }
1867
1868 return 0;
1869 }
1870
1871 static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
1872 struct skcipher_request *req)
1873 {
1874 struct scatterlist *sg;
1875 int err;
1876
1877 dev->sg_dst_cpy = NULL;
1878 sg = req->dst;
1879 if (!s5p_is_sg_aligned(sg)) {
1880 dev_dbg(dev->dev,
1881 "At least one unaligned dest scatter list, making a copy\n");
1882 err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
1883 if (err)
1884 return err;
1885
1886 sg = dev->sg_dst_cpy;
1887 }
1888
1889 err = s5p_set_outdata(dev, sg);
1890 if (err) {
1891 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
1892 return err;
1893 }
1894
1895 return 0;
1896 }
1897
1898 static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1899 {
1900 struct skcipher_request *req = dev->req;
1901 u32 aes_control;
1902 unsigned long flags;
1903 int err;
1904 u8 *iv, *ctr;
1905
/* this also leaves bits [13:12] at 00, which selects the 128-bit counter */
1907 aes_control = SSS_AES_KEY_CHANGE_MODE;
1908 if (mode & FLAGS_AES_DECRYPT)
1909 aes_control |= SSS_AES_MODE_DECRYPT;
1910
1911 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
1912 aes_control |= SSS_AES_CHAIN_MODE_CBC;
1913 iv = req->iv;
1914 ctr = NULL;
1915 } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
1916 aes_control |= SSS_AES_CHAIN_MODE_CTR;
1917 iv = NULL;
1918 ctr = req->iv;
1919 } else {
1920 iv = NULL;
1921 ctr = NULL;
1922 }
1923
1924 if (dev->ctx->keylen == AES_KEYSIZE_192)
1925 aes_control |= SSS_AES_KEY_SIZE_192;
1926 else if (dev->ctx->keylen == AES_KEYSIZE_256)
1927 aes_control |= SSS_AES_KEY_SIZE_256;
1928
1929 aes_control |= SSS_AES_FIFO_MODE;
1930
/* enable byte swapping for input/output data, IV, key and counter */
1932 aes_control |= SSS_AES_BYTESWAP_DI
1933 | SSS_AES_BYTESWAP_DO
1934 | SSS_AES_BYTESWAP_IV
1935 | SSS_AES_BYTESWAP_KEY
1936 | SSS_AES_BYTESWAP_CNT;
1937
1938 spin_lock_irqsave(&dev->lock, flags);
1939
1940 SSS_WRITE(dev, FCINTENCLR,
1941 SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
1942 SSS_WRITE(dev, FCFIFOCTRL, 0x00);
1943
1944 err = s5p_set_indata_start(dev, req);
1945 if (err)
1946 goto indata_error;
1947
1948 err = s5p_set_outdata_start(dev, req);
1949 if (err)
1950 goto outdata_error;
1951
1952 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
1953 s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
1954
1955 s5p_set_dma_indata(dev, dev->sg_src);
1956 s5p_set_dma_outdata(dev, dev->sg_dst);
1957
1958 SSS_WRITE(dev, FCINTENSET,
1959 SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
1960
1961 spin_unlock_irqrestore(&dev->lock, flags);
1962
1963 return;
1964
1965 outdata_error:
1966 s5p_unset_indata(dev);
1967
1968 indata_error:
1969 s5p_sg_done(dev);
1970 dev->busy = false;
1971 spin_unlock_irqrestore(&dev->lock, flags);
1972 s5p_aes_complete(req, err);
1973 }
1974
1975 static void s5p_tasklet_cb(unsigned long data)
1976 {
1977 struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
1978 struct crypto_async_request *async_req, *backlog;
1979 struct s5p_aes_reqctx *reqctx;
1980 unsigned long flags;
1981
1982 spin_lock_irqsave(&dev->lock, flags);
1983 backlog = crypto_get_backlog(&dev->queue);
1984 async_req = crypto_dequeue_request(&dev->queue);
1985
1986 if (!async_req) {
1987 dev->busy = false;
1988 spin_unlock_irqrestore(&dev->lock, flags);
1989 return;
1990 }
1991 spin_unlock_irqrestore(&dev->lock, flags);
1992
1993 if (backlog)
1994 backlog->complete(backlog, -EINPROGRESS);
1995
1996 dev->req = skcipher_request_cast(async_req);
1997 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
1998 reqctx = skcipher_request_ctx(dev->req);
1999
2000 s5p_aes_crypt_start(dev, reqctx->mode);
2001 }
2002
2003 static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
2004 struct skcipher_request *req)
2005 {
2006 unsigned long flags;
2007 int err;
2008
2009 spin_lock_irqsave(&dev->lock, flags);
2010 err = crypto_enqueue_request(&dev->queue, &req->base);
2011 if (dev->busy) {
2012 spin_unlock_irqrestore(&dev->lock, flags);
2013 return err;
2014 }
2015 dev->busy = true;
2016
2017 spin_unlock_irqrestore(&dev->lock, flags);
2018
2019 tasklet_schedule(&dev->tasklet);
2020
2021 return err;
2022 }
2023
2024 static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
2025 {
2026 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
2027 struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
2028 struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
2029 struct s5p_aes_dev *dev = ctx->dev;
2030
2031 if (!req->cryptlen)
2032 return 0;
2033
2034 if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
2035 ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
2036 dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
2037 return -EINVAL;
2038 }
2039
2040 reqctx->mode = mode;
2041
2042 return s5p_aes_handle_req(dev, req);
2043 }
2044
2045 static int s5p_aes_setkey(struct crypto_skcipher *cipher,
2046 const u8 *key, unsigned int keylen)
2047 {
2048 struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
2049 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
2050
2051 if (keylen != AES_KEYSIZE_128 &&
2052 keylen != AES_KEYSIZE_192 &&
2053 keylen != AES_KEYSIZE_256)
2054 return -EINVAL;
2055
2056 memcpy(ctx->aes_key, key, keylen);
2057 ctx->keylen = keylen;
2058
2059 return 0;
2060 }
2061
2062 static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
2063 {
2064 return s5p_aes_crypt(req, 0);
2065 }
2066
2067 static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
2068 {
2069 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
2070 }
2071
2072 static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
2073 {
2074 return s5p_aes_crypt(req, FLAGS_AES_CBC);
2075 }
2076
2077 static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
2078 {
2079 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
2080 }
2081
2082 static int s5p_aes_ctr_crypt(struct skcipher_request *req)
2083 {
2084 return s5p_aes_crypt(req, FLAGS_AES_CTR);
2085 }
2086
2087 static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
2088 {
2089 struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
2090
2091 ctx->dev = s5p_dev;
2092 crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));
2093
2094 return 0;
2095 }
2096
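/*
 * ecb(aes), cbc(aes) and ctr(aes) skcipher implementations backed by the SSS
 * feed control DMA. Scatterlist entries that are not multiples of the AES
 * block size are bounced through a linear copy (see s5p_make_sg_cpy()).
 * Users reach them through the regular crypto API, e.g.
 * crypto_alloc_skcipher("cbc(aes)", 0, 0).
 */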
2097 static struct skcipher_alg algs[] = {
2098 {
2099 .base.cra_name = "ecb(aes)",
2100 .base.cra_driver_name = "ecb-aes-s5p",
2101 .base.cra_priority = 100,
2102 .base.cra_flags = CRYPTO_ALG_ASYNC |
2103 CRYPTO_ALG_KERN_DRIVER_ONLY,
2104 .base.cra_blocksize = AES_BLOCK_SIZE,
2105 .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
2106 .base.cra_alignmask = 0x0f,
2107 .base.cra_module = THIS_MODULE,
2108
2109 .min_keysize = AES_MIN_KEY_SIZE,
2110 .max_keysize = AES_MAX_KEY_SIZE,
2111 .setkey = s5p_aes_setkey,
2112 .encrypt = s5p_aes_ecb_encrypt,
2113 .decrypt = s5p_aes_ecb_decrypt,
2114 .init = s5p_aes_init_tfm,
2115 },
2116 {
2117 .base.cra_name = "cbc(aes)",
2118 .base.cra_driver_name = "cbc-aes-s5p",
2119 .base.cra_priority = 100,
2120 .base.cra_flags = CRYPTO_ALG_ASYNC |
2121 CRYPTO_ALG_KERN_DRIVER_ONLY,
2122 .base.cra_blocksize = AES_BLOCK_SIZE,
2123 .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
2124 .base.cra_alignmask = 0x0f,
2125 .base.cra_module = THIS_MODULE,
2126
2127 .min_keysize = AES_MIN_KEY_SIZE,
2128 .max_keysize = AES_MAX_KEY_SIZE,
2129 .ivsize = AES_BLOCK_SIZE,
2130 .setkey = s5p_aes_setkey,
2131 .encrypt = s5p_aes_cbc_encrypt,
2132 .decrypt = s5p_aes_cbc_decrypt,
2133 .init = s5p_aes_init_tfm,
2134 },
2135 {
2136 .base.cra_name = "ctr(aes)",
2137 .base.cra_driver_name = "ctr-aes-s5p",
2138 .base.cra_priority = 100,
2139 .base.cra_flags = CRYPTO_ALG_ASYNC |
2140 CRYPTO_ALG_KERN_DRIVER_ONLY,
2141 .base.cra_blocksize = 1,
2142 .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
2143 .base.cra_alignmask = 0x0f,
2144 .base.cra_module = THIS_MODULE,
2145
2146 .min_keysize = AES_MIN_KEY_SIZE,
2147 .max_keysize = AES_MAX_KEY_SIZE,
2148 .ivsize = AES_BLOCK_SIZE,
2149 .setkey = s5p_aes_setkey,
2150 .encrypt = s5p_aes_ctr_crypt,
2151 .decrypt = s5p_aes_ctr_crypt,
2152 .init = s5p_aes_init_tfm,
2153 },
2154 };
2155
2156 static int s5p_aes_probe(struct platform_device *pdev)
2157 {
2158 struct device *dev = &pdev->dev;
2159 int i, j, err;
2160 const struct samsung_aes_variant *variant;
2161 struct s5p_aes_dev *pdata;
2162 struct resource *res;
2163 unsigned int hash_i;
2164
2165 if (s5p_dev)
2166 return -EEXIST;
2167
2168 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2169 if (!pdata)
2170 return -ENOMEM;
2171
2172 variant = find_s5p_sss_version(pdev);
2173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2174 if (!res)
2175 return -EINVAL;
2176
/*
 * HASH and PRNG use the same registers in secss and must not overwrite each
 * other, so HASH support is only enabled when CONFIG_CRYPTO_DEV_EXYNOS_HASH
 * is set. A larger IO region is needed in that case, since the DT node only
 * describes the AES/DES register area.
 */
2183 if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
2184 if (variant == &exynos_aes_data) {
2185 res->end += 0x300;
2186 pdata->use_hash = true;
2187 }
2188 }
2189
2190 pdata->res = res;
2191 pdata->ioaddr = devm_ioremap_resource(dev, res);
2192 if (IS_ERR(pdata->ioaddr)) {
2193 if (!pdata->use_hash)
2194 return PTR_ERR(pdata->ioaddr);
2195
2196 res->end -= 0x300;
2197 pdata->use_hash = false;
2198 pdata->ioaddr = devm_ioremap_resource(dev, res);
2199 if (IS_ERR(pdata->ioaddr))
2200 return PTR_ERR(pdata->ioaddr);
2201 }
2202
2203 pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
2204 if (IS_ERR(pdata->clk))
2205 return dev_err_probe(dev, PTR_ERR(pdata->clk),
2206 "failed to find secss clock %s\n",
2207 variant->clk_names[0]);
2208
2209 err = clk_prepare_enable(pdata->clk);
2210 if (err < 0) {
2211 dev_err(dev, "Enabling clock %s failed, err %d\n",
2212 variant->clk_names[0], err);
2213 return err;
2214 }
2215
2216 if (variant->clk_names[1]) {
2217 pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
2218 if (IS_ERR(pdata->pclk)) {
2219 err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
2220 "failed to find clock %s\n",
2221 variant->clk_names[1]);
2222 goto err_clk;
2223 }
2224
2225 err = clk_prepare_enable(pdata->pclk);
2226 if (err < 0) {
2227 dev_err(dev, "Enabling clock %s failed, err %d\n",
2228 variant->clk_names[0], err);
2229 goto err_clk;
2230 }
2231 } else {
2232 pdata->pclk = NULL;
2233 }
2234
2235 spin_lock_init(&pdata->lock);
2236 spin_lock_init(&pdata->hash_lock);
2237
2238 pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
2239 pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
2240
2241 pdata->irq_fc = platform_get_irq(pdev, 0);
2242 if (pdata->irq_fc < 0) {
2243 err = pdata->irq_fc;
2244 dev_warn(dev, "feed control interrupt is not available.\n");
2245 goto err_irq;
2246 }
2247 err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
2248 s5p_aes_interrupt, IRQF_ONESHOT,
2249 pdev->name, pdev);
2250 if (err < 0) {
2251 dev_warn(dev, "feed control interrupt is not available.\n");
2252 goto err_irq;
2253 }
2254
2255 pdata->busy = false;
2256 pdata->dev = dev;
2257 platform_set_drvdata(pdev, pdata);
2258 s5p_dev = pdata;
2259
2260 tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
2261 crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
2262
2263 for (i = 0; i < ARRAY_SIZE(algs); i++) {
2264 err = crypto_register_skcipher(&algs[i]);
2265 if (err)
2266 goto err_algs;
2267 }
2268
2269 if (pdata->use_hash) {
2270 tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
2271 (unsigned long)pdata);
2272 crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
2273
2274 for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
2275 hash_i++) {
2276 struct ahash_alg *alg;
2277
2278 alg = &algs_sha1_md5_sha256[hash_i];
2279 err = crypto_register_ahash(alg);
2280 if (err) {
2281 dev_err(dev, "can't register '%s': %d\n",
2282 alg->halg.base.cra_driver_name, err);
2283 goto err_hash;
2284 }
2285 }
2286 }
2287
2288 dev_info(dev, "s5p-sss driver registered\n");
2289
2290 return 0;
2291
2292 err_hash:
2293 for (j = hash_i - 1; j >= 0; j--)
2294 crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
2295
2296 tasklet_kill(&pdata->hash_tasklet);
2297 res->end -= 0x300;
2298
2299 err_algs:
2300 if (i < ARRAY_SIZE(algs))
2301 dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
2302 err);
2303
2304 for (j = 0; j < i; j++)
2305 crypto_unregister_skcipher(&algs[j]);
2306
2307 tasklet_kill(&pdata->tasklet);
2308
2309 err_irq:
2310 clk_disable_unprepare(pdata->pclk);
2311
2312 err_clk:
2313 clk_disable_unprepare(pdata->clk);
2314 s5p_dev = NULL;
2315
2316 return err;
2317 }
2318
2319 static int s5p_aes_remove(struct platform_device *pdev)
2320 {
2321 struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
2322 int i;
2323
2324 for (i = 0; i < ARRAY_SIZE(algs); i++)
2325 crypto_unregister_skcipher(&algs[i]);
2326
2327 tasklet_kill(&pdata->tasklet);
2328 if (pdata->use_hash) {
2329 for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
2330 crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
2331
2332 pdata->res->end -= 0x300;
2333 tasklet_kill(&pdata->hash_tasklet);
2334 pdata->use_hash = false;
2335 }
2336
2337 clk_disable_unprepare(pdata->pclk);
2338
2339 clk_disable_unprepare(pdata->clk);
2340 s5p_dev = NULL;
2341
2342 return 0;
2343 }
2344
2345 static struct platform_driver s5p_aes_crypto = {
2346 .probe = s5p_aes_probe,
2347 .remove = s5p_aes_remove,
2348 .driver = {
2349 .name = "s5p-secss",
2350 .of_match_table = s5p_sss_dt_match,
2351 },
2352 };
2353
2354 module_platform_driver(s5p_aes_crypto);
2355
2356 MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
2357 MODULE_LICENSE("GPL v2");
2358 MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
2359 MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");