// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET	(1 << 0)
#define SAHARA_CMD_CLEAR_INT	(1 << 8)
#define SAHARA_CMD_CLEAR_ERR	(1 << 9)
#define SAHARA_CMD_SINGLE_STEP	(1 << 10)
#define SAHARA_CMD_MODE_BATCH	(1 << 16)
#define SAHARA_CMD_MODE_DEBUG	(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE	0
#define SAHARA_STATE_BUSY	1
#define SAHARA_STATE_ERR	2
#define SAHARA_STATE_FAULT	3
#define SAHARA_STATE_COMPLETE	4
#define SAHARA_STATE_COMP_FLAG	(1 << 2)
#define SAHARA_STATUS_DAR_FULL	(1 << 3)
#define SAHARA_STATUS_ERROR	(1 << 4)
#define SAHARA_STATUS_SECURE	(1 << 5)
#define SAHARA_STATUS_FAIL	(1 << 6)
#define SAHARA_STATUS_INIT	(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA	14
#define SAHARA_ERRSOURCE_DMA	15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

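/*
 * Hardware descriptor as read by the SAHARA DMA unit: a header word,
 * two length/pointer pairs, and the bus address of the next descriptor
 * in the chain (0 terminates the chain). All pointers are bus addresses.
 */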
struct sahara_hw_desc {
	u32	hdr;
	u32	len1;
	u32	p1;
	u32	len2;
	u32	p2;
	u32	next;
};

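/*
 * Hardware scatter/gather link: length and bus address of one data
 * segment plus the bus address of the next link (0 ends the list).
 */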
struct sahara_hw_link {
	u32	len;
	u32	p;
	u32	next;
};

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_skcipher *fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
	struct skcipher_request fallback_req;	// keep at the end
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-block of data
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	int			nb_in_sg;
	struct scatterlist	*out_sg;
	int			nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

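/*
 * Descriptor headers must have odd parity: SAHARA_HDR_PARITY_BIT is
 * part of the template below and is toggled again for each extra mode
 * bit that gets ORed in, so the total number of set bits stays odd.
 */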
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
		SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
		SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!__is_defined(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

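/*
 * Build the descriptor chain for one AES request: an optional key/IV
 * load descriptor (only when a new key was set), followed by a data
 * descriptor whose p1/p2 point to the input and output link chains.
 * Writing the first descriptor's bus address to DAR starts the engine.
 */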
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid numbers of dst SG.\n");
		return dev->nb_out_sg;
	}
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return -EINVAL;
}

static int sahara_aes_process(struct skcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->cryptlen, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->cryptlen;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->iv)
		memcpy(dev->iv_base, req->iv, AES_BLOCK_SIZE);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
					      msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return 0;
}

static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}

static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

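/*
 * ECB/CBC entry points. Key sizes the hardware cannot handle (192/256
 * bit) were prepared for the software fallback in sahara_aes_setkey(),
 * so these helpers dispatch to the fallback tfm for anything other
 * than 128-bit keys.
 */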
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_encrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_encrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(name, 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));

	return 0;
}

static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

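/*
 * Build the MDHA mode-header word for a hash descriptor. The first
 * chunk of a message uses the "set mode, init hash" form; subsequent
 * chunks use the "set mode, MD key" form so a saved context can be
 * reloaded. The parity bit is set whenever the popcount of the header
 * would otherwise be even, keeping the overall parity odd.
 */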
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}

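/*
 * Build the data descriptor that starts the SHA engine: p1 points to
 * the input link chain (or is 0 together with len1 for an empty final
 * transfer), p2 points to a single link where the hardware stores the
 * updated context, from which the digest is later copied.
 */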
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned int result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8 */
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

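/*
 * Trim a scatterlist so it describes exactly nbytes: the entry where
 * the boundary falls is shortened and marked as the end of the list.
 * Returns the number of bytes that did not fit (0 if the list already
 * covered nbytes).
 */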
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}

static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too few data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;

		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	if (rctx->buf_cnt && req->nbytes) {
		/* have data from previous operation and current */
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	} else if (rctx->buf_cnt) {
		/* have buffered data only */
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;

		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	} else {
		/* have data only */
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

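/*
 * Process one hash request on the hardware. The first chunk needs only
 * a data descriptor; every later chunk is preceded by a context-load
 * descriptor (#6) so the MDHA unit resumes from the state saved after
 * the previous chunk.
 */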
static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
					      msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

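/*
 * Worker kthread: serializes all AES and SHA requests through
 * dev->queue, notifying backlogged requests and completing each one
 * with the result of the hardware operation.
 */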
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct skcipher_request *req =
					skcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "sahara-ecb-aes",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
	.base.cra_alignmask	= 0x0,
	.base.cra_module	= THIS_MODULE,

	.init			= sahara_aes_init_tfm,
	.exit			= sahara_aes_exit_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= sahara_aes_setkey,
	.encrypt		= sahara_aes_ecb_encrypt,
	.decrypt		= sahara_aes_ecb_decrypt,
}, {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "sahara-cbc-aes",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
	.base.cra_alignmask	= 0x0,
	.base.cra_module	= THIS_MODULE,

	.init			= sahara_aes_init_tfm,
	.exit			= sahara_aes_exit_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= sahara_aes_setkey,
	.encrypt		= sahara_aes_cbc_encrypt,
	.decrypt		= sahara_aes_cbc_decrypt,
}
};

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};

static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
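
/*
 * Illustrative (hypothetical) device tree node for this driver. The
 * unit address, interrupt number, and clock specifiers below are
 * placeholders, not values taken from a real SoC dtsi; only the
 * compatible string, single reg/interrupt, and the "ipg"/"ahb" clock
 * names are required by the probe code above:
 *
 *	crypto@10025000 {
 *		compatible = "fsl,imx27-sahara";
 *		reg = <0x10025000 0x1000>;
 *		interrupts = <75>;
 *		clocks = <&clks 1>, <&clks 2>;
 *		clock-names = "ipg", "ahb";
 *	};
 */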

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
					    &dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
						SHA256_DIGEST_SIZE + 4,
						&dev->context_phys_base,
						GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			  SAHARA_CONTROL_SET_MAXBURST(8) |
			  SAHARA_CONTROL_RNG_AUTORSD |
			  SAHARA_CONTROL_ENABLE_INT,
		     SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");