// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors:  Will Thomas, James Hartley
 *
 *	Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

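/* Register offsets and bit definitions for the hash accelerator. */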
#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_RSD0				0x40
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

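/* Driver state flags, kept in hdev->flags and ctx->flags. */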
#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

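/*
 * Requests shorter than IMG_HASH_DMA_THRESHOLD bytes are copied into the
 * per-request bounce buffer and written out by the CPU; anything larger
 * goes through the DMA engine.
 */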
#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	dma_addr_t		dma_addr;
	size_t			dma_ct;

	/* sg root */
	struct scatterlist	*sgfirst;
	/* walk state */
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	unsigned long		flags;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

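/* The result queue yields the digest as big-endian words. */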
static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non-DMA mode. To
	 * ensure the first data write is not grouped in a burst with the
	 * control register write, a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

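/*
 * Push data into the write port one 32-bit word at a time. The length is
 * rounded up to whole words; padding bytes in the final word are ignored
 * by the block, which has been programmed with the exact message length
 * in bits (see img_hash_hw_init()).
 */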
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

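/*
 * CPU-only path for short requests: copy the whole source scatterlist
 * into the request's bounce buffer and write it out in a single pass.
 */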
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

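/* Drain the result queue into ctx->digest, most significant word first. */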
static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
	if (IS_ERR(hdev->dma_lch)) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(hdev->dma_lch);
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

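/*
 * Tasklet that walks the request's scatterlist, issuing word-aligned DMA
 * transfers and carrying any sub-word remainder over to the next one via
 * the bounce buffer (see the comment below on the missing data valid mask).
 */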
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req || !ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU\n");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}
	return err;
}

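/*
 * Reset the block, enable the completion interrupt and program the total
 * message length in bits, split across two 32-bit registers.
 */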
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

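/*
 * init/update/final/finup/import/export are all delegated to the software
 * fallback; only digest() drives the hardware, which processes a complete
 * message in one shot.
 */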
static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

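/*
 * Enqueue a request and, if the engine is idle, dequeue the next one and
 * start it. Called both from the digest entry point and, with a NULL
 * request, from the done tasklet to keep the queue moving.
 */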
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

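/* Hardware path: compute a complete one-shot digest of req->src. */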
static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;

	} else {
		hdev = tctx->hdev;
	}

	spin_unlock(&img_hash.lock);
	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

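/*
 * Reserve request-context space for the fallback's request plus a bounce
 * buffer large enough for any CPU-path transfer.
 */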
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		return PTR_ERR(ctx->fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

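/*
 * Acknowledge the interrupt and classify its cause; completion work is
 * deferred to the done tasklet.
 */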
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

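/*
 * Completion tasklet: tear down any DMA mapping, collect the digest and
 * start the next queued request.
 */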
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

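/*
 * Probe: map the register bank and the write port, wire up the IRQ and
 * tasklets, enable clocks, request the DMA channel and register the
 * algorithms.
 */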
static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table = of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");