0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include "rk3288_crypto.h"
0013 #include <linux/dma-mapping.h>
0014 #include <linux/module.h>
0015 #include <linux/platform_device.h>
0016 #include <linux/of.h>
0017 #include <linux/clk.h>
0018 #include <linux/crypto.h>
0019 #include <linux/reset.h>
0020
0021 static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
0022 {
0023 int err;
0024
0025 err = clk_prepare_enable(dev->sclk);
0026 if (err) {
0027 dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
0028 __func__, __LINE__);
0029 goto err_return;
0030 }
0031 err = clk_prepare_enable(dev->aclk);
0032 if (err) {
0033 dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
0034 __func__, __LINE__);
0035 goto err_aclk;
0036 }
0037 err = clk_prepare_enable(dev->hclk);
0038 if (err) {
0039 dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
0040 __func__, __LINE__);
0041 goto err_hclk;
0042 }
0043 err = clk_prepare_enable(dev->dmaclk);
0044 if (err) {
0045 dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
0046 __func__, __LINE__);
0047 goto err_dmaclk;
0048 }
0049 return err;
0050 err_dmaclk:
0051 clk_disable_unprepare(dev->hclk);
0052 err_hclk:
0053 clk_disable_unprepare(dev->aclk);
0054 err_aclk:
0055 clk_disable_unprepare(dev->sclk);
0056 err_return:
0057 return err;
0058 }
0059
/*
 * Disable and unprepare all clocks enabled by rk_crypto_enable_clk(),
 * in the reverse order they were enabled.
 */
static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}
0067
0068 static int check_alignment(struct scatterlist *sg_src,
0069 struct scatterlist *sg_dst,
0070 int align_mask)
0071 {
0072 int in, out, align;
0073
0074 in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
0075 IS_ALIGNED((uint32_t)sg_src->length, align_mask);
0076 if (!sg_dst)
0077 return in;
0078 out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
0079 IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
0080 align = in && out;
0081
0082 return (align && (sg_src->length == sg_dst->length));
0083 }
0084
/*
 * Stage one chunk of the current request for DMA.
 *
 * Aligned path: map the caller's scatterlist entries directly and
 * consume up to sg_src->length bytes.  Unaligned path: copy up to one
 * PAGE_SIZE of source data into the bounce buffer dev->addr_vir and
 * map that instead.  Updates dev->left_bytes, dev->addr_in,
 * dev->addr_out and dev->count.  Returns 0 on success or a negative
 * errno on mapping/copy failure.
 */
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	/* Once any chunk was unaligned, stay on the bounce-buffer path
	 * for the rest of the request (dev->aligned is sticky-false). */
	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				/* Unwind the source mapping on failure. */
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		/* Gather from the request's sg list into the bounce page,
		 * starting at the offset already consumed so far. */
		if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			/*
			 * NOTE(review): the same sg_tmp/bounce buffer is
			 * mapped a second time in the FROM_DEVICE direction,
			 * i.e. in-place operation through one buffer is
			 * assumed — confirm this double mapping of a single
			 * buffer is intended on this platform.
			 */
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}
0152
0153 static void rk_unload_data(struct rk_crypto_info *dev)
0154 {
0155 struct scatterlist *sg_in, *sg_out;
0156
0157 sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
0158 dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
0159
0160 if (dev->sg_dst) {
0161 sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
0162 dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
0163 }
0164 }
0165
/*
 * Interrupt handler: acknowledge the engine's interrupt status and kick
 * the done tasklet to continue or finish the in-flight request.
 */
static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	/* dev_id is the platform_device passed to devm_request_irq(). */
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	/* Write the status bits back to acknowledge/clear them. */
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);

	/* 0x0a: presumably the DMA error status bits — confirm vs. TRM. */
	if (interrupt_status & 0x0a) {
		dev_warn(dev->dev, "DMA Error\n");
		dev->err = -EFAULT;
	}
	tasklet_schedule(&dev->done_task);

	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
0184
0185 static int rk_crypto_enqueue(struct rk_crypto_info *dev,
0186 struct crypto_async_request *async_req)
0187 {
0188 unsigned long flags;
0189 int ret;
0190
0191 spin_lock_irqsave(&dev->lock, flags);
0192 ret = crypto_enqueue_request(&dev->queue, async_req);
0193 if (dev->busy) {
0194 spin_unlock_irqrestore(&dev->lock, flags);
0195 return ret;
0196 }
0197 dev->busy = true;
0198 spin_unlock_irqrestore(&dev->lock, flags);
0199 tasklet_schedule(&dev->queue_task);
0200
0201 return ret;
0202 }
0203
/*
 * Queue tasklet: dequeue the next pending request and start it.
 *
 * Clears dev->busy when the queue is empty so rk_crypto_enqueue() can
 * reschedule us.  A backlogged request gets its -EINPROGRESS
 * notification before the dequeued request is started; if start()
 * fails, the request is completed immediately with the error.
 */
static void rk_crypto_queue_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	dev->err = 0;
	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		/* Nothing pending: go idle under the lock. */
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	dev->async_req = async_req;
	err = dev->start(dev);
	if (err)
		dev->complete(dev->async_req, err);
}
0233
0234 static void rk_crypto_done_task_cb(unsigned long data)
0235 {
0236 struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
0237
0238 if (dev->err) {
0239 dev->complete(dev->async_req, dev->err);
0240 return;
0241 }
0242
0243 dev->err = dev->update(dev);
0244 if (dev->err)
0245 dev->complete(dev->async_req, dev->err);
0246 }
0247
/* All algorithms exposed by this driver; ->dev is filled in at probe. */
static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
	&rk_ahash_sha1,
	&rk_ahash_sha256,
	&rk_ahash_md5,
};
0259
0260 static int rk_crypto_register(struct rk_crypto_info *crypto_info)
0261 {
0262 unsigned int i, k;
0263 int err = 0;
0264
0265 for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
0266 rk_cipher_algs[i]->dev = crypto_info;
0267 if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
0268 err = crypto_register_skcipher(
0269 &rk_cipher_algs[i]->alg.skcipher);
0270 else
0271 err = crypto_register_ahash(
0272 &rk_cipher_algs[i]->alg.hash);
0273 if (err)
0274 goto err_cipher_algs;
0275 }
0276 return 0;
0277
0278 err_cipher_algs:
0279 for (k = 0; k < i; k++) {
0280 if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
0281 crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
0282 else
0283 crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
0284 }
0285 return err;
0286 }
0287
0288 static void rk_crypto_unregister(void)
0289 {
0290 unsigned int i;
0291
0292 for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
0293 if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
0294 crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
0295 else
0296 crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
0297 }
0298 }
0299
/* devm cleanup action: hold the crypto block in reset on driver detach. */
static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}
0306
/* Devicetree match table; also exported for module autoloading. */
static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);
0312
0313 static int rk_crypto_probe(struct platform_device *pdev)
0314 {
0315 struct device *dev = &pdev->dev;
0316 struct rk_crypto_info *crypto_info;
0317 int err = 0;
0318
0319 crypto_info = devm_kzalloc(&pdev->dev,
0320 sizeof(*crypto_info), GFP_KERNEL);
0321 if (!crypto_info) {
0322 err = -ENOMEM;
0323 goto err_crypto;
0324 }
0325
0326 crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
0327 if (IS_ERR(crypto_info->rst)) {
0328 err = PTR_ERR(crypto_info->rst);
0329 goto err_crypto;
0330 }
0331
0332 reset_control_assert(crypto_info->rst);
0333 usleep_range(10, 20);
0334 reset_control_deassert(crypto_info->rst);
0335
0336 err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
0337 if (err)
0338 goto err_crypto;
0339
0340 spin_lock_init(&crypto_info->lock);
0341
0342 crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
0343 if (IS_ERR(crypto_info->reg)) {
0344 err = PTR_ERR(crypto_info->reg);
0345 goto err_crypto;
0346 }
0347
0348 crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
0349 if (IS_ERR(crypto_info->aclk)) {
0350 err = PTR_ERR(crypto_info->aclk);
0351 goto err_crypto;
0352 }
0353
0354 crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
0355 if (IS_ERR(crypto_info->hclk)) {
0356 err = PTR_ERR(crypto_info->hclk);
0357 goto err_crypto;
0358 }
0359
0360 crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
0361 if (IS_ERR(crypto_info->sclk)) {
0362 err = PTR_ERR(crypto_info->sclk);
0363 goto err_crypto;
0364 }
0365
0366 crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
0367 if (IS_ERR(crypto_info->dmaclk)) {
0368 err = PTR_ERR(crypto_info->dmaclk);
0369 goto err_crypto;
0370 }
0371
0372 crypto_info->irq = platform_get_irq(pdev, 0);
0373 if (crypto_info->irq < 0) {
0374 dev_warn(crypto_info->dev,
0375 "control Interrupt is not available.\n");
0376 err = crypto_info->irq;
0377 goto err_crypto;
0378 }
0379
0380 err = devm_request_irq(&pdev->dev, crypto_info->irq,
0381 rk_crypto_irq_handle, IRQF_SHARED,
0382 "rk-crypto", pdev);
0383
0384 if (err) {
0385 dev_err(crypto_info->dev, "irq request failed.\n");
0386 goto err_crypto;
0387 }
0388
0389 crypto_info->dev = &pdev->dev;
0390 platform_set_drvdata(pdev, crypto_info);
0391
0392 tasklet_init(&crypto_info->queue_task,
0393 rk_crypto_queue_task_cb, (unsigned long)crypto_info);
0394 tasklet_init(&crypto_info->done_task,
0395 rk_crypto_done_task_cb, (unsigned long)crypto_info);
0396 crypto_init_queue(&crypto_info->queue, 50);
0397
0398 crypto_info->enable_clk = rk_crypto_enable_clk;
0399 crypto_info->disable_clk = rk_crypto_disable_clk;
0400 crypto_info->load_data = rk_load_data;
0401 crypto_info->unload_data = rk_unload_data;
0402 crypto_info->enqueue = rk_crypto_enqueue;
0403 crypto_info->busy = false;
0404
0405 err = rk_crypto_register(crypto_info);
0406 if (err) {
0407 dev_err(dev, "err in register alg");
0408 goto err_register_alg;
0409 }
0410
0411 dev_info(dev, "Crypto Accelerator successfully registered\n");
0412 return 0;
0413
0414 err_register_alg:
0415 tasklet_kill(&crypto_info->queue_task);
0416 tasklet_kill(&crypto_info->done_task);
0417 err_crypto:
0418 return err;
0419 }
0420
0421 static int rk_crypto_remove(struct platform_device *pdev)
0422 {
0423 struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
0424
0425 rk_crypto_unregister();
0426 tasklet_kill(&crypto_tmp->done_task);
0427 tasklet_kill(&crypto_tmp->queue_task);
0428 return 0;
0429 }
0430
/* Platform driver glue for the "rk3288-crypto" device. */
static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name	= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};
0439
/* Generate module init/exit that register/unregister the driver. */
module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");