0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * AMCC SoC PPC4xx Crypto Driver
0004  *
0005  * Copyright (c) 2008 Applied Micro Circuits Corporation.
0006  * All rights reserved. James Hsiao <jhsiao@amcc.com>
0007  *
0008  * This file implements AMCC crypto offload Linux device driver for use with
0009  * Linux CryptoAPI.
0010  */
0011 
0012 #include <linux/kernel.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/spinlock_types.h>
0015 #include <linux/random.h>
0016 #include <linux/scatterlist.h>
0017 #include <linux/crypto.h>
0018 #include <linux/dma-mapping.h>
0019 #include <linux/platform_device.h>
0020 #include <linux/init.h>
0021 #include <linux/module.h>
0022 #include <linux/of_address.h>
0023 #include <linux/of_irq.h>
0024 #include <linux/of_platform.h>
0025 #include <linux/slab.h>
0026 #include <asm/dcr.h>
0027 #include <asm/dcr-regs.h>
0028 #include <asm/cacheflush.h>
0029 #include <crypto/aead.h>
0030 #include <crypto/aes.h>
0031 #include <crypto/ctr.h>
0032 #include <crypto/gcm.h>
0033 #include <crypto/sha1.h>
0034 #include <crypto/rng.h>
0035 #include <crypto/scatterwalk.h>
0036 #include <crypto/skcipher.h>
0037 #include <crypto/internal/aead.h>
0038 #include <crypto/internal/rng.h>
0039 #include <crypto/internal/skcipher.h>
0040 #include "crypto4xx_reg_def.h"
0041 #include "crypto4xx_core.h"
0042 #include "crypto4xx_sa.h"
0043 #include "crypto4xx_trng.h"
0044 
0045 #define PPC4XX_SEC_VERSION_STR          "0.5"
0046 
0047 /*
0048  * PPC4xx Crypto Engine Initialization Routine
0049  */
0050 static void crypto4xx_hw_init(struct crypto4xx_device *dev)
0051 {
0052     union ce_ring_size ring_size;
0053     union ce_ring_control ring_ctrl;
0054     union ce_part_ring_size part_ring_size;
0055     union ce_io_threshold io_threshold;
0056     u32 rand_num;
0057     union ce_pe_dma_cfg pe_dma_cfg;
0058     u32 device_ctrl;
0059 
0060     writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
0061     /* set up PE DMA: reset sg, pdr and pe, then release the resets */
0062     pe_dma_cfg.w = 0;
0063     pe_dma_cfg.bf.bo_sgpd_en = 1;
0064     pe_dma_cfg.bf.bo_data_en = 0;
0065     pe_dma_cfg.bf.bo_sa_en = 1;
0066     pe_dma_cfg.bf.bo_pd_en = 1;
0067     pe_dma_cfg.bf.dynamic_sa_en = 1;
0068     pe_dma_cfg.bf.reset_sg = 1;
0069     pe_dma_cfg.bf.reset_pdr = 1;
0070     pe_dma_cfg.bf.reset_pe = 1;
0071     writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
0072     /* take pe, sg and pdr out of reset */
0073     pe_dma_cfg.bf.pe_mode = 0;
0074     pe_dma_cfg.bf.reset_sg = 0;
0075     pe_dma_cfg.bf.reset_pdr = 0;
0076     pe_dma_cfg.bf.reset_pe = 0;
0077     pe_dma_cfg.bf.bo_td_en = 0;
0078     writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
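         /* one ring serves as both packet descriptor ring and result ring,
          * so the PDR and RDR base registers point at the same buffer */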
0079     writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
0080     writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
0081     writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
0082     get_random_bytes(&rand_num, sizeof(rand_num));
0083     writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
0084     get_random_bytes(&rand_num, sizeof(rand_num));
0085     writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
0086     ring_size.w = 0;
0087     ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
0088     ring_size.bf.ring_size   = PPC4XX_NUM_PD;
0089     writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
0090     ring_ctrl.w = 0;
0091     writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
0092     device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
0093     device_ctrl |= PPC4XX_DC_3DES_EN;
0094     writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
0095     writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
0096     writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
0097     part_ring_size.w = 0;
0098     part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
0099     part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
0100     writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
0101     writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
0102     io_threshold.w = 0;
0103     io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
0104     io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
0105     writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
0106     writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
0107     writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
0108     writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
0109     writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
0110     writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
0111     writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
0112     writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
0113     /* take pe, sg and pdr out of reset and enable the packet engine */
0114     pe_dma_cfg.bf.pe_mode = 1;
0115     pe_dma_cfg.bf.reset_sg = 0;
0116     pe_dma_cfg.bf.reset_pdr = 0;
0117     pe_dma_cfg.bf.reset_pe = 0;
0118     pe_dma_cfg.bf.bo_td_en = 0;
0119     writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
0120     /* clear all pending interrupts */
0121     writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
0122     writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
0123     writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
0124     writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
0125     if (dev->is_revb) {
0126         writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
0127                dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
0128         writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
0129                dev->ce_base + CRYPTO4XX_INT_EN);
0130     } else {
0131         writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
0132     }
0133 }
0134 
0135 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
0136 {
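         /* 'size' is given in 32-bit words; allocate separate SA copies
          * for the inbound and outbound directions */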
0137     ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
0138     if (ctx->sa_in == NULL)
0139         return -ENOMEM;
0140 
0141     ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
0142     if (ctx->sa_out == NULL) {
0143         kfree(ctx->sa_in);
0144         ctx->sa_in = NULL;
0145         return -ENOMEM;
0146     }
0147 
0148     ctx->sa_len = size;
0149 
0150     return 0;
0151 }
0152 
0153 void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
0154 {
0155     kfree(ctx->sa_in);
0156     ctx->sa_in = NULL;
0157     kfree(ctx->sa_out);
0158     ctx->sa_out = NULL;
0159     ctx->sa_len = 0;
0160 }
0161 
0162 /*
0163  * alloc memory for the packet descriptor ring and its shadow
0164  * SA / state record pools; every descriptor gets a pd_uinfo entry
0165  * pointing at its shadow SA and state record
0166  */
0167 static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
0168 {
0169     int i;
0170     dev->pdr = dma_alloc_coherent(dev->core_dev->device,
0171                       sizeof(struct ce_pd) * PPC4XX_NUM_PD,
0172                       &dev->pdr_pa, GFP_KERNEL);
0173     if (!dev->pdr)
0174         return -ENOMEM;
0175 
0176     dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
0177                  GFP_KERNEL);
0178     if (!dev->pdr_uinfo) {
0179         dma_free_coherent(dev->core_dev->device,
0180                   sizeof(struct ce_pd) * PPC4XX_NUM_PD,
0181                   dev->pdr,
0182                   dev->pdr_pa);
0183         return -ENOMEM;
0184     }
0185     dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
0186                    sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
0187                    &dev->shadow_sa_pool_pa,
0188                    GFP_KERNEL);
0189     if (!dev->shadow_sa_pool)
0190         return -ENOMEM;
0191 
0192     dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
0193              sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
0194              &dev->shadow_sr_pool_pa, GFP_KERNEL);
0195     if (!dev->shadow_sr_pool)
0196         return -ENOMEM;
0197     for (i = 0; i < PPC4XX_NUM_PD; i++) {
0198         struct ce_pd *pd = &dev->pdr[i];
0199         struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
0200 
0201         pd->sa = dev->shadow_sa_pool_pa +
0202             sizeof(union shadow_sa_buf) * i;
0203 
0204         /* the 256 byte shadow SA buffer is enough for any kind of dynamic sa */
0205         pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
0206 
0207         /* alloc state record */
0208         pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
0209         pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
0210             sizeof(struct sa_state_record) * i;
0211     }
0212 
0213     return 0;
0214 }
0215 
0216 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
0217 {
0218     if (dev->pdr)
0219         dma_free_coherent(dev->core_dev->device,
0220                   sizeof(struct ce_pd) * PPC4XX_NUM_PD,
0221                   dev->pdr, dev->pdr_pa);
0222 
0223     if (dev->shadow_sa_pool)
0224         dma_free_coherent(dev->core_dev->device,
0225             sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
0226             dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
0227 
0228     if (dev->shadow_sr_pool)
0229         dma_free_coherent(dev->core_dev->device,
0230             sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
0231             dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
0232 
0233     kfree(dev->pdr_uinfo);
0234 }
0235 
0236 static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
0237 {
0238     u32 retval;
0239     u32 tmp;
0240 
0241     retval = dev->pdr_head;
0242     tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
0243 
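         /* the ring counts as full when advancing the head would hit the
          * tail, so one descriptor slot always stays unused */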
0244     if (tmp == dev->pdr_tail)
0245         return ERING_WAS_FULL;
0246 
0247     dev->pdr_head = tmp;
0248 
0249     return retval;
0250 }
0251 
0252 static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
0253 {
0254     struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
0255     u32 tail;
0256     unsigned long flags;
0257 
0258     spin_lock_irqsave(&dev->core_dev->lock, flags);
0259     pd_uinfo->state = PD_ENTRY_FREE;
0260 
0261     if (dev->pdr_tail != PPC4XX_LAST_PD)
0262         dev->pdr_tail++;
0263     else
0264         dev->pdr_tail = 0;
0265     tail = dev->pdr_tail;
0266     spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0267 
0268     return tail;
0269 }
0270 
0271 /*
0272  * alloc memory for the gather descriptor ring
0273  * no need to alloc data buffers for the ring entries; they are
0274  * pointed at the source pages when a request is built
0275  */
0276 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
0277 {
0278     dev->gdr = dma_alloc_coherent(dev->core_dev->device,
0279                       sizeof(struct ce_gd) * PPC4XX_NUM_GD,
0280                       &dev->gdr_pa, GFP_KERNEL);
0281     if (!dev->gdr)
0282         return -ENOMEM;
0283 
0284     return 0;
0285 }
0286 
0287 static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
0288 {
0289     if (dev->gdr)
0290         dma_free_coherent(dev->core_dev->device,
0291               sizeof(struct ce_gd) * PPC4XX_NUM_GD,
0292               dev->gdr, dev->gdr_pa);
0293 }
0294 
0295 /*
0296  * when this function is called,
0297  * preemption and interrupts must be disabled
0298  */
0299 static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
0300 {
0301     u32 retval;
0302     u32 tmp;
0303 
0304     if (n >= PPC4XX_NUM_GD)
0305         return ERING_WAS_FULL;
0306 
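         /* reserve n consecutive gather descriptors, failing if the
          * allocation would run into the ring tail */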
0307     retval = dev->gdr_head;
0308     tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
0309     if (dev->gdr_head > dev->gdr_tail) {
0310         if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
0311             return ERING_WAS_FULL;
0312     } else if (dev->gdr_head < dev->gdr_tail) {
0313         if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
0314             return ERING_WAS_FULL;
0315     }
0316     dev->gdr_head = tmp;
0317 
0318     return retval;
0319 }
0320 
0321 static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
0322 {
0323     unsigned long flags;
0324 
0325     spin_lock_irqsave(&dev->core_dev->lock, flags);
0326     if (dev->gdr_tail == dev->gdr_head) {
0327         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0328         return 0;
0329     }
0330 
0331     if (dev->gdr_tail != PPC4XX_LAST_GD)
0332         dev->gdr_tail++;
0333     else
0334         dev->gdr_tail = 0;
0335 
0336     spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0337 
0338     return 0;
0339 }
0340 
0341 static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
0342                           dma_addr_t *gd_dma, u32 idx)
0343 {
0344     *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
0345 
0346     return &dev->gdr[idx];
0347 }
0348 
0349 /*
0350  * alloc memory for the scatter ring
0351  * need to alloc buf for the ring
0352  * sdr_tail, sdr_head and sdr_count are initialized by this function
0353  */
0354 static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
0355 {
0356     int i;
0357 
0358     dev->scatter_buffer_va =
0359         dma_alloc_coherent(dev->core_dev->device,
0360             PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
0361             &dev->scatter_buffer_pa, GFP_KERNEL);
0362     if (!dev->scatter_buffer_va)
0363         return -ENOMEM;
0364 
0365     /* alloc memory for scatter descriptor ring */
0366     dev->sdr = dma_alloc_coherent(dev->core_dev->device,
0367                       sizeof(struct ce_sd) * PPC4XX_NUM_SD,
0368                       &dev->sdr_pa, GFP_KERNEL);
0369     if (!dev->sdr)
0370         return -ENOMEM;
0371 
0372     for (i = 0; i < PPC4XX_NUM_SD; i++) {
0373         dev->sdr[i].ptr = dev->scatter_buffer_pa +
0374                   PPC4XX_SD_BUFFER_SIZE * i;
0375     }
0376 
0377     return 0;
0378 }
0379 
0380 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
0381 {
0382     if (dev->sdr)
0383         dma_free_coherent(dev->core_dev->device,
0384                   sizeof(struct ce_sd) * PPC4XX_NUM_SD,
0385                   dev->sdr, dev->sdr_pa);
0386 
0387     if (dev->scatter_buffer_va)
0388         dma_free_coherent(dev->core_dev->device,
0389                   PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
0390                   dev->scatter_buffer_va,
0391                   dev->scatter_buffer_pa);
0392 }
0393 
0394 /*
0395  * when this function is called,
0396  * preemption and interrupts must be disabled
0397  */
0398 static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
0399 {
0400     u32 retval;
0401     u32 tmp;
0402 
0403     if (n >= PPC4XX_NUM_SD)
0404         return ERING_WAS_FULL;
0405 
0406     retval = dev->sdr_head;
0407     tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
0408     if (dev->sdr_head > dev->sdr_tail) {
0409         if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
0410             return ERING_WAS_FULL;
0411     } else if (dev->sdr_head < dev->sdr_tail) {
0412         if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
0413             return ERING_WAS_FULL;
0414     } /* the head == tail (empty) case is already taken care of */
0415     dev->sdr_head = tmp;
0416 
0417     return retval;
0418 }
0419 
0420 static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
0421 {
0422     unsigned long flags;
0423 
0424     spin_lock_irqsave(&dev->core_dev->lock, flags);
0425     if (dev->sdr_tail == dev->sdr_head) {
0426         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0427         return 0;
0428     }
0429     if (dev->sdr_tail != PPC4XX_LAST_SD)
0430         dev->sdr_tail++;
0431     else
0432         dev->sdr_tail = 0;
0433     spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0434 
0435     return 0;
0436 }
0437 
0438 static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
0439                           dma_addr_t *sd_dma, u32 idx)
0440 {
0441     *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
0442 
0443     return &dev->sdr[idx];
0444 }
0445 
0446 static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
0447                       struct ce_pd *pd,
0448                       struct pd_uinfo *pd_uinfo,
0449                       u32 nbytes,
0450                       struct scatterlist *dst)
0451 {
0452     unsigned int first_sd = pd_uinfo->first_sd;
0453     unsigned int last_sd;
0454     unsigned int overflow = 0;
0455     unsigned int to_copy;
0456     unsigned int dst_start = 0;
0457 
0458     /*
0459      * Because the scatter buffers are all neatly organized in one
0460      * big continuous ringbuffer, scatterwalk_map_and_copy() can
0461      * be instructed to copy a range of buffers in one go.
0462      */
0463 
0464     last_sd = (first_sd + pd_uinfo->num_sd);
0465     if (last_sd > PPC4XX_LAST_SD) {
0466         last_sd = PPC4XX_LAST_SD;
0467         overflow = last_sd % PPC4XX_NUM_SD;
0468     }
0469 
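         /* if the scatter descriptor range wraps past the end of the ring,
          * copy in two chunks, continuing from the start of the buffer */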
0470     while (nbytes) {
0471         void *buf = dev->scatter_buffer_va +
0472             first_sd * PPC4XX_SD_BUFFER_SIZE;
0473 
0474         to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
0475                       (1 + last_sd - first_sd));
0476         scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
0477         nbytes -= to_copy;
0478 
0479         if (overflow) {
0480             first_sd = 0;
0481             last_sd = overflow;
0482             dst_start += to_copy;
0483             overflow = 0;
0484         }
0485     }
0486 }
0487 
0488 static void crypto4xx_copy_digest_to_dst(void *dst,
0489                     struct pd_uinfo *pd_uinfo,
0490                     struct crypto4xx_ctx *ctx)
0491 {
0492     struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
0493 
0494     if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
0495         memcpy(dst, pd_uinfo->sr_va->save_digest,
0496                SA_HASH_ALG_SHA1_DIGEST_SIZE);
0497     }
0498 }
0499 
0500 static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
0501                   struct pd_uinfo *pd_uinfo)
0502 {
0503     int i;
0504     if (pd_uinfo->num_gd) {
0505         for (i = 0; i < pd_uinfo->num_gd; i++)
0506             crypto4xx_put_gd_to_gdr(dev);
0507         pd_uinfo->first_gd = 0xffffffff;
0508         pd_uinfo->num_gd = 0;
0509     }
0510     if (pd_uinfo->num_sd) {
0511         for (i = 0; i < pd_uinfo->num_sd; i++)
0512             crypto4xx_put_sd_to_sdr(dev);
0513 
0514         pd_uinfo->first_sd = 0xffffffff;
0515         pd_uinfo->num_sd = 0;
0516     }
0517 }
0518 
0519 static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
0520                      struct pd_uinfo *pd_uinfo,
0521                      struct ce_pd *pd)
0522 {
0523     struct skcipher_request *req;
0524     struct scatterlist *dst;
0525     dma_addr_t addr;
0526 
0527     req = skcipher_request_cast(pd_uinfo->async_req);
0528 
0529     if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
0530         crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
0531                       req->cryptlen, req->dst);
0532     } else {
0533         dst = pd_uinfo->dest_va;
0534         addr = dma_map_page(dev->core_dev->device, sg_page(dst),
0535                     dst->offset, dst->length, DMA_FROM_DEVICE);
0536     }
0537 
0538     if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
0539         struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
0540 
0541         crypto4xx_memcpy_from_le32((u32 *)req->iv,
0542             pd_uinfo->sr_va->save_iv,
0543             crypto_skcipher_ivsize(skcipher));
0544     }
0545 
0546     crypto4xx_ret_sg_desc(dev, pd_uinfo);
0547 
0548     if (pd_uinfo->state & PD_ENTRY_BUSY)
0549         skcipher_request_complete(req, -EINPROGRESS);
0550     skcipher_request_complete(req, 0);
0551 }
0552 
0553 static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
0554                 struct pd_uinfo *pd_uinfo)
0555 {
0556     struct crypto4xx_ctx *ctx;
0557     struct ahash_request *ahash_req;
0558 
0559     ahash_req = ahash_request_cast(pd_uinfo->async_req);
0560     ctx  = crypto_tfm_ctx(ahash_req->base.tfm);
0561 
0562     crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
0563                      crypto_tfm_ctx(ahash_req->base.tfm));
0564     crypto4xx_ret_sg_desc(dev, pd_uinfo);
0565 
0566     if (pd_uinfo->state & PD_ENTRY_BUSY)
0567         ahash_request_complete(ahash_req, -EINPROGRESS);
0568     ahash_request_complete(ahash_req, 0);
0569 }
0570 
0571 static void crypto4xx_aead_done(struct crypto4xx_device *dev,
0572                 struct pd_uinfo *pd_uinfo,
0573                 struct ce_pd *pd)
0574 {
0575     struct aead_request *aead_req = container_of(pd_uinfo->async_req,
0576         struct aead_request, base);
0577     struct scatterlist *dst = pd_uinfo->dest_va;
0578     size_t cp_len = crypto_aead_authsize(
0579         crypto_aead_reqtfm(aead_req));
0580     u32 icv[AES_BLOCK_SIZE];
0581     int err = 0;
0582 
0583     if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
0584         crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
0585                       pd->pd_ctl_len.bf.pkt_len,
0586                       dst);
0587     } else {
0588         dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
0589                 DMA_FROM_DEVICE);
0590     }
0591 
0592     if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
0593         /* append icv at the end */
0594         crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
0595                        sizeof(icv));
0596 
0597         scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
0598                      cp_len, 1);
0599     } else {
0600         /* check icv at the end */
0601         scatterwalk_map_and_copy(icv, aead_req->src,
0602             aead_req->assoclen + aead_req->cryptlen -
0603             cp_len, cp_len, 0);
0604 
0605         crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
0606 
0607         if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
0608             err = -EBADMSG;
0609     }
0610 
0611     crypto4xx_ret_sg_desc(dev, pd_uinfo);
0612 
0613     if (pd->pd_ctl.bf.status & 0xff) {
0614         if (!__ratelimit(&dev->aead_ratelimit)) {
0615             if (pd->pd_ctl.bf.status & 2)
0616                 pr_err("pad fail error\n");
0617             if (pd->pd_ctl.bf.status & 4)
0618                 pr_err("seqnum fail\n");
0619             if (pd->pd_ctl.bf.status & 8)
0620                 pr_err("error _notify\n");
0621             pr_err("aead return err status = 0x%02x\n",
0622                 pd->pd_ctl.bf.status & 0xff);
0623             pr_err("pd pad_ctl = 0x%08x\n",
0624                 pd->pd_ctl.bf.pd_pad_ctl);
0625         }
0626         err = -EINVAL;
0627     }
0628 
0629     if (pd_uinfo->state & PD_ENTRY_BUSY)
0630         aead_request_complete(aead_req, -EINPROGRESS);
0631 
0632     aead_request_complete(aead_req, err);
0633 }
0634 
0635 static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
0636 {
0637     struct ce_pd *pd = &dev->pdr[idx];
0638     struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
0639 
0640     switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
0641     case CRYPTO_ALG_TYPE_SKCIPHER:
0642         crypto4xx_cipher_done(dev, pd_uinfo, pd);
0643         break;
0644     case CRYPTO_ALG_TYPE_AEAD:
0645         crypto4xx_aead_done(dev, pd_uinfo, pd);
0646         break;
0647     case CRYPTO_ALG_TYPE_AHASH:
0648         crypto4xx_ahash_done(dev, pd_uinfo);
0649         break;
0650     }
0651 }
0652 
0653 static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
0654 {
0655     crypto4xx_destroy_pdr(core_dev->dev);
0656     crypto4xx_destroy_gdr(core_dev->dev);
0657     crypto4xx_destroy_sdr(core_dev->dev);
0658     iounmap(core_dev->dev->ce_base);
0659     kfree(core_dev->dev);
0660     kfree(core_dev);
0661 }
0662 
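     /* advance a gather ring index, wrapping around after the last descriptor */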
0663 static u32 get_next_gd(u32 current)
0664 {
0665     if (current != PPC4XX_LAST_GD)
0666         return current + 1;
0667     else
0668         return 0;
0669 }
0670 
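     /* advance a scatter ring index, wrapping around after the last descriptor */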
0671 static u32 get_next_sd(u32 current)
0672 {
0673     if (current != PPC4XX_LAST_SD)
0674         return current + 1;
0675     else
0676         return 0;
0677 }
0678 
0679 int crypto4xx_build_pd(struct crypto_async_request *req,
0680                struct crypto4xx_ctx *ctx,
0681                struct scatterlist *src,
0682                struct scatterlist *dst,
0683                const unsigned int datalen,
0684                const __le32 *iv, const u32 iv_len,
0685                const struct dynamic_sa_ctl *req_sa,
0686                const unsigned int sa_len,
0687                const unsigned int assoclen,
0688                struct scatterlist *_dst)
0689 {
0690     struct crypto4xx_device *dev = ctx->dev;
0691     struct dynamic_sa_ctl *sa;
0692     struct ce_gd *gd;
0693     struct ce_pd *pd;
0694     u32 num_gd, num_sd;
0695     u32 fst_gd = 0xffffffff;
0696     u32 fst_sd = 0xffffffff;
0697     u32 pd_entry;
0698     unsigned long flags;
0699     struct pd_uinfo *pd_uinfo;
0700     unsigned int nbytes = datalen;
0701     size_t offset_to_sr_ptr;
0702     u32 gd_idx = 0;
0703     int tmp;
0704     bool is_busy, force_sd;
0705 
0706     /*
0707      * There's a very subtle/disguised "bug" in the hardware that
0708      * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
0709      * of the hardware spec:
0710      * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
0711      * operation modes for >>> "Block ciphers" <<<.
0712      *
0713      * To work around this issue and stop the hardware from causing
0714      * "overran dst buffer" on ciphertexts that are not a multiple
0715      * of 16 (AES_BLOCK_SIZE), we force the driver to use the
0716      * scatter buffers.
0717      */
0718     force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
0719         || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
0720         && (datalen % AES_BLOCK_SIZE);
0721 
0722     /* figure how many gd are needed */
0723     tmp = sg_nents_for_len(src, assoclen + datalen);
0724     if (tmp < 0) {
0725         dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
0726         return tmp;
0727     }
0728     if (tmp == 1)
0729         tmp = 0;
0730     num_gd = tmp;
0731 
0732     if (assoclen) {
0733         nbytes += assoclen;
0734         dst = scatterwalk_ffwd(_dst, dst, assoclen);
0735     }
0736 
0737     /* figure how many sd are needed */
0738     if (sg_is_last(dst) && force_sd == false) {
0739         num_sd = 0;
0740     } else {
0741         if (datalen > PPC4XX_SD_BUFFER_SIZE) {
0742             num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
0743             if (datalen % PPC4XX_SD_BUFFER_SIZE)
0744                 num_sd++;
0745         } else {
0746             num_sd = 1;
0747         }
0748     }
0749 
0750     /*
0751      * The following section of code needs to be protected:
0752      * the gather ring and scatter ring allocations must be consecutive.
0753      * If we run out of any kind of descriptor, the descriptors
0754      * already taken must be returned to their original place.
0755      */
0756     spin_lock_irqsave(&dev->core_dev->lock, flags);
0757     /*
0758      * Let the caller know to slow down once more than 13/16ths = 81%
0759      * of the available data contexts are being used simultaneously.
0760      *
0761      * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
0762      * 31 more contexts before new requests have to be rejected.
0763      */
0764     if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
0765         is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
0766             ((PPC4XX_NUM_PD * 13) / 16);
0767     } else {
0768         /*
0769          * To fix contention issues between ipsec (no backlog) and
0770          * dm-crypt (backlog) reserve 32 entries for "no backlog"
0771          * data contexts.
0772          */
0773         is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
0774             ((PPC4XX_NUM_PD * 15) / 16);
0775 
0776         if (is_busy) {
0777             spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0778             return -EBUSY;
0779         }
0780     }
0781 
0782     if (num_gd) {
0783         fst_gd = crypto4xx_get_n_gd(dev, num_gd);
0784         if (fst_gd == ERING_WAS_FULL) {
0785             spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0786             return -EAGAIN;
0787         }
0788     }
0789     if (num_sd) {
0790         fst_sd = crypto4xx_get_n_sd(dev, num_sd);
0791         if (fst_sd == ERING_WAS_FULL) {
0792             if (num_gd)
0793                 dev->gdr_head = fst_gd;
0794             spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0795             return -EAGAIN;
0796         }
0797     }
0798     pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
0799     if (pd_entry == ERING_WAS_FULL) {
0800         if (num_gd)
0801             dev->gdr_head = fst_gd;
0802         if (num_sd)
0803             dev->sdr_head = fst_sd;
0804         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0805         return -EAGAIN;
0806     }
0807     spin_unlock_irqrestore(&dev->core_dev->lock, flags);
0808 
0809     pd = &dev->pdr[pd_entry];
0810     pd->sa_len = sa_len;
0811 
0812     pd_uinfo = &dev->pdr_uinfo[pd_entry];
0813     pd_uinfo->num_gd = num_gd;
0814     pd_uinfo->num_sd = num_sd;
0815     pd_uinfo->dest_va = dst;
0816     pd_uinfo->async_req = req;
0817 
0818     if (iv_len)
0819         memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
0820 
0821     sa = pd_uinfo->sa_va;
0822     memcpy(sa, req_sa, sa_len * 4);
0823 
0824     sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
0825     offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
0826     *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
0827 
0828     if (num_gd) {
0829         dma_addr_t gd_dma;
0830         struct scatterlist *sg;
0831 
0832         /* get first gd we are going to use */
0833         gd_idx = fst_gd;
0834         pd_uinfo->first_gd = fst_gd;
0835         gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
0836         pd->src = gd_dma;
0837         /* enable gather */
0838         sa->sa_command_0.bf.gather = 1;
0839         /* walk the sg, and setup gather array */
0840 
0841         sg = src;
0842         while (nbytes) {
0843             size_t len;
0844 
0845             len = min(sg->length, nbytes);
0846             gd->ptr = dma_map_page(dev->core_dev->device,
0847                 sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
0848             gd->ctl_len.len = len;
0849             gd->ctl_len.done = 0;
0850             gd->ctl_len.ready = 1;
0851             if (len >= nbytes)
0852                 break;
0853 
0854             nbytes -= sg->length;
0855             gd_idx = get_next_gd(gd_idx);
0856             gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
0857             sg = sg_next(sg);
0858         }
0859     } else {
0860         pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
0861                 src->offset, min(nbytes, src->length),
0862                 DMA_TO_DEVICE);
0863         /*
0864          * Disable gather in sa command
0865          */
0866         sa->sa_command_0.bf.gather = 0;
0867         /*
0868          * Indicate gather array is not used
0869          */
0870         pd_uinfo->first_gd = 0xffffffff;
0871     }
0872     if (!num_sd) {
0873         /*
0874          * we know the application gave us a dst that is one whole
0875          * piece of memory; no need to use the scatter ring.
0876          */
0877         pd_uinfo->first_sd = 0xffffffff;
0878         sa->sa_command_0.bf.scatter = 0;
0879         pd->dest = (u32)dma_map_page(dev->core_dev->device,
0880                          sg_page(dst), dst->offset,
0881                          min(datalen, dst->length),
0882                          DMA_TO_DEVICE);
0883     } else {
0884         dma_addr_t sd_dma;
0885         struct ce_sd *sd = NULL;
0886 
0887         u32 sd_idx = fst_sd;
0888         nbytes = datalen;
0889         sa->sa_command_0.bf.scatter = 1;
0890         pd_uinfo->first_sd = fst_sd;
0891         sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
0892         pd->dest = sd_dma;
0893         /* setup scatter descriptor */
0894         sd->ctl.done = 0;
0895         sd->ctl.rdy = 1;
0896         /* sd->ptr was already set up when the scatter ring was built */
0897         if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
0898             nbytes -= PPC4XX_SD_BUFFER_SIZE;
0899         else
0900             nbytes = 0;
0901         while (nbytes) {
0902             sd_idx = get_next_sd(sd_idx);
0903             sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
0904             /* setup scatter descriptor */
0905             sd->ctl.done = 0;
0906             sd->ctl.rdy = 1;
0907             if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
0908                 nbytes -= PPC4XX_SD_BUFFER_SIZE;
0909             } else {
0910                 /*
0911                  * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
0912                  * which is more than nbytes, so done.
0913                  */
0914                 nbytes = 0;
0915             }
0916         }
0917     }
0918 
0919     pd->pd_ctl.w = PD_CTL_HOST_READY |
0920         ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
0921          (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
0922             PD_CTL_HASH_FINAL : 0);
0923     pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
0924     pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
0925 
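         /* make sure all descriptor and SA writes are visible in memory
          * before the engine is kicked below */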
0926     wmb();
0927     /* write any value to push engine to read a pd */
0928     writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
0929     writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
0930     return is_busy ? -EBUSY : -EINPROGRESS;
0931 }
0932 
0933 /*
0934  * Algorithm Registration Functions
0935  */
0936 static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
0937                    struct crypto4xx_ctx *ctx)
0938 {
0939     ctx->dev = amcc_alg->dev;
0940     ctx->sa_in = NULL;
0941     ctx->sa_out = NULL;
0942     ctx->sa_len = 0;
0943 }
0944 
0945 static int crypto4xx_sk_init(struct crypto_skcipher *sk)
0946 {
0947     struct skcipher_alg *alg = crypto_skcipher_alg(sk);
0948     struct crypto4xx_alg *amcc_alg;
0949     struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
0950 
0951     if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
0952         ctx->sw_cipher.cipher =
0953             crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
0954                           CRYPTO_ALG_NEED_FALLBACK);
0955         if (IS_ERR(ctx->sw_cipher.cipher))
0956             return PTR_ERR(ctx->sw_cipher.cipher);
0957     }
0958 
0959     amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
0960     crypto4xx_ctx_init(amcc_alg, ctx);
0961     return 0;
0962 }
0963 
0964 static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
0965 {
0966     crypto4xx_free_sa(ctx);
0967 }
0968 
0969 static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
0970 {
0971     struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
0972 
0973     crypto4xx_common_exit(ctx);
0974     if (ctx->sw_cipher.cipher)
0975         crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
0976 }
0977 
0978 static int crypto4xx_aead_init(struct crypto_aead *tfm)
0979 {
0980     struct aead_alg *alg = crypto_aead_alg(tfm);
0981     struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
0982     struct crypto4xx_alg *amcc_alg;
0983 
0984     ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
0985                         CRYPTO_ALG_NEED_FALLBACK |
0986                         CRYPTO_ALG_ASYNC);
0987     if (IS_ERR(ctx->sw_cipher.aead))
0988         return PTR_ERR(ctx->sw_cipher.aead);
0989 
0990     amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
0991     crypto4xx_ctx_init(amcc_alg, ctx);
0992     crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
0993                 crypto_aead_reqsize(ctx->sw_cipher.aead),
0994                 sizeof(struct crypto4xx_aead_reqctx)));
0995     return 0;
0996 }
0997 
0998 static void crypto4xx_aead_exit(struct crypto_aead *tfm)
0999 {
1000     struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
1001 
1002     crypto4xx_common_exit(ctx);
1003     crypto_free_aead(ctx->sw_cipher.aead);
1004 }
1005 
1006 static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1007                   struct crypto4xx_alg_common *crypto_alg,
1008                   int array_size)
1009 {
1010     struct crypto4xx_alg *alg;
1011     int i;
1012     int rc = 0;
1013 
1014     for (i = 0; i < array_size; i++) {
1015         alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
1016         if (!alg)
1017             return -ENOMEM;
1018 
1019         alg->alg = crypto_alg[i];
1020         alg->dev = sec_dev;
1021 
1022         switch (alg->alg.type) {
1023         case CRYPTO_ALG_TYPE_AEAD:
1024             rc = crypto_register_aead(&alg->alg.u.aead);
1025             break;
1026 
1027         case CRYPTO_ALG_TYPE_AHASH:
1028             rc = crypto_register_ahash(&alg->alg.u.hash);
1029             break;
1030 
1031         case CRYPTO_ALG_TYPE_RNG:
1032             rc = crypto_register_rng(&alg->alg.u.rng);
1033             break;
1034 
1035         default:
1036             rc = crypto_register_skcipher(&alg->alg.u.cipher);
1037             break;
1038         }
1039 
1040         if (rc)
1041             kfree(alg);
1042         else
1043             list_add_tail(&alg->entry, &sec_dev->alg_list);
1044     }
1045 
1046     return 0;
1047 }
1048 
1049 static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1050 {
1051     struct crypto4xx_alg *alg, *tmp;
1052 
1053     list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1054         list_del(&alg->entry);
1055         switch (alg->alg.type) {
1056         case CRYPTO_ALG_TYPE_AHASH:
1057             crypto_unregister_ahash(&alg->alg.u.hash);
1058             break;
1059 
1060         case CRYPTO_ALG_TYPE_AEAD:
1061             crypto_unregister_aead(&alg->alg.u.aead);
1062             break;
1063 
1064         case CRYPTO_ALG_TYPE_RNG:
1065             crypto_unregister_rng(&alg->alg.u.rng);
1066             break;
1067 
1068         default:
1069             crypto_unregister_skcipher(&alg->alg.u.cipher);
1070         }
1071         kfree(alg);
1072     }
1073 }
1074 
1075 static void crypto4xx_bh_tasklet_cb(unsigned long data)
1076 {
1077     struct device *dev = (struct device *)data;
1078     struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1079     struct pd_uinfo *pd_uinfo;
1080     struct ce_pd *pd;
1081     u32 tail = core_dev->dev->pdr_tail;
1082     u32 head = core_dev->dev->pdr_head;
1083 
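         /* walk the PD ring from tail towards head, completing every
          * descriptor the engine has marked done; stop at the first
          * entry that is still pending */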
1084     do {
1085         pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
1086         pd = &core_dev->dev->pdr[tail];
1087         if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
1088              ((READ_ONCE(pd->pd_ctl.w) &
1089                (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
1090                PD_CTL_PE_DONE)) {
1091             crypto4xx_pd_done(core_dev->dev, tail);
1092             tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
1093         } else {
1094             /* if tail not done, break */
1095             break;
1096         }
1097     } while (head != tail);
1098 }
1099 
1100 /*
1101  * Top Half of isr.
1102  */
1103 static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
1104                               u32 clr_val)
1105 {
1106     struct device *dev = (struct device *)data;
1107     struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1108 
1109     writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1110     tasklet_schedule(&core_dev->tasklet);
1111 
1112     return IRQ_HANDLED;
1113 }
1114 
1115 static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1116 {
1117     return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
1118 }
1119 
1120 static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
1121 {
1122     return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
1123         PPC4XX_TMO_ERR_INT);
1124 }
1125 
1126 static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
1127                  u8 *data, unsigned int max)
1128 {
1129     unsigned int i, curr = 0;
1130     u32 val[2];
1131 
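         /* each round polls the PRNG until it is idle, then reads the two
          * 32-bit result registers, i.e. 8 bytes of output */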
1132     do {
1133         /* trigger PRN generation */
1134         writel(PPC4XX_PRNG_CTRL_AUTO_EN,
1135                dev->ce_base + CRYPTO4XX_PRNG_CTRL);
1136 
1137         for (i = 0; i < 1024; i++) {
1138             /* usually 19 iterations are enough */
1139             if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
1140                  CRYPTO4XX_PRNG_STAT_BUSY))
1141                 continue;
1142 
1143             val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
1144             val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
1145             break;
1146         }
1147         if (i == 1024)
1148             return -ETIMEDOUT;
1149 
1150         if ((max - curr) >= 8) {
1151             memcpy(data, &val, 8);
1152             data += 8;
1153             curr += 8;
1154         } else {
1155             /* copy only remaining bytes */
1156             memcpy(data, &val, max - curr);
1157             break;
1158         }
1159     } while (curr < max);
1160 
1161     return curr;
1162 }
1163 
1164 static int crypto4xx_prng_generate(struct crypto_rng *tfm,
1165                    const u8 *src, unsigned int slen,
1166                    u8 *dstn, unsigned int dlen)
1167 {
1168     struct rng_alg *alg = crypto_rng_alg(tfm);
1169     struct crypto4xx_alg *amcc_alg;
1170     struct crypto4xx_device *dev;
1171     int ret;
1172 
1173     amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
1174     dev = amcc_alg->dev;
1175 
1176     mutex_lock(&dev->core_dev->rng_lock);
1177     ret = ppc4xx_prng_data_read(dev, dstn, dlen);
1178     mutex_unlock(&dev->core_dev->rng_lock);
1179     return ret;
1180 }
1181 
1182 
1183 static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
1184             unsigned int slen)
1185 {
1186     return 0;
1187 }
1188 
1189 /*
1190  * Supported Crypto Algorithms
1191  */
1192 static struct crypto4xx_alg_common crypto4xx_alg[] = {
1193     /* Crypto AES modes */
1194     { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1195         .base = {
1196             .cra_name = "cbc(aes)",
1197             .cra_driver_name = "cbc-aes-ppc4xx",
1198             .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1199             .cra_flags = CRYPTO_ALG_ASYNC |
1200                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1201             .cra_blocksize = AES_BLOCK_SIZE,
1202             .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1203             .cra_module = THIS_MODULE,
1204         },
1205         .min_keysize = AES_MIN_KEY_SIZE,
1206         .max_keysize = AES_MAX_KEY_SIZE,
1207         .ivsize = AES_IV_SIZE,
1208         .setkey = crypto4xx_setkey_aes_cbc,
1209         .encrypt = crypto4xx_encrypt_iv_block,
1210         .decrypt = crypto4xx_decrypt_iv_block,
1211         .init = crypto4xx_sk_init,
1212         .exit = crypto4xx_sk_exit,
1213     } },
1214     { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1215         .base = {
1216             .cra_name = "cfb(aes)",
1217             .cra_driver_name = "cfb-aes-ppc4xx",
1218             .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1219             .cra_flags = CRYPTO_ALG_ASYNC |
1220                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1221             .cra_blocksize = 1,
1222             .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1223             .cra_module = THIS_MODULE,
1224         },
1225         .min_keysize = AES_MIN_KEY_SIZE,
1226         .max_keysize = AES_MAX_KEY_SIZE,
1227         .ivsize = AES_IV_SIZE,
1228         .setkey = crypto4xx_setkey_aes_cfb,
1229         .encrypt = crypto4xx_encrypt_iv_stream,
1230         .decrypt = crypto4xx_decrypt_iv_stream,
1231         .init = crypto4xx_sk_init,
1232         .exit = crypto4xx_sk_exit,
1233     } },
1234     { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1235         .base = {
1236             .cra_name = "ctr(aes)",
1237             .cra_driver_name = "ctr-aes-ppc4xx",
1238             .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1239             .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
1240                 CRYPTO_ALG_ASYNC |
1241                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1242             .cra_blocksize = 1,
1243             .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1244             .cra_module = THIS_MODULE,
1245         },
1246         .min_keysize = AES_MIN_KEY_SIZE,
1247         .max_keysize = AES_MAX_KEY_SIZE,
1248         .ivsize = AES_IV_SIZE,
1249         .setkey = crypto4xx_setkey_aes_ctr,
1250         .encrypt = crypto4xx_encrypt_ctr,
1251         .decrypt = crypto4xx_decrypt_ctr,
1252         .init = crypto4xx_sk_init,
1253         .exit = crypto4xx_sk_exit,
1254     } },
1255     { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1256         .base = {
1257             .cra_name = "rfc3686(ctr(aes))",
1258             .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
1259             .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1260             .cra_flags = CRYPTO_ALG_ASYNC |
1261                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1262             .cra_blocksize = 1,
1263             .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1264             .cra_module = THIS_MODULE,
1265         },
1266         .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1267         .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1268         .ivsize = CTR_RFC3686_IV_SIZE,
1269         .setkey = crypto4xx_setkey_rfc3686,
1270         .encrypt = crypto4xx_rfc3686_encrypt,
1271         .decrypt = crypto4xx_rfc3686_decrypt,
1272         .init = crypto4xx_sk_init,
1273         .exit = crypto4xx_sk_exit,
1274     } },
1275     { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1276         .base = {
1277             .cra_name = "ecb(aes)",
1278             .cra_driver_name = "ecb-aes-ppc4xx",
1279             .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1280             .cra_flags = CRYPTO_ALG_ASYNC |
1281                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1282             .cra_blocksize = AES_BLOCK_SIZE,
1283             .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1284             .cra_module = THIS_MODULE,
1285         },
1286         .min_keysize = AES_MIN_KEY_SIZE,
1287         .max_keysize = AES_MAX_KEY_SIZE,
1288         .setkey = crypto4xx_setkey_aes_ecb,
1289         .encrypt = crypto4xx_encrypt_noiv_block,
1290         .decrypt = crypto4xx_decrypt_noiv_block,
1291         .init = crypto4xx_sk_init,
1292         .exit = crypto4xx_sk_exit,
1293     } },
1294     { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1295         .base = {
1296             .cra_name = "ofb(aes)",
1297             .cra_driver_name = "ofb-aes-ppc4xx",
1298             .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1299             .cra_flags = CRYPTO_ALG_ASYNC |
1300                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1301             .cra_blocksize = 1,
1302             .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1303             .cra_module = THIS_MODULE,
1304         },
1305         .min_keysize = AES_MIN_KEY_SIZE,
1306         .max_keysize = AES_MAX_KEY_SIZE,
1307         .ivsize = AES_IV_SIZE,
1308         .setkey = crypto4xx_setkey_aes_ofb,
1309         .encrypt = crypto4xx_encrypt_iv_stream,
1310         .decrypt = crypto4xx_decrypt_iv_stream,
1311         .init = crypto4xx_sk_init,
1312         .exit = crypto4xx_sk_exit,
1313     } },
1314 
1315     /* AEAD */
1316     { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1317         .setkey     = crypto4xx_setkey_aes_ccm,
1318         .setauthsize    = crypto4xx_setauthsize_aead,
1319         .encrypt    = crypto4xx_encrypt_aes_ccm,
1320         .decrypt    = crypto4xx_decrypt_aes_ccm,
1321         .init       = crypto4xx_aead_init,
1322         .exit       = crypto4xx_aead_exit,
1323         .ivsize     = AES_BLOCK_SIZE,
1324         .maxauthsize    = 16,
1325         .base = {
1326             .cra_name   = "ccm(aes)",
1327             .cra_driver_name = "ccm-aes-ppc4xx",
1328             .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
1329             .cra_flags  = CRYPTO_ALG_ASYNC |
1330                       CRYPTO_ALG_NEED_FALLBACK |
1331                       CRYPTO_ALG_KERN_DRIVER_ONLY,
1332             .cra_blocksize  = 1,
1333             .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
1334             .cra_module = THIS_MODULE,
1335         },
1336     } },
1337     { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1338         .setkey     = crypto4xx_setkey_aes_gcm,
1339         .setauthsize    = crypto4xx_setauthsize_aead,
1340         .encrypt    = crypto4xx_encrypt_aes_gcm,
1341         .decrypt    = crypto4xx_decrypt_aes_gcm,
1342         .init       = crypto4xx_aead_init,
1343         .exit       = crypto4xx_aead_exit,
1344         .ivsize     = GCM_AES_IV_SIZE,
1345         .maxauthsize    = 16,
1346         .base = {
1347             .cra_name   = "gcm(aes)",
1348             .cra_driver_name = "gcm-aes-ppc4xx",
1349             .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
1350             .cra_flags  = CRYPTO_ALG_ASYNC |
1351                       CRYPTO_ALG_NEED_FALLBACK |
1352                       CRYPTO_ALG_KERN_DRIVER_ONLY,
1353             .cra_blocksize  = 1,
1354             .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
1355             .cra_module = THIS_MODULE,
1356         },
1357     } },
1358     { .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
1359         .base = {
1360             .cra_name       = "stdrng",
1361             .cra_driver_name        = "crypto4xx_rng",
1362             .cra_priority       = 300,
1363             .cra_ctxsize        = 0,
1364             .cra_module     = THIS_MODULE,
1365         },
1366         .generate               = crypto4xx_prng_generate,
1367         .seed                   = crypto4xx_prng_seed,
1368         .seedsize               = 0,
1369     } },
1370 };
1371 
1372 /*
1373  * Module Initialization Routine
1374  */
1375 static int crypto4xx_probe(struct platform_device *ofdev)
1376 {
1377     int rc;
1378     struct resource res;
1379     struct device *dev = &ofdev->dev;
1380     struct crypto4xx_core_device *core_dev;
1381     struct device_node *np;
1382     u32 pvr;
1383     bool is_revb = true;
1384 
1385     rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
1386     if (rc)
1387         return -ENODEV;
1388 
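         /* identify the SoC variant and pulse its crypto engine reset bit
          * in SDR0_SRST before the engine is touched */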
1389     np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
1390     if (np) {
1391         mtdcri(SDR0, PPC460EX_SDR0_SRST,
1392                mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1393         mtdcri(SDR0, PPC460EX_SDR0_SRST,
1394                mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1395     } else {
1396         np = of_find_compatible_node(NULL, NULL, "amcc,ppc405ex-crypto");
1397         if (np) {
1398             mtdcri(SDR0, PPC405EX_SDR0_SRST,
1399                    mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1400             mtdcri(SDR0, PPC405EX_SDR0_SRST,
1401                    mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1402             is_revb = false;
1403         } else {
1404             np = of_find_compatible_node(NULL, NULL, "amcc,ppc460sx-crypto");
1405             if (np) {
1406                 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1407                     mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1408                 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1409                     mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1410             } else {
1411                 printk(KERN_ERR "Crypto Function Not supported!\n");
1412                 return -EINVAL;
1413             }
1414         }
1415     }
1416 
1417     of_node_put(np);
1418 
1419     core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1420     if (!core_dev)
1421         return -ENOMEM;
1422 
1423     dev_set_drvdata(dev, core_dev);
1424     core_dev->ofdev = ofdev;
1425     core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
1426     rc = -ENOMEM;
1427     if (!core_dev->dev)
1428         goto err_alloc_dev;
1429 
1430     /*
1431      * Older versions of the 460EX/GT have a hardware bug,
1432      * hence they do not support H/W based security interrupt coalescing
1433      */
1434     pvr = mfspr(SPRN_PVR);
1435     if (is_revb && ((pvr >> 4) == 0x130218A)) {
1436         u32 min = PVR_MIN(pvr);
1437 
1438         if (min < 4) {
1439             dev_info(dev, "RevA detected - disable interrupt coalescing\n");
1440             is_revb = false;
1441         }
1442     }
1443 
1444     core_dev->dev->core_dev = core_dev;
1445     core_dev->dev->is_revb = is_revb;
1446     core_dev->device = dev;
1447     mutex_init(&core_dev->rng_lock);
1448     spin_lock_init(&core_dev->lock);
1449     INIT_LIST_HEAD(&core_dev->dev->alg_list);
1450     ratelimit_default_init(&core_dev->dev->aead_ratelimit);
1451     rc = crypto4xx_build_sdr(core_dev->dev);
1452     if (rc)
1453         goto err_build_sdr;
1454     rc = crypto4xx_build_pdr(core_dev->dev);
1455     if (rc)
1456         goto err_build_sdr;
1457 
1458     rc = crypto4xx_build_gdr(core_dev->dev);
1459     if (rc)
1460         goto err_build_sdr;
1461 
1462     /* Init tasklet for bottom half processing */
1463     tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1464              (unsigned long) dev);
1465 
1466     core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
1467     if (!core_dev->dev->ce_base) {
1468         dev_err(dev, "failed to of_iomap\n");
1469         rc = -ENOMEM;
1470         goto err_iomap;
1471     }
1472 
1473     /* Register for Crypto isr, Crypto Engine IRQ */
1474     core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1475     rc = request_irq(core_dev->irq, is_revb ?
1476              crypto4xx_ce_interrupt_handler_revb :
1477              crypto4xx_ce_interrupt_handler, 0,
1478              KBUILD_MODNAME, dev);
1479     if (rc)
1480         goto err_request_irq;
1481 
1482     /* need to setup pdr, rdr, gdr and sdr before this */
1483     crypto4xx_hw_init(core_dev->dev);
1484 
1485     /* Register security algorithms with Linux CryptoAPI */
1486     rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1487                    ARRAY_SIZE(crypto4xx_alg));
1488     if (rc)
1489         goto err_start_dev;
1490 
1491     ppc4xx_trng_probe(core_dev);
1492     return 0;
1493 
1494 err_start_dev:
1495     free_irq(core_dev->irq, dev);
1496 err_request_irq:
1497     irq_dispose_mapping(core_dev->irq);
1498     iounmap(core_dev->dev->ce_base);
1499 err_iomap:
1500     tasklet_kill(&core_dev->tasklet);
1501 err_build_sdr:
1502     crypto4xx_destroy_sdr(core_dev->dev);
1503     crypto4xx_destroy_gdr(core_dev->dev);
1504     crypto4xx_destroy_pdr(core_dev->dev);
1505     kfree(core_dev->dev);
1506 err_alloc_dev:
1507     kfree(core_dev);
1508 
1509     return rc;
1510 }
1511 
1512 static int crypto4xx_remove(struct platform_device *ofdev)
1513 {
1514     struct device *dev = &ofdev->dev;
1515     struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1516 
1517     ppc4xx_trng_remove(core_dev);
1518 
1519     free_irq(core_dev->irq, dev);
1520     irq_dispose_mapping(core_dev->irq);
1521 
1522     tasklet_kill(&core_dev->tasklet);
1523     /* Un-register with Linux CryptoAPI */
1524     crypto4xx_unregister_alg(core_dev->dev);
1525     mutex_destroy(&core_dev->rng_lock);
1526     /* Free all allocated memory */
1527     crypto4xx_stop_all(core_dev);
1528 
1529     return 0;
1530 }
1531 
1532 static const struct of_device_id crypto4xx_match[] = {
1533     { .compatible      = "amcc,ppc4xx-crypto",},
1534     { },
1535 };
1536 MODULE_DEVICE_TABLE(of, crypto4xx_match);
1537 
1538 static struct platform_driver crypto4xx_driver = {
1539     .driver = {
1540         .name = KBUILD_MODNAME,
1541         .of_match_table = crypto4xx_match,
1542     },
1543     .probe      = crypto4xx_probe,
1544     .remove     = crypto4xx_remove,
1545 };
1546 
1547 module_platform_driver(crypto4xx_driver);
1548 
1549 MODULE_LICENSE("GPL");
1550 MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
1551 MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");