// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
/* Allocate the requested number of contiguous LSB slots
 * from the LSB bitmap. Look in the private range for this
 * queue first; failing that, check the public area.
 * If no space is available, wait for slots to be freed.
 * Return: first slot number
 */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
    struct ccp_device *ccp;
    int start;

    /* First look at the map for the queue */
    if (cmd_q->lsb >= 0) {
        start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
                            LSB_SIZE,
                            0, count, 0);
        if (start < LSB_SIZE) {
            bitmap_set(cmd_q->lsbmap, start, count);
            return start + cmd_q->lsb * LSB_SIZE;
        }
    }

    /* No joy; try to get an entry from the shared blocks */
    ccp = cmd_q->ccp;
    for (;;) {
        mutex_lock(&ccp->sb_mutex);

        start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
                            MAX_LSB_CNT * LSB_SIZE,
                            0,
                            count, 0);
        /* A result >= the map size means no free area was found */
        if (start < MAX_LSB_CNT * LSB_SIZE) {
            bitmap_set(ccp->lsbmap, start, count);

            mutex_unlock(&ccp->sb_mutex);
            return start;
        }

        ccp->sb_avail = 0;

        mutex_unlock(&ccp->sb_mutex);

        /* Wait for KSB entries to become available */
        if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
            return 0;
    }
}
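
The allocator above leans on the kernel bitmap API for a first-fit search over contiguous free slots. The standalone C below (userspace, not driver code) mirrors the technique; find_zero_area() is a hypothetical stand-in for bitmap_find_next_zero_area(), including its convention of returning a value >= the map size when no suitable run exists.

#include <stdio.h>
#include <stdint.h>

#define MAP_BITS 32

/* First-fit search for `count` contiguous zero bits; returns the start
 * index, or MAP_BITS on failure, mirroring the kernel helper.
 */
static unsigned int find_zero_area(uint32_t map, unsigned int count)
{
    unsigned int start, n;

    for (start = 0; start + count <= MAP_BITS; start++) {
        for (n = 0; n < count; n++)
            if (map & (1u << (start + n)))
                break;
        if (n == count)
            return start;
    }
    return MAP_BITS;
}

int main(void)
{
    uint32_t map = 0x0000000f;          /* slots 0-3 already in use */
    unsigned int start = find_zero_area(map, 2);

    if (start < MAP_BITS) {
        map |= 3u << start;             /* cf. bitmap_set(map, start, 2) */
        printf("allocated slots %u-%u\n", start, start + 1);
    }
    return 0;
}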

/* Free a number of LSB slots from the bitmap, starting at
 * the indicated starting slot number.
 */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
             unsigned int count)
{
    if (!start)
        return;

    if (cmd_q->lsb == start) {
        /* An entry from the private LSB */
        bitmap_clear(cmd_q->lsbmap, start, count);
    } else {
        /* From the shared LSBs */
        struct ccp_device *ccp = cmd_q->ccp;

        mutex_lock(&ccp->sb_mutex);
        bitmap_clear(ccp->lsbmap, start, count);
        ccp->sb_avail = 1;
        mutex_unlock(&ccp->sb_mutex);
        wake_up_interruptible_all(&ccp->sb_queue);
    }
}

/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
union ccp_function {
    struct {
        u16 size:7;
        u16 encrypt:1;
        u16 mode:5;
        u16 type:2;
    } aes;
    struct {
        u16 size:7;
        u16 encrypt:1;
        u16 rsvd:5;
        u16 type:2;
    } aes_xts;
    struct {
        u16 size:7;
        u16 encrypt:1;
        u16 mode:5;
        u16 type:2;
    } des3;
    struct {
        u16 rsvd1:10;
        u16 type:4;
        u16 rsvd2:1;
    } sha;
    struct {
        u16 mode:3;
        u16 size:12;
    } rsa;
    struct {
        u16 byteswap:2;
        u16 bitwise:3;
        u16 reflect:2;
        u16 rsvd:8;
    } pt;
    struct {
        u16 rsvd:13;
    } zlib;
    struct {
        u16 size:10;
        u16 type:2;
        u16 mode:3;
    } ecc;
    u16 raw;
};
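
The union above is the usual trick of overlaying per-engine bitfield views on one raw halfword: a perform routine zeroes raw, fills in the view for its engine, and stores raw into the descriptor's function field. A small self-contained illustration follows (with the caveat that bitfield layout is ABI-dependent; the kernel builds for a known ABI):

#include <stdio.h>
#include <stdint.h>

union fn {
    struct {
        uint16_t size:7;
        uint16_t encrypt:1;
        uint16_t mode:5;
        uint16_t type:2;
    } aes;                          /* reduced copy of the aes view above */
    uint16_t raw;
};

int main(void)
{
    union fn f = { .raw = 0 };

    f.aes.encrypt = 1;              /* cf. CCP_AES_ENCRYPT() */
    f.aes.mode = 2;                 /* cf. CCP_AES_MODE()    */
    f.aes.type = 1;                 /* cf. CCP_AES_TYPE()    */
    printf("function word: 0x%04x\n", f.raw);  /* packed for dword0 */
    return 0;
}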

#define CCP_AES_SIZE(p)     ((p)->aes.size)
#define CCP_AES_ENCRYPT(p)  ((p)->aes.encrypt)
#define CCP_AES_MODE(p)     ((p)->aes.mode)
#define CCP_AES_TYPE(p)     ((p)->aes.type)
#define CCP_XTS_SIZE(p)     ((p)->aes_xts.size)
#define CCP_XTS_TYPE(p)     ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p)  ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p)    ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt)
#define CCP_DES3_MODE(p)    ((p)->des3.mode)
#define CCP_DES3_TYPE(p)    ((p)->des3.type)
#define CCP_SHA_TYPE(p)     ((p)->sha.type)
#define CCP_RSA_SIZE(p)     ((p)->rsa.size)
#define CCP_PT_BYTESWAP(p)  ((p)->pt.byteswap)
#define CCP_PT_BITWISE(p)   ((p)->pt.bitwise)
#define CCP_ECC_MODE(p)     ((p)->ecc.mode)
#define CCP_ECC_AFFINE(p)   ((p)->ecc.one)

/* Word 0 */
#define CCP5_CMD_DW0(p)     ((p)->dw0)
#define CCP5_CMD_SOC(p)     (CCP5_CMD_DW0(p).soc)
#define CCP5_CMD_IOC(p)     (CCP5_CMD_DW0(p).ioc)
#define CCP5_CMD_INIT(p)    (CCP5_CMD_DW0(p).init)
#define CCP5_CMD_EOM(p)     (CCP5_CMD_DW0(p).eom)
#define CCP5_CMD_FUNCTION(p)    (CCP5_CMD_DW0(p).function)
#define CCP5_CMD_ENGINE(p)  (CCP5_CMD_DW0(p).engine)
#define CCP5_CMD_PROT(p)    (CCP5_CMD_DW0(p).prot)

/* Word 1 */
#define CCP5_CMD_DW1(p)     ((p)->length)
#define CCP5_CMD_LEN(p)     (CCP5_CMD_DW1(p))

/* Word 2 */
#define CCP5_CMD_DW2(p)     ((p)->src_lo)
#define CCP5_CMD_SRC_LO(p)  (CCP5_CMD_DW2(p))

/* Word 3 */
#define CCP5_CMD_DW3(p)     ((p)->dw3)
#define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
#define CCP5_CMD_SRC_HI(p)  ((p)->dw3.src_hi)
#define CCP5_CMD_LSB_ID(p)  ((p)->dw3.lsb_cxt_id)
#define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed)

/* Words 4/5 */
#define CCP5_CMD_DW4(p)     ((p)->dw4)
#define CCP5_CMD_DST_LO(p)  (CCP5_CMD_DW4(p).dst_lo)
#define CCP5_CMD_DW5(p)     ((p)->dw5.fields.dst_hi)
#define CCP5_CMD_DST_HI(p)  (CCP5_CMD_DW5(p))
#define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
#define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
#define CCP5_CMD_SHA_LO(p)  ((p)->dw4.sha_len_lo)
#define CCP5_CMD_SHA_HI(p)  ((p)->dw5.sha_len_hi)

/* Word 6/7 */
#define CCP5_CMD_DW6(p)     ((p)->key_lo)
#define CCP5_CMD_KEY_LO(p)  (CCP5_CMD_DW6(p))
#define CCP5_CMD_DW7(p)     ((p)->dw7)
#define CCP5_CMD_KEY_HI(p)  ((p)->dw7.key_hi)
#define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem)

static inline u32 low_address(unsigned long addr)
{
    return (u64)addr & 0x0ffffffff;
}

static inline u32 high_address(unsigned long addr)
{
    return ((u64)addr >> 32) & 0x00000ffff;
}
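
These helpers split a DMA address for registers that take the low 32 bits and the high bits separately; the high mask keeps only 16 bits, which suggests the hardware consumes at most a 48-bit address (an inference from the masks, not a documented limit). A worked example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t addr = 0x0000123456789abcULL;
    uint32_t lo = (uint32_t)(addr & 0xffffffffULL);      /* low_address()  */
    uint32_t hi = (uint32_t)((addr >> 32) & 0xffffULL);  /* high_address() */

    printf("lo=0x%08x hi=0x%04x\n", lo, hi);  /* lo=0x56789abc hi=0x1234 */
    return 0;
}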

static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
    unsigned int head_idx, n;
    u32 head_lo, queue_start;

    queue_start = low_address(cmd_q->qdma_tail);
    head_lo = ioread32(cmd_q->reg_head_lo);
    head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);

    n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;

    return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
}
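
The free-slot count is standard ring-buffer arithmetic: the engine consumes at head_idx, the driver produces at qidx, and one slot is always kept empty so a full ring can be distinguished from an empty one. A worked example with illustrative numbers (the real COMMANDS_PER_QUEUE lives in ccp-dev.h):

#include <stdio.h>

#define COMMANDS_PER_QUEUE 32   /* illustrative value */

int main(void)
{
    unsigned int head_idx = 5;  /* next descriptor the engine will read */
    unsigned int qidx = 9;      /* next slot the driver will fill */
    unsigned int n;

    n = (head_idx + COMMANDS_PER_QUEUE - qidx - 1) % COMMANDS_PER_QUEUE;
    printf("%u free slots\n", n);  /* 27 = 32 - 4 in flight - 1 reserved */
    return 0;
}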

static int ccp5_do_cmd(struct ccp5_desc *desc,
               struct ccp_cmd_queue *cmd_q)
{
    __le32 *mP;
    u32 *dP;
    u32 tail;
    int i;
    int ret = 0;

    cmd_q->total_ops++;

    if (CCP5_CMD_SOC(desc)) {
        CCP5_CMD_IOC(desc) = 1;
        CCP5_CMD_SOC(desc) = 0;
    }
    mutex_lock(&cmd_q->q_mutex);

    mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx];
    dP = (u32 *)desc;
    for (i = 0; i < 8; i++)
        mP[i] = cpu_to_le32(dP[i]); /* handle endianness */

    cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

    /* The data used by this command must be flushed to memory */
    wmb();

    /* Write the new tail address back to the queue register */
    tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
    iowrite32(tail, cmd_q->reg_tail_lo);

    /* Turn the queue back on using our cached control register */
    iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
    mutex_unlock(&cmd_q->q_mutex);

    if (CCP5_CMD_IOC(desc)) {
        /* Wait for the job to complete */
        ret = wait_event_interruptible(cmd_q->int_queue,
                           cmd_q->int_rcvd);
        if (ret || cmd_q->cmd_error) {
            /* Log the error and flush the queue by
             * moving the head pointer
             */
            if (cmd_q->cmd_error)
                ccp_log_error(cmd_q->ccp,
                          cmd_q->cmd_error);
            iowrite32(tail, cmd_q->reg_head_lo);
            if (!ret)
                ret = -EIO;
        }
        cmd_q->int_rcvd = 0;
    }

    return ret;
}
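
The submit path follows the classic producer/doorbell ordering: copy the descriptor into the ring, issue wmb() so the writes are visible before the doorbell, then write the new tail to the queue register. The userspace sketch below expresses the same ordering with a C11 release store standing in for wmb() plus iowrite32() (an analogy, not the kernel mechanism):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ring[8][8];            /* 8 descriptors of 8 words, like qbase */
static _Atomic uint32_t tail;

static void submit(const uint32_t *desc, unsigned int idx)
{
    for (int i = 0; i < 8; i++)
        ring[idx][i] = desc[i];        /* payload first... */
    /* ...then publish: no observer may see the new tail
     * before the descriptor contents.
     */
    atomic_store_explicit(&tail, idx + 1, memory_order_release);
}

int main(void)
{
    uint32_t desc[8] = { 0xdeadbeef };

    submit(desc, 0);
    printf("tail=%u word0=0x%08x\n", atomic_load(&tail), ring[0][0]);
    return 0;
}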

static int ccp5_perform_aes(struct ccp_op *op)
{
    struct ccp5_desc desc;
    union ccp_function function;
    u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

    op->cmd_q->total_aes_ops++;

    /* Zero out all the fields of the command desc */
    memset(&desc, 0, Q_DESC_SIZE);

    CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;

    CCP5_CMD_SOC(&desc) = op->soc;
    CCP5_CMD_IOC(&desc) = 1;
    CCP5_CMD_INIT(&desc) = op->init;
    CCP5_CMD_EOM(&desc) = op->eom;
    CCP5_CMD_PROT(&desc) = 0;

    function.raw = 0;
    CCP_AES_ENCRYPT(&function) = op->u.aes.action;
    CCP_AES_MODE(&function) = op->u.aes.mode;
    CCP_AES_TYPE(&function) = op->u.aes.type;
    CCP_AES_SIZE(&function) = op->u.aes.size;

    CCP5_CMD_FUNCTION(&desc) = function.raw;

    CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

    CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
    CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
    CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
    CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
    CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
    CCP5_CMD_KEY_HI(&desc) = 0;
    CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
    CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

    return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_xts_aes(struct ccp_op *op)
{
    struct ccp5_desc desc;
    union ccp_function function;
    u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

    op->cmd_q->total_xts_aes_ops++;

    /* Zero out all the fields of the command desc */
    memset(&desc, 0, Q_DESC_SIZE);

    CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;

    CCP5_CMD_SOC(&desc) = op->soc;
    CCP5_CMD_IOC(&desc) = 1;
    CCP5_CMD_INIT(&desc) = op->init;
    CCP5_CMD_EOM(&desc) = op->eom;
    CCP5_CMD_PROT(&desc) = 0;

    function.raw = 0;
    CCP_XTS_TYPE(&function) = op->u.xts.type;
    CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
    CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
    CCP5_CMD_FUNCTION(&desc) = function.raw;

    CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

    CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
    CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
    CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
    CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
    CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
    CCP5_CMD_KEY_HI(&desc) = 0;
    CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
    CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

    return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_sha(struct ccp_op *op)
{
    struct ccp5_desc desc;
    union ccp_function function;

    op->cmd_q->total_sha_ops++;

    /* Zero out all the fields of the command desc */
    memset(&desc, 0, Q_DESC_SIZE);

    CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;

    CCP5_CMD_SOC(&desc) = op->soc;
    CCP5_CMD_IOC(&desc) = 1;
    CCP5_CMD_INIT(&desc) = 1;
    CCP5_CMD_EOM(&desc) = op->eom;
    CCP5_CMD_PROT(&desc) = 0;

    function.raw = 0;
    CCP_SHA_TYPE(&function) = op->u.sha.type;
    CCP5_CMD_FUNCTION(&desc) = function.raw;

    CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

    CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
    CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
    CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

    if (op->eom) {
        CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
        CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
    } else {
        CCP5_CMD_SHA_LO(&desc) = 0;
        CCP5_CMD_SHA_HI(&desc) = 0;
    }

    return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_des3(struct ccp_op *op)
{
    struct ccp5_desc desc;
    union ccp_function function;
    u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

    op->cmd_q->total_3des_ops++;

    /* Zero out all the fields of the command desc */
    memset(&desc, 0, sizeof(struct ccp5_desc));

    CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;

    CCP5_CMD_SOC(&desc) = op->soc;
    CCP5_CMD_IOC(&desc) = 1;
    CCP5_CMD_INIT(&desc) = op->init;
    CCP5_CMD_EOM(&desc) = op->eom;
    CCP5_CMD_PROT(&desc) = 0;

    function.raw = 0;
    CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
    CCP_DES3_MODE(&function) = op->u.des3.mode;
    CCP_DES3_TYPE(&function) = op->u.des3.type;
    CCP5_CMD_FUNCTION(&desc) = function.raw;

    CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

    CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
    CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
    CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
    CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
    CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
    CCP5_CMD_KEY_HI(&desc) = 0;
    CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
    CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

    return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_rsa(struct ccp_op *op)
{
    struct ccp5_desc desc;
    union ccp_function function;

    op->cmd_q->total_rsa_ops++;

    /* Zero out all the fields of the command desc */
    memset(&desc, 0, Q_DESC_SIZE);

    CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;

    CCP5_CMD_SOC(&desc) = op->soc;
    CCP5_CMD_IOC(&desc) = 1;
    CCP5_CMD_INIT(&desc) = 0;
    CCP5_CMD_EOM(&desc) = 1;
    CCP5_CMD_PROT(&desc) = 0;

    function.raw = 0;
    CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
    CCP5_CMD_FUNCTION(&desc) = function.raw;

    CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;

    /* Source is from external memory */
    CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
    CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
    CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    /* Destination is in external memory */
    CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
    CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
    CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    /* Key (Exponent) is in external memory */
    CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
    CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
    CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_passthru(struct ccp_op *op)
{
    struct ccp5_desc desc;
    union ccp_function function;
    struct ccp_dma_info *saddr = &op->src.u.dma;
    struct ccp_dma_info *daddr = &op->dst.u.dma;

    op->cmd_q->total_pt_ops++;

    memset(&desc, 0, Q_DESC_SIZE);

    CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;

    CCP5_CMD_SOC(&desc) = 0;
    CCP5_CMD_IOC(&desc) = 1;
    CCP5_CMD_INIT(&desc) = 0;
    CCP5_CMD_EOM(&desc) = op->eom;
    CCP5_CMD_PROT(&desc) = 0;

    function.raw = 0;
    CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
    CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
    CCP5_CMD_FUNCTION(&desc) = function.raw;

    /* Take the length from whichever side is in system memory;
     * the other side is a fixed-size LSB slot
     */
    if (op->src.type == CCP_MEMTYPE_SYSTEM)
        CCP5_CMD_LEN(&desc) = saddr->length;
    else
        CCP5_CMD_LEN(&desc) = daddr->length;

    if (op->src.type == CCP_MEMTYPE_SYSTEM) {
        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
            CCP5_CMD_LSB_ID(&desc) = op->sb_key;
    } else {
        u32 key_addr = op->src.u.sb * CCP_SB_BYTES;

        CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_SRC_HI(&desc) = 0;
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
    }

    if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
    } else {
        u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;

        CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_DST_HI(&desc) = 0;
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
    }

    return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_ecc(struct ccp_op *op)
{
    struct ccp5_desc desc;
    union ccp_function function;

    op->cmd_q->total_ecc_ops++;

    /* Zero out all the fields of the command desc */
    memset(&desc, 0, Q_DESC_SIZE);

    CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;

    CCP5_CMD_SOC(&desc) = 0;
    CCP5_CMD_IOC(&desc) = 1;
    CCP5_CMD_INIT(&desc) = 0;
    CCP5_CMD_EOM(&desc) = 1;
    CCP5_CMD_PROT(&desc) = 0;

    function.raw = 0;
    function.ecc.mode = op->u.ecc.function;
    CCP5_CMD_FUNCTION(&desc) = function.raw;

    CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

    CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
    CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
    CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
    CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
    CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

    return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
    int q_mask = 1 << cmd_q->id;
    int queues = 0;
    int j;

    /* Build a bit mask to know which LSBs this queue has access to.
     * Don't bother with segment 0 as it has special privileges.
     */
    for (j = 1; j < MAX_LSB_CNT; j++) {
        if (status & q_mask)
            bitmap_set(cmd_q->lsbmask, j, 1);
        status >>= LSB_REGION_WIDTH;
    }
    queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
    dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
         cmd_q->id, queues);

    return queues ? 0 : -EINVAL;
}
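
The status word packs one field per LSB region, LSB_REGION_WIDTH bits wide, with one bit per queue inside each field; shifting the word right walks region by region. A standalone rendition with sample values (the real constants live in ccp-dev.h):

#include <stdio.h>
#include <stdint.h>

#define MAX_LSB_CNT      8   /* illustrative values */
#define LSB_REGION_WIDTH 5

int main(void)
{
    uint64_t status = 0x1084210842ULL;  /* sample: queue-1 bit set in every field */
    unsigned int id = 1;
    uint32_t q_mask = 1u << id;
    uint32_t lsbmask = 0;
    int j;

    /* Region 0 is skipped, exactly as in ccp_find_lsb_regions() */
    for (j = 1; j < MAX_LSB_CNT; j++) {
        if (status & q_mask)
            lsbmask |= 1u << j;
        status >>= LSB_REGION_WIDTH;
    }
    printf("queue %u region mask: 0x%02x\n", id, lsbmask);  /* 0xfe: regions 1-7 */
    return 0;
}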

static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                    int lsb_cnt, int n_lsbs,
                    unsigned long *lsb_pub)
{
    DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
    int bitno;
    int qlsb_wgt;
    int i;

    /* For each queue:
     * If the count of potential LSBs available to a queue matches the
     * ordinal given to us in lsb_cnt:
     * Copy the mask of possible LSBs for this queue into "qlsb";
     * For each bit in qlsb, see if the corresponding bit in the
     * aggregation mask is set; if so, we have a match.
     *     If we have a match, clear the bit in the aggregation to
     *     mark it as no longer available.
     *     If there is no match, clear the bit in qlsb and keep looking.
     * (A standalone sketch of this search follows the function.)
     */
    for (i = 0; i < ccp->cmd_q_count; i++) {
        struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

        qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

        if (qlsb_wgt == lsb_cnt) {
            bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

            bitno = find_first_bit(qlsb, MAX_LSB_CNT);
            while (bitno < MAX_LSB_CNT) {
                if (test_bit(bitno, lsb_pub)) {
                    /* We found an available LSB
                     * that this queue can access
                     */
                    cmd_q->lsb = bitno;
                    bitmap_clear(lsb_pub, bitno, 1);
                    dev_dbg(ccp->dev,
                         "Queue %d gets LSB %d\n",
                         i, bitno);
                    break;
                }
                bitmap_clear(qlsb, bitno, 1);
                bitno = find_first_bit(qlsb, MAX_LSB_CNT);
            }
            if (bitno >= MAX_LSB_CNT)
                return -EINVAL;
            n_lsbs--;
        }
    }
    return n_lsbs;
}
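
As promised above, here is a standalone sketch of that search: queues are visited in order of how few regions they can reach (lsb_cnt = 1, 2, ...), and each gets the first still-public region in its access mask, which is then withdrawn from the public pool. Masks are plain integers rather than kernel bitmaps, the arrays are invented for the example, and __builtin_popcount() stands in for bitmap_weight():

#include <stdio.h>

#define NQ   3   /* queues */
#define NLSB 4   /* LSB regions */

int main(void)
{
    unsigned int access[NQ] = { 0x2, 0x3, 0xf };  /* per-queue region masks */
    unsigned int pub = 0xf;                       /* regions still public */
    int assigned[NQ] = { -1, -1, -1 };
    int lsb_cnt, q, b;

    for (lsb_cnt = 1; lsb_cnt <= NLSB; lsb_cnt++)
        for (q = 0; q < NQ; q++) {
            if (assigned[q] >= 0 ||
                __builtin_popcount(access[q]) != lsb_cnt)
                continue;                         /* not this pass */
            for (b = 0; b < NLSB; b++)
                if ((access[q] & pub) & (1u << b)) {
                    assigned[q] = b;              /* cmd_q->lsb = bitno */
                    pub &= ~(1u << b);            /* no longer public */
                    break;
                }
        }

    for (q = 0; q < NQ; q++)
        printf("queue %d -> LSB %d\n", q, assigned[q]);
    return 0;
}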

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
    DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
    DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
    int n_lsbs = 0;
    int bitno;
    int i, lsb_cnt;
    int rc = 0;

    bitmap_zero(lsb_pub, MAX_LSB_CNT);

    /* Create an aggregate bitmap to get a total count of available LSBs */
    for (i = 0; i < ccp->cmd_q_count; i++)
        bitmap_or(lsb_pub,
              lsb_pub, ccp->cmd_q[i].lsbmask,
              MAX_LSB_CNT);

    n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

    if (n_lsbs >= ccp->cmd_q_count) {
        /* We have enough LSBs to give every queue a private LSB.
         * Brute force search to start with the queues that are more
         * constrained in LSB choice. When an LSB is privately
         * assigned, it is removed from the public mask.
         * This is an ugly N squared algorithm with some optimization.
         */
        for (lsb_cnt = 1;
             n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
             lsb_cnt++) {
            rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                              lsb_pub);
            if (rc < 0)
                return -EINVAL;
            n_lsbs = rc;
        }
    }

    rc = 0;
    /* What's left of the LSBs, according to the public mask, now become
     * shared. Any zero bits in the lsb_pub mask represent an LSB region
     * that can't be used as a shared resource, so mark the LSB slots for
     * them as "in use".
     */
    bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

    bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
    while (bitno < MAX_LSB_CNT) {
        bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
        bitmap_set(qlsb, bitno, 1);
        bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
    }

    return rc;
}

static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
    unsigned int i;

    for (i = 0; i < ccp->cmd_q_count; i++)
        iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
    unsigned int i;

    for (i = 0; i < ccp->cmd_q_count; i++)
        iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_irq_bh(unsigned long data)
{
    struct ccp_device *ccp = (struct ccp_device *)data;
    u32 status;
    unsigned int i;

    for (i = 0; i < ccp->cmd_q_count; i++) {
        struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

        status = ioread32(cmd_q->reg_interrupt_status);

        if (status) {
            cmd_q->int_status = status;
            cmd_q->q_status = ioread32(cmd_q->reg_status);
            cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

            /* On error, only save the first error value */
            if ((status & INT_ERROR) && !cmd_q->cmd_error)
                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

            cmd_q->int_rcvd = 1;

            /* Acknowledge the interrupt and wake the kthread */
            iowrite32(status, cmd_q->reg_interrupt_status);
            wake_up_interruptible(&cmd_q->int_queue);
        }
    }
    ccp5_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
    struct ccp_device *ccp = (struct ccp_device *)data;

    ccp5_disable_queue_interrupts(ccp);
    ccp->total_interrupts++;
    if (ccp->use_tasklet)
        tasklet_schedule(&ccp->irq_tasklet);
    else
        ccp5_irq_bh((unsigned long)ccp);
    return IRQ_HANDLED;
}
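
The interrupt path is the usual two-half split: the hard handler only masks the queue interrupts and defers register inspection to a tasklet bottom half (or runs it inline when use_tasklet is clear). Below is a minimal kernel-style sketch of the pattern; the mydev structure and its register offsets are hypothetical, and the wiring (tasklet_init(&md->bh, mydev_bh, (unsigned long)md) plus request_irq()) is assumed to happen at probe time:

#include <linux/interrupt.h>
#include <linux/io.h>

struct mydev {
    void __iomem *regs;             /* 0x00: int enable, 0x04: int status (hypothetical) */
    struct tasklet_struct bh;
};

static void mydev_bh(unsigned long data)
{
    struct mydev *md = (struct mydev *)data;

    /* ack whatever is pending, then re-enable interrupts last */
    iowrite32(ioread32(md->regs + 0x04), md->regs + 0x04);
    iowrite32(~0u, md->regs + 0x00);
}

static irqreturn_t mydev_irq(int irq, void *data)
{
    struct mydev *md = data;

    iowrite32(0, md->regs + 0x00);  /* mask, like ccp5_disable_queue_interrupts() */
    tasklet_schedule(&md->bh);      /* defer the heavy lifting */
    return IRQ_HANDLED;
}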

static int ccp5_init(struct ccp_device *ccp)
{
    struct device *dev = ccp->dev;
    struct ccp_cmd_queue *cmd_q;
    struct dma_pool *dma_pool;
    char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
    unsigned int qmr, i;
    u64 status;
    u32 status_lo, status_hi;
    int ret;

    /* Find available queues */
    qmr = ioread32(ccp->io_regs + Q_MASK_REG);
    /*
     * Check for access to the registers.  If this read returns
     * 0xffffffff, it's likely that the system is running a broken
     * BIOS which disallows access to the device. Stop here and fail
     * the initialization (but not the load, as the PSP could get
     * properly initialized).
     */
    if (qmr == 0xffffffff) {
        dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n");
        return 1;
    }

    for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
        if (!(qmr & (1 << i)))
            continue;

        /* Allocate a dma pool for this queue */
        snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
             ccp->name, i);
        dma_pool = dma_pool_create(dma_pool_name, dev,
                       CCP_DMAPOOL_MAX_SIZE,
                       CCP_DMAPOOL_ALIGN, 0);
        if (!dma_pool) {
            dev_err(dev, "unable to allocate dma pool\n");
            ret = -ENOMEM;
            goto e_pool;
        }

        cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
        ccp->cmd_q_count++;

        cmd_q->ccp = ccp;
        cmd_q->id = i;
        cmd_q->dma_pool = dma_pool;
        mutex_init(&cmd_q->q_mutex);

        /* Page alignment satisfies our needs for N <= 128 */
        BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
        cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
        cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize,
                           &cmd_q->qbase_dma,
                           GFP_KERNEL);
        if (!cmd_q->qbase) {
            dev_err(dev, "unable to allocate command queue\n");
            ret = -ENOMEM;
            goto e_pool;
        }

        cmd_q->qidx = 0;
        /* Preset some register values and masks that are queue
         * number dependent
         */
        cmd_q->reg_control = ccp->io_regs +
                     CMD5_Q_STATUS_INCR * (i + 1);
        cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
        cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
        cmd_q->reg_int_enable = cmd_q->reg_control +
                    CMD5_Q_INT_ENABLE_BASE;
        cmd_q->reg_interrupt_status = cmd_q->reg_control +
                          CMD5_Q_INTERRUPT_STATUS_BASE;
        cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
        cmd_q->reg_int_status = cmd_q->reg_control +
                    CMD5_Q_INT_STATUS_BASE;
        cmd_q->reg_dma_status = cmd_q->reg_control +
                    CMD5_Q_DMA_STATUS_BASE;
        cmd_q->reg_dma_read_status = cmd_q->reg_control +
                         CMD5_Q_DMA_READ_STATUS_BASE;
        cmd_q->reg_dma_write_status = cmd_q->reg_control +
                          CMD5_Q_DMA_WRITE_STATUS_BASE;

        init_waitqueue_head(&cmd_q->int_queue);

        dev_dbg(dev, "queue #%u available\n", i);
    }

    if (ccp->cmd_q_count == 0) {
        dev_notice(dev, "no command queues available\n");
        ret = 1;
        goto e_pool;
    }

    /* Turn off the queues and disable interrupts until ready */
    ccp5_disable_queue_interrupts(ccp);
    for (i = 0; i < ccp->cmd_q_count; i++) {
        cmd_q = &ccp->cmd_q[i];

        cmd_q->qcontrol = 0; /* Start with nothing */
        iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

        ioread32(cmd_q->reg_int_status);
        ioread32(cmd_q->reg_status);

        /* Clear the interrupt status */
        iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
    }

    dev_dbg(dev, "Requesting an IRQ...\n");
    /* Request an irq */
    ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
    if (ret) {
        dev_err(dev, "unable to allocate an IRQ\n");
        goto e_pool;
    }
    /* Initialize the ISR tasklet */
    if (ccp->use_tasklet)
        tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
                 (unsigned long)ccp);

    dev_dbg(dev, "Loading LSB map...\n");
    /* Copy the private LSB mask to the public registers */
    status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
    status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
    iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
    iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
    status = ((u64)status_hi<<30) | (u64)status_lo;

    dev_dbg(dev, "Configuring virtual queues...\n");
    /* Configure size of each virtual queue accessible to host */
    for (i = 0; i < ccp->cmd_q_count; i++) {
        u32 dma_addr_lo;
        u32 dma_addr_hi;

        cmd_q = &ccp->cmd_q[i];

        cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
        cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

        cmd_q->qdma_tail = cmd_q->qbase_dma;
        dma_addr_lo = low_address(cmd_q->qdma_tail);
        iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
        iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

        dma_addr_hi = high_address(cmd_q->qdma_tail);
        cmd_q->qcontrol |= (dma_addr_hi << 16);
        iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

        /* Find the LSB regions accessible to the queue */
        ccp_find_lsb_regions(cmd_q, status);
        cmd_q->lsb = -1; /* Unassigned value */
    }

    dev_dbg(dev, "Assigning LSBs...\n");
    ret = ccp_assign_lsbs(ccp);
    if (ret) {
        dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
        goto e_irq;
    }

    /* Optimization: pre-allocate LSB slots for each queue */
    for (i = 0; i < ccp->cmd_q_count; i++) {
        ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
        ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
    }

    dev_dbg(dev, "Starting threads...\n");
    /* Create a kthread for each queue */
    for (i = 0; i < ccp->cmd_q_count; i++) {
        struct task_struct *kthread;

        cmd_q = &ccp->cmd_q[i];

        kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
                      "%s-q%u", ccp->name, cmd_q->id);
        if (IS_ERR(kthread)) {
            dev_err(dev, "error creating queue thread (%ld)\n",
                PTR_ERR(kthread));
            ret = PTR_ERR(kthread);
            goto e_kthread;
        }

        cmd_q->kthread = kthread;
    }

    dev_dbg(dev, "Enabling interrupts...\n");
    ccp5_enable_queue_interrupts(ccp);

    dev_dbg(dev, "Registering device...\n");
    /* Put this on the unit list to make it available */
    ccp_add_device(ccp);

    ret = ccp_register_rng(ccp);
    if (ret)
        goto e_kthread;

    /* Register the DMA engine support */
    ret = ccp_dmaengine_register(ccp);
    if (ret)
        goto e_hwrng;

#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
    /* Set up debugfs entries */
    ccp5_debugfs_setup(ccp);
#endif

    return 0;

e_hwrng:
    ccp_unregister_rng(ccp);

e_kthread:
    for (i = 0; i < ccp->cmd_q_count; i++)
        if (ccp->cmd_q[i].kthread)
            kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
    sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
    for (i = 0; i < ccp->cmd_q_count; i++)
        dma_pool_destroy(ccp->cmd_q[i].dma_pool);

    return ret;
}
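
The error handling in ccp5_init() is the kernel's goto-ladder idiom: resources are acquired in order, and a failure jumps to the label that releases exactly what had been acquired so far (e_hwrng, e_kthread, e_irq, e_pool, from most to least recently acquired). Reduced to its minimal shape (placeholder resources, not driver APIs; a real init would stash the allocations in a device struct):

#include <linux/slab.h>
#include <linux/errno.h>

static int demo_init(void)
{
    void *a, *b;
    int ret;

    a = kzalloc(64, GFP_KERNEL);
    if (!a)
        return -ENOMEM;

    b = kzalloc(64, GFP_KERNEL);
    if (!b) {
        ret = -ENOMEM;
        goto e_a;       /* undo only what already succeeded */
    }

    /* ... later steps would add an e_b label above e_a ... */
    return 0;

e_a:
    kfree(a);
    return ret;
}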

static void ccp5_destroy(struct ccp_device *ccp)
{
    struct ccp_cmd_queue *cmd_q;
    struct ccp_cmd *cmd;
    unsigned int i;

    /* Unregister the DMA engine */
    ccp_dmaengine_unregister(ccp);

    /* Unregister the RNG */
    ccp_unregister_rng(ccp);

    /* Remove this device from the list of available units first */
    ccp_del_device(ccp);

#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
    /* We're in the process of tearing down the entire driver;
     * when all the devices are gone clean up debugfs
     */
    if (ccp_present())
        ccp5_debugfs_destroy();
#endif

    /* Disable and clear interrupts */
    ccp5_disable_queue_interrupts(ccp);
    for (i = 0; i < ccp->cmd_q_count; i++) {
        cmd_q = &ccp->cmd_q[i];

        /* Turn off the run bit */
        iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);

        /* Clear the interrupt status */
        iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
        ioread32(cmd_q->reg_int_status);
        ioread32(cmd_q->reg_status);
    }

    /* Stop the queue kthreads */
    for (i = 0; i < ccp->cmd_q_count; i++)
        if (ccp->cmd_q[i].kthread)
            kthread_stop(ccp->cmd_q[i].kthread);

    sp_free_ccp_irq(ccp->sp, ccp);

    /* Flush the cmd and backlog queue */
    while (!list_empty(&ccp->cmd)) {
        /* Invoke the callback directly with an error code */
        cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
        list_del(&cmd->entry);
        cmd->callback(cmd->data, -ENODEV);
    }
    while (!list_empty(&ccp->backlog)) {
        /* Invoke the callback directly with an error code */
        cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
        list_del(&cmd->entry);
        cmd->callback(cmd->data, -ENODEV);
    }
}

static void ccp5_config(struct ccp_device *ccp)
{
    /* Public side */
    iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}

static void ccp5other_config(struct ccp_device *ccp)
{
    int i;
    u32 rnd;

    /* We own all of the queues on the NTB CCP */

    iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
    iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
    for (i = 0; i < 12; i++) {
        rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
        iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
    }

    iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
    iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
    iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);

    iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
    iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);

    iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);

    ccp5_config(ccp);
}

/* Version 5 adds some function, but is essentially the same as v3 */
static const struct ccp_actions ccp5_actions = {
    .aes = ccp5_perform_aes,
    .xts_aes = ccp5_perform_xts_aes,
    .sha = ccp5_perform_sha,
    .des3 = ccp5_perform_des3,
    .rsa = ccp5_perform_rsa,
    .passthru = ccp5_perform_passthru,
    .ecc = ccp5_perform_ecc,
    .sballoc = ccp_lsb_alloc,
    .sbfree = ccp_lsb_free,
    .init = ccp5_init,
    .destroy = ccp5_destroy,
    .get_free_slots = ccp5_get_free_slots,
};
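
This ops table is a straightforward vtable: generation-independent code calls through the function pointers, and each hardware revision supplies its own table (v3 keeps a parallel one in ccp-dev-v3.c). A toy standalone illustration of the dispatch pattern; all names here are invented:

#include <stdio.h>

struct ops {
    int (*do_op)(int arg);
};

static int v5_do_op(int arg) { return arg * 2; }

static const struct ops v5_ops = { .do_op = v5_do_op };

struct dev {
    const struct ops *perform;      /* cf. the .perform member below */
};

int main(void)
{
    struct dev d = { .perform = &v5_ops };

    printf("%d\n", d.perform->do_op(21));   /* version-agnostic caller */
    return 0;
}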

const struct ccp_vdata ccpv5a = {
    .version = CCP_VERSION(5, 0),
    .setup = ccp5_config,
    .perform = &ccp5_actions,
    .offset = 0x0,
    .rsamax = CCP5_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv5b = {
    .version = CCP_VERSION(5, 0),
    .dma_chan_attr = DMA_PRIVATE,
    .setup = ccp5other_config,
    .perform = &ccp5_actions,
    .offset = 0x0,
    .rsamax = CCP5_RSA_MAX_WIDTH,
};