// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS AES Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>

#include <asm/byteorder.h>
#include <asm/errno.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>

#include "ocs-aes.h"

#define AES_COMMAND_OFFSET          0x0000
#define AES_KEY_0_OFFSET            0x0004
#define AES_KEY_1_OFFSET            0x0008
#define AES_KEY_2_OFFSET            0x000C
#define AES_KEY_3_OFFSET            0x0010
#define AES_KEY_4_OFFSET            0x0014
#define AES_KEY_5_OFFSET            0x0018
#define AES_KEY_6_OFFSET            0x001C
#define AES_KEY_7_OFFSET            0x0020
#define AES_IV_0_OFFSET             0x0024
#define AES_IV_1_OFFSET             0x0028
#define AES_IV_2_OFFSET             0x002C
#define AES_IV_3_OFFSET             0x0030
#define AES_ACTIVE_OFFSET           0x0034
#define AES_STATUS_OFFSET           0x0038
#define AES_KEY_SIZE_OFFSET         0x0044
#define AES_IER_OFFSET              0x0048
#define AES_ISR_OFFSET              0x005C
#define AES_MULTIPURPOSE1_0_OFFSET      0x0200
#define AES_MULTIPURPOSE1_1_OFFSET      0x0204
#define AES_MULTIPURPOSE1_2_OFFSET      0x0208
#define AES_MULTIPURPOSE1_3_OFFSET      0x020C
#define AES_MULTIPURPOSE2_0_OFFSET      0x0220
#define AES_MULTIPURPOSE2_1_OFFSET      0x0224
#define AES_MULTIPURPOSE2_2_OFFSET      0x0228
#define AES_MULTIPURPOSE2_3_OFFSET      0x022C
#define AES_BYTE_ORDER_CFG_OFFSET       0x02C0
#define AES_TLEN_OFFSET             0x0300
#define AES_T_MAC_0_OFFSET          0x0304
#define AES_T_MAC_1_OFFSET          0x0308
#define AES_T_MAC_2_OFFSET          0x030C
#define AES_T_MAC_3_OFFSET          0x0310
#define AES_PLEN_OFFSET             0x0314
#define AES_A_DMA_SRC_ADDR_OFFSET       0x0400
#define AES_A_DMA_DST_ADDR_OFFSET       0x0404
#define AES_A_DMA_SRC_SIZE_OFFSET       0x0408
#define AES_A_DMA_DST_SIZE_OFFSET       0x040C
#define AES_A_DMA_DMA_MODE_OFFSET       0x0410
#define AES_A_DMA_NEXT_SRC_DESCR_OFFSET     0x0418
#define AES_A_DMA_NEXT_DST_DESCR_OFFSET     0x041C
#define AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET  0x0420
#define AES_A_DMA_LOG_OFFSET            0x0424
#define AES_A_DMA_STATUS_OFFSET         0x0428
#define AES_A_DMA_PERF_CNTR_OFFSET      0x042C
#define AES_A_DMA_MSI_ISR_OFFSET        0x0480
#define AES_A_DMA_MSI_IER_OFFSET        0x0484
#define AES_A_DMA_MSI_MASK_OFFSET       0x0488
#define AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET    0x0600
#define AES_A_DMA_OUTBUFFER_READ_FIFO_OFFSET    0x0700

/*
 * AES_A_DMA_DMA_MODE register.
 * Default: 0x00000000.
 * bit[31]  ACTIVE
 *      This bit activates the DMA. When the DMA finishes, it resets
 *      this bit to zero.
 * bit[30:26]   Unused by this driver.
 * bit[25]  SRC_LINK_LIST_EN
 *      Source link list enable bit. When the linked list is terminated
 *      this bit is reset by the DMA.
 * bit[24]  DST_LINK_LIST_EN
 *      Destination link list enable bit. When the linked list is
 *      terminated this bit is reset by the DMA.
 * bit[23:0]    Unused by this driver.
 */
#define AES_A_DMA_DMA_MODE_ACTIVE       BIT(31)
#define AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN BIT(25)
#define AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN BIT(24)

/*
 * AES_ACTIVE register
 * default 0x00000000
 * bit[31:10]   Reserved
 * bit[9]   LAST_ADATA
 * bit[8]   LAST_GCX
 * bit[7:2] Reserved
 * bit[1]   TERMINATION
 * bit[0]   TRIGGER
 */
#define AES_ACTIVE_LAST_ADATA           BIT(9)
#define AES_ACTIVE_LAST_CCM_GCM         BIT(8)
#define AES_ACTIVE_TERMINATION          BIT(1)
#define AES_ACTIVE_TRIGGER          BIT(0)

#define AES_DISABLE_INT             0x00000000
#define AES_DMA_CPD_ERR_INT         BIT(8)
#define AES_DMA_OUTBUF_RD_ERR_INT       BIT(7)
#define AES_DMA_OUTBUF_WR_ERR_INT       BIT(6)
#define AES_DMA_INBUF_RD_ERR_INT        BIT(5)
#define AES_DMA_INBUF_WR_ERR_INT        BIT(4)
#define AES_DMA_BAD_COMP_INT            BIT(3)
#define AES_DMA_SAI_INT             BIT(2)
#define AES_DMA_SRC_DONE_INT            BIT(0)
#define AES_COMPLETE_INT            BIT(1)

#define AES_DMA_MSI_MASK_CLEAR          BIT(0)

#define AES_128_BIT_KEY             0x00000000
#define AES_256_BIT_KEY             BIT(0)

#define AES_DEACTIVATE_PERF_CNTR        0x00000000
#define AES_ACTIVATE_PERF_CNTR          BIT(0)

#define AES_MAX_TAG_SIZE_U32            4

#define OCS_LL_DMA_FLAG_TERMINATE       BIT(31)

/*
 * There is an inconsistency in the documentation. This is documented as an
 * 11-bit value, but it is actually 10 bits.
 */
#define AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK  0x3FF

/*
 * During CCM decrypt, the OCS block needs to finish processing the ciphertext
 * before the tag is written. For 128-bit mode this required delay is 28 OCS
 * clock cycles. For 256-bit mode it is 36 OCS clock cycles.
 */
#define CCM_DECRYPT_DELAY_TAG_CLK_COUNT     36UL

/*
 * During CCM decrypt there must be a delay of at least 42 OCS clock cycles
 * between setting the TRIGGER bit in AES_ACTIVE and setting the LAST_CCM_GCM
 * bit in the same register (as stated in the OCS databook)
 */
#define CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT    42UL

/* See RFC3610 section 2.2 */
#define L_PRIME_MIN (1)
#define L_PRIME_MAX (7)
/*
 * CCM IV format from RFC 3610 section 2.3
 *
 *   Octet Number   Contents
 *   ------------   ---------
 *   0              Flags
 *   1 ... 15-L     Nonce N
 *   16-L ... 15    Counter i
 *
 * Flags = L' = L - 1
 */
#define L_PRIME_IDX     0
#define COUNTER_START(lprime)   (16 - ((lprime) + 1))
#define COUNTER_LEN(lprime) ((lprime) + 1)
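
/*
 * Illustrative example (not part of the driver): for a CCM IV using L == 4
 * (so L' == iv[0] == 3), the macros above resolve to:
 *
 *   COUNTER_START(3) == 16 - (3 + 1) == 12
 *   COUNTER_LEN(3)   ==  3 + 1      ==  4
 *
 * i.e. iv[1]..iv[11] hold the 11-byte nonce N and iv[12]..iv[15] hold the
 * 4-byte counter, matching the RFC 3610 layout shown above.
 */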

enum aes_counter_mode {
    AES_CTR_M_NO_INC = 0,
    AES_CTR_M_32_INC = 1,
    AES_CTR_M_64_INC = 2,
    AES_CTR_M_128_INC = 3,
};

/**
 * struct ocs_dma_linked_list - OCS DMA linked list entry.
 * @src_addr:   Source address of the data.
 * @src_len:    Length of data to be fetched.
 * @next:   Next dma_list to fetch.
 * @ll_flags:   Flags (Freeze / Terminate) for the DMA engine.
 */
struct ocs_dma_linked_list {
    u32 src_addr;
    u32 src_len;
    u32 next;
    u32 ll_flags;
} __packed;
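
/*
 * Illustrative sketch (not part of the driver): chaining two entries by
 * hand, assuming ll[] is a coherent buffer whose DMA address is list_dma
 * and buf0_dma/buf1_dma are hypothetical DMA-mapped data buffers:
 *
 *   ll[0].src_addr = buf0_dma;
 *   ll[0].src_len  = buf0_len;
 *   ll[0].next     = list_dma + sizeof(ll[0]);   // points at ll[1]
 *   ll[0].ll_flags = 0;
 *   ll[1].src_addr = buf1_dma;
 *   ll[1].src_len  = buf1_len;
 *   ll[1].next     = 0;                          // no next entry
 *   ll[1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;  // last descriptor
 *
 * ocs_create_linked_list_from_sg() at the end of this file builds exactly
 * this layout from a DMA-mapped scatterlist.
 */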

/*
 * Set endianness of inputs and outputs
 * AES_BYTE_ORDER_CFG
 * default 0x00000000
 * bit [10] - KEY_HI_LO_SWAP
 * bit [9] - KEY_HI_SWAP_DWORDS_IN_OCTWORD
 * bit [8] - KEY_HI_SWAP_BYTES_IN_DWORD
 * bit [7] - KEY_LO_SWAP_DWORDS_IN_OCTWORD
 * bit [6] - KEY_LO_SWAP_BYTES_IN_DWORD
 * bit [5] - IV_SWAP_DWORDS_IN_OCTWORD
 * bit [4] - IV_SWAP_BYTES_IN_DWORD
 * bit [3] - DOUT_SWAP_DWORDS_IN_OCTWORD
 * bit [2] - DOUT_SWAP_BYTES_IN_DWORD
 * bit [1] - DIN_SWAP_DWORDS_IN_OCTWORD
 * bit [0] - DIN_SWAP_BYTES_IN_DWORD
 */
static inline void aes_a_set_endianness(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(0x7FF, aes_dev->base_reg + AES_BYTE_ORDER_CFG_OFFSET);
}

/* Trigger AES process start. */
static inline void aes_a_op_trigger(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(AES_ACTIVE_TRIGGER, aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/* Indicate last bulk of data. */
static inline void aes_a_op_termination(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(AES_ACTIVE_TERMINATION,
          aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/*
 * Set LAST_CCM_GCM in AES_ACTIVE register and clear all other bits.
 *
 * Called when DMA is programmed to fetch the last batch of data.
 * - For AES-CCM it is called for the last batch of Payload data and Ciphertext
 *   data.
 * - For AES-GCM, it is called for the last batch of Plaintext data and
 *   Ciphertext data.
 */
static inline void aes_a_set_last_gcx(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(AES_ACTIVE_LAST_CCM_GCM,
          aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/* Wait for LAST_CCM_GCM bit to be unset. */
static inline void aes_a_wait_last_gcx(const struct ocs_aes_dev *aes_dev)
{
    u32 aes_active_reg;

    do {
        aes_active_reg = ioread32(aes_dev->base_reg +
                      AES_ACTIVE_OFFSET);
    } while (aes_active_reg & AES_ACTIVE_LAST_CCM_GCM);
}

/* Wait until the input buffer occupancy (a 10-bit field) drains to zero. */
static void aes_a_dma_wait_input_buffer_occupancy(const struct ocs_aes_dev *aes_dev)
{
    u32 reg;

    do {
        reg = ioread32(aes_dev->base_reg + AES_A_DMA_STATUS_OFFSET);
    } while (reg & AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK);
}

/*
 * Set LAST_CCM_GCM and LAST_ADATA bits in AES_ACTIVE register (and clear all
 * other bits).
 *
 * Called when DMA is programmed to fetch the last batch of Associated Data
 * (CCM case) or Additional Authenticated Data (GCM case).
 */
static inline void aes_a_set_last_gcx_and_adata(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(AES_ACTIVE_LAST_ADATA | AES_ACTIVE_LAST_CCM_GCM,
          aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/* Set DMA src and dst transfer size to 0 */
static inline void aes_a_dma_set_xfer_size_zero(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
    iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
}

/* Activate DMA for zero-byte transfer case. */
static inline void aes_a_dma_active(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(AES_A_DMA_DMA_MODE_ACTIVE,
          aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Activate DMA and enable src linked list */
static inline void aes_a_dma_active_src_ll_en(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
          AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,
          aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Activate DMA and enable dst linked list */
static inline void aes_a_dma_active_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
          AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
          aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Activate DMA and enable src and dst linked lists */
static inline void aes_a_dma_active_src_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
          AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN |
          AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
          aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Reset PERF_CNTR to 0 and activate it */
static inline void aes_a_dma_reset_and_activate_perf_cntr(const struct ocs_aes_dev *aes_dev)
{
    iowrite32(0x00000000, aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET);
    iowrite32(AES_ACTIVATE_PERF_CNTR,
          aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}

/* Wait until PERF_CNTR is > delay, then deactivate it */
static inline void aes_a_dma_wait_and_deactivate_perf_cntr(const struct ocs_aes_dev *aes_dev,
                               int delay)
{
    while (ioread32(aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET) < delay)
        ;
    iowrite32(AES_DEACTIVATE_PERF_CNTR,
          aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}

/* Disable AES and DMA IRQ. */
static void aes_irq_disable(struct ocs_aes_dev *aes_dev)
{
    u32 isr_val = 0;

    /* Disable interrupts */
    iowrite32(AES_DISABLE_INT,
          aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
    iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);

    /* Clear any pending interrupt */
    isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
    if (isr_val)
        iowrite32(isr_val,
              aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

    isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
    if (isr_val)
        iowrite32(isr_val,
              aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);

    isr_val = ioread32(aes_dev->base_reg + AES_ISR_OFFSET);
    if (isr_val)
        iowrite32(isr_val, aes_dev->base_reg + AES_ISR_OFFSET);
}

/* Enable AES or DMA IRQ.  IRQ is disabled once fired. */
static void aes_irq_enable(struct ocs_aes_dev *aes_dev, u8 irq)
{
    if (irq == AES_COMPLETE_INT) {
        /* Ensure DMA error interrupts are enabled */
        iowrite32(AES_DMA_CPD_ERR_INT |
              AES_DMA_OUTBUF_RD_ERR_INT |
              AES_DMA_OUTBUF_WR_ERR_INT |
              AES_DMA_INBUF_RD_ERR_INT |
              AES_DMA_INBUF_WR_ERR_INT |
              AES_DMA_BAD_COMP_INT |
              AES_DMA_SAI_INT,
              aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
        /*
         * AES_IER
         * default 0x00000000
         * bits [31:3] - reserved
         * bit [2] - EN_SKS_ERR
         * bit [1] - EN_AES_COMPLETE
         * bit [0] - reserved
         */
        iowrite32(AES_COMPLETE_INT, aes_dev->base_reg + AES_IER_OFFSET);
        return;
    }
    if (irq == AES_DMA_SRC_DONE_INT) {
        /* Ensure AES interrupts are disabled */
        iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
        /*
         * DMA_MSI_IER
         * default 0x00000000
         * bits [31:9] - reserved
         * bit [8] - CPD_ERR_INT_EN
         * bit [7] - OUTBUF_RD_ERR_INT_EN
         * bit [6] - OUTBUF_WR_ERR_INT_EN
         * bit [5] - INBUF_RD_ERR_INT_EN
         * bit [4] - INBUF_WR_ERR_INT_EN
         * bit [3] - BAD_COMP_INT_EN
         * bit [2] - SAI_INT_EN
         * bit [1] - DST_DONE_INT_EN
         * bit [0] - SRC_DONE_INT_EN
         */
        iowrite32(AES_DMA_CPD_ERR_INT |
              AES_DMA_OUTBUF_RD_ERR_INT |
              AES_DMA_OUTBUF_WR_ERR_INT |
              AES_DMA_INBUF_RD_ERR_INT |
              AES_DMA_INBUF_WR_ERR_INT |
              AES_DMA_BAD_COMP_INT |
              AES_DMA_SAI_INT |
              AES_DMA_SRC_DONE_INT,
              aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
    }
}

/* Enable and wait for IRQ (either from OCS AES engine or DMA) */
static int ocs_aes_irq_enable_and_wait(struct ocs_aes_dev *aes_dev, u8 irq)
{
    int rc;

    reinit_completion(&aes_dev->irq_completion);
    aes_irq_enable(aes_dev, irq);
    rc = wait_for_completion_interruptible(&aes_dev->irq_completion);
    if (rc)
        return rc;

    return aes_dev->dma_err_mask ? -EIO : 0;
}

/* Configure DMA to OCS, linked list mode */
static inline void dma_to_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
                     dma_addr_t dma_list)
{
    iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
    iowrite32(dma_list,
          aes_dev->base_reg + AES_A_DMA_NEXT_SRC_DESCR_OFFSET);
}

/* Configure DMA from OCS, linked list mode */
static inline void dma_from_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
                       dma_addr_t dma_list)
{
    iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
    iowrite32(dma_list,
          aes_dev->base_reg + AES_A_DMA_NEXT_DST_DESCR_OFFSET);
}

irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id)
{
    struct ocs_aes_dev *aes_dev = dev_id;
    u32 aes_dma_isr;

    /* Read DMA ISR status. */
    aes_dma_isr = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

    /* Disable and clear interrupts. */
    aes_irq_disable(aes_dev);

    /* Save DMA error status. */
    aes_dev->dma_err_mask = aes_dma_isr &
                (AES_DMA_CPD_ERR_INT |
                 AES_DMA_OUTBUF_RD_ERR_INT |
                 AES_DMA_OUTBUF_WR_ERR_INT |
                 AES_DMA_INBUF_RD_ERR_INT |
                 AES_DMA_INBUF_WR_ERR_INT |
                 AES_DMA_BAD_COMP_INT |
                 AES_DMA_SAI_INT);

    /* Signal IRQ completion. */
    complete(&aes_dev->irq_completion);

    return IRQ_HANDLED;
}

/**
 * ocs_aes_set_key() - Write key into OCS AES hardware.
 * @aes_dev:    The OCS AES device to write the key to.
 * @key_size:   The size of the key (in bytes).
 * @key:    The key to write.
 * @cipher: The cipher the key is for.
 *
 * For AES @key_size must be either 16 or 32. For SM4 @key_size must be 16.
 *
 * Return:  0 on success, negative error code otherwise.
 */
int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, u32 key_size, const u8 *key,
            enum ocs_cipher cipher)
{
    const u32 *key_u32;
    u32 val;
    int i;

    /* OCS AES supports 128-bit and 256-bit keys only. */
    if (cipher == OCS_AES && !(key_size == 32 || key_size == 16)) {
        dev_err(aes_dev->dev,
            "%d-bit keys not supported by AES cipher\n",
            key_size * 8);
        return -EINVAL;
    }
    /* OCS SM4 supports 128-bit keys only. */
    if (cipher == OCS_SM4 && key_size != 16) {
        dev_err(aes_dev->dev,
            "%d-bit keys not supported for SM4 cipher\n",
            key_size * 8);
        return -EINVAL;
    }

    if (!key)
        return -EINVAL;

    key_u32 = (const u32 *)key;

    /* Write key to AES_KEY[0-7] registers */
    for (i = 0; i < (key_size / sizeof(u32)); i++) {
        iowrite32(key_u32[i],
              aes_dev->base_reg + AES_KEY_0_OFFSET +
              (i * sizeof(u32)));
    }
    /*
     * Write key size
     * bits [31:1] - reserved
     * bit [0] - AES_KEY_SIZE
     *           0 - 128 bit key
     *           1 - 256 bit key
     */
    val = (key_size == 16) ? AES_128_BIT_KEY : AES_256_BIT_KEY;
    iowrite32(val, aes_dev->base_reg + AES_KEY_SIZE_OFFSET);

    return 0;
}
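
/*
 * Illustrative usage sketch (not part of the driver), assuming aes_dev is
 * an already-probed OCS device.  The key below is the AES-128 sample key
 * from FIPS-197 / SP 800-38A:
 *
 *   static const u8 key128[16] = {
 *       0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
 *       0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
 *   };
 *   int rc = ocs_aes_set_key(aes_dev, sizeof(key128), key128, OCS_AES);
 *
 * rc is -EINVAL for a NULL key or an unsupported key size (e.g. 24).
 */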

/* Write AES_COMMAND */
static inline void set_ocs_aes_command(struct ocs_aes_dev *aes_dev,
                       enum ocs_cipher cipher,
                       enum ocs_mode mode,
                       enum ocs_instruction instruction)
{
    u32 val;

    /* AES_COMMAND
     * default 0x000000CC
     * bit [14] - CIPHER_SELECT
     *            0 - AES
     *            1 - SM4
     * bits [11:8] - OCS_AES_MODE
     *               0000 - ECB
     *               0001 - CBC
     *               0010 - CTR
     *               0110 - CCM
     *               0111 - GCM
     *               1001 - CTS
     * bits [7:6] - AES_INSTRUCTION
     *              00 - ENCRYPT
     *              01 - DECRYPT
     *              10 - EXPAND
     *              11 - BYPASS
     * bits [3:2] - CTR_M_BITS
     *              00 - No increment
     *              01 - Least significant 32 bits are incremented
     *              10 - Least significant 64 bits are incremented
     *              11 - Full 128 bits are incremented
     */
    val = (cipher << 14) | (mode << 8) | (instruction << 6) |
          (AES_CTR_M_128_INC << 2);
    iowrite32(val, aes_dev->base_reg + AES_COMMAND_OFFSET);
}
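
/*
 * Worked example (not part of the driver), assuming the ocs_cipher /
 * ocs_mode / ocs_instruction enum values in ocs-aes.h match the register
 * encodings listed above: for an AES-CBC encrypt the function writes
 *
 *   val = (0 << 14)   // CIPHER_SELECT   = AES
 *       | (1 << 8)    // OCS_AES_MODE    = 0001b (CBC)
 *       | (0 << 6)    // AES_INSTRUCTION = 00b (ENCRYPT)
 *       | (3 << 2);   // CTR_M_BITS      = 11b (full 128-bit increment)
 *
 * i.e. val == 0x010C.
 */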

static void ocs_aes_init(struct ocs_aes_dev *aes_dev,
             enum ocs_mode mode,
             enum ocs_cipher cipher,
             enum ocs_instruction instruction)
{
    /* Ensure interrupts are disabled and pending interrupts cleared. */
    aes_irq_disable(aes_dev);

    /* Set endianness recommended by data-sheet. */
    aes_a_set_endianness(aes_dev);

    /* Set AES_COMMAND register. */
    set_ocs_aes_command(aes_dev, cipher, mode, instruction);
}

/*
 * Write the byte length of the last AES/SM4 block of Payload data (without
 * zero padding and without the length of the MAC) in register AES_PLEN.
 */
static inline void ocs_aes_write_last_data_blk_len(struct ocs_aes_dev *aes_dev,
                           u32 size)
{
    u32 val;

    if (size == 0) {
        val = 0;
        goto exit;
    }

    val = size % AES_BLOCK_SIZE;
    if (val == 0)
        val = AES_BLOCK_SIZE;

exit:
    iowrite32(val, aes_dev->base_reg + AES_PLEN_OFFSET);
}
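
/*
 * Worked examples (AES_BLOCK_SIZE == 16):
 *   size ==  0  ->  AES_PLEN = 0
 *   size == 20  ->  AES_PLEN = 4   (20 % 16)
 *   size == 32  ->  AES_PLEN = 16  (multiple of the block size, so the
 *                                   last block is a full block)
 */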

/*
 * Validate inputs according to mode.
 * If OK return 0; else return -EINVAL.
 */
static int ocs_aes_validate_inputs(dma_addr_t src_dma_list, u32 src_size,
                   const u8 *iv, u32 iv_size,
                   dma_addr_t aad_dma_list, u32 aad_size,
                   const u8 *tag, u32 tag_size,
                   enum ocs_cipher cipher, enum ocs_mode mode,
                   enum ocs_instruction instruction,
                   dma_addr_t dst_dma_list)
{
    /* Ensure cipher, mode and instruction are valid. */
    if (!(cipher == OCS_AES || cipher == OCS_SM4))
        return -EINVAL;

    if (mode != OCS_MODE_ECB && mode != OCS_MODE_CBC &&
        mode != OCS_MODE_CTR && mode != OCS_MODE_CCM &&
        mode != OCS_MODE_GCM && mode != OCS_MODE_CTS)
        return -EINVAL;

    if (instruction != OCS_ENCRYPT && instruction != OCS_DECRYPT &&
        instruction != OCS_EXPAND  && instruction != OCS_BYPASS)
        return -EINVAL;

    /*
     * When instruction is OCS_BYPASS, OCS simply copies data from source
     * to destination using DMA.
     *
     * AES mode is irrelevant, but both source and destination DMA
     * linked-list must be defined.
     */
    if (instruction == OCS_BYPASS) {
        if (src_dma_list == DMA_MAPPING_ERROR ||
            dst_dma_list == DMA_MAPPING_ERROR)
            return -EINVAL;

        return 0;
    }

    /*
     * For performance reasons switch based on mode to limit unnecessary
     * conditionals for each mode
     */
    switch (mode) {
    case OCS_MODE_ECB:
        /* Ensure input length is multiple of block size */
        if (src_size % AES_BLOCK_SIZE != 0)
            return -EINVAL;

        /* Ensure source and destination linked lists are created */
        if (src_dma_list == DMA_MAPPING_ERROR ||
            dst_dma_list == DMA_MAPPING_ERROR)
            return -EINVAL;

        return 0;

    case OCS_MODE_CBC:
        /* Ensure input length is multiple of block size */
        if (src_size % AES_BLOCK_SIZE != 0)
            return -EINVAL;

        /* Ensure source and destination linked lists are created */
        if (src_dma_list == DMA_MAPPING_ERROR ||
            dst_dma_list == DMA_MAPPING_ERROR)
            return -EINVAL;

        /* Ensure IV is present and block size in length */
        if (!iv || iv_size != AES_BLOCK_SIZE)
            return -EINVAL;

        return 0;

    case OCS_MODE_CTR:
        /* Ensure input length of 1 byte or greater */
        if (src_size == 0)
            return -EINVAL;

        /* Ensure source and destination linked lists are created */
        if (src_dma_list == DMA_MAPPING_ERROR ||
            dst_dma_list == DMA_MAPPING_ERROR)
            return -EINVAL;

        /* Ensure IV is present and block size in length */
        if (!iv || iv_size != AES_BLOCK_SIZE)
            return -EINVAL;

        return 0;

    case OCS_MODE_CTS:
        /* Ensure input length >= block size */
        if (src_size < AES_BLOCK_SIZE)
            return -EINVAL;

        /* Ensure source and destination linked lists are created */
        if (src_dma_list == DMA_MAPPING_ERROR ||
            dst_dma_list == DMA_MAPPING_ERROR)
            return -EINVAL;

        /* Ensure IV is present and block size in length */
        if (!iv || iv_size != AES_BLOCK_SIZE)
            return -EINVAL;

        return 0;

    case OCS_MODE_GCM:
        /* Ensure IV is present and GCM_AES_IV_SIZE in length */
        if (!iv || iv_size != GCM_AES_IV_SIZE)
            return -EINVAL;

        /*
         * If input data present ensure source and destination linked
         * lists are created
         */
        if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
                 dst_dma_list == DMA_MAPPING_ERROR))
            return -EINVAL;

        /* If aad present ensure aad linked list is created */
        if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
            return -EINVAL;

        /* Ensure tag destination is set */
        if (!tag)
            return -EINVAL;

        /* Just ensure that tag_size doesn't cause overflows. */
        if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
            return -EINVAL;

        return 0;

    case OCS_MODE_CCM:
        /* Ensure IV is present and block size in length */
        if (!iv || iv_size != AES_BLOCK_SIZE)
            return -EINVAL;

        /* 2 <= L <= 8, so 1 <= L' <= 7 */
        if (iv[L_PRIME_IDX] < L_PRIME_MIN ||
            iv[L_PRIME_IDX] > L_PRIME_MAX)
            return -EINVAL;

        /* If aad present ensure aad linked list is created */
        if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
            return -EINVAL;

        /* Just ensure that tag_size doesn't cause overflows. */
        if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
            return -EINVAL;

        if (instruction == OCS_DECRYPT) {
            /*
             * If input data present ensure source and destination
             * linked lists are created
             */
            if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
                     dst_dma_list == DMA_MAPPING_ERROR))
                return -EINVAL;

            /* Ensure input tag is present */
            if (!tag)
                return -EINVAL;

            return 0;
        }

        /* Instruction == OCS_ENCRYPT */

        /*
         * Destination linked list always required (for tag even if no
         * input data)
         */
        if (dst_dma_list == DMA_MAPPING_ERROR)
            return -EINVAL;

        /* If input data present ensure src linked list is created */
        if (src_size && src_dma_list == DMA_MAPPING_ERROR)
            return -EINVAL;

        return 0;

    default:
        return -EINVAL;
    }
}

/**
 * ocs_aes_op() - Perform AES/SM4 operation.
 * @aes_dev:        The OCS AES device to use.
 * @mode:       The mode to use (ECB, CBC, CTR, or CTS).
 * @cipher:     The cipher to use (AES or SM4).
 * @instruction:    The instruction to perform (encrypt or decrypt).
 * @dst_dma_list:   The OCS DMA list mapping output memory.
 * @src_dma_list:   The OCS DMA list mapping input payload data.
 * @src_size:       The amount of data mapped by @src_dma_list.
 * @iv:         The IV vector.
 * @iv_size:        The size (in bytes) of @iv.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_op(struct ocs_aes_dev *aes_dev,
           enum ocs_mode mode,
           enum ocs_cipher cipher,
           enum ocs_instruction instruction,
           dma_addr_t dst_dma_list,
           dma_addr_t src_dma_list,
           u32 src_size,
           u8 *iv,
           u32 iv_size)
{
    u32 *iv32;
    int rc;

    rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv, iv_size, 0, 0,
                     NULL, 0, cipher, mode, instruction,
                     dst_dma_list);
    if (rc)
        return rc;
    /*
     * ocs_aes_validate_inputs() is a generic check, now ensure mode is not
     * GCM or CCM.
     */
    if (mode == OCS_MODE_GCM || mode == OCS_MODE_CCM)
        return -EINVAL;

    /* Cast IV to u32 array. */
    iv32 = (u32 *)iv;

    ocs_aes_init(aes_dev, mode, cipher, instruction);

    if (mode == OCS_MODE_CTS) {
        /* Write the byte length of the last data block to engine. */
        ocs_aes_write_last_data_blk_len(aes_dev, src_size);
    }

    /* ECB is the only mode that doesn't use IV. */
    if (mode != OCS_MODE_ECB) {
        iowrite32(iv32[0], aes_dev->base_reg + AES_IV_0_OFFSET);
        iowrite32(iv32[1], aes_dev->base_reg + AES_IV_1_OFFSET);
        iowrite32(iv32[2], aes_dev->base_reg + AES_IV_2_OFFSET);
        iowrite32(iv32[3], aes_dev->base_reg + AES_IV_3_OFFSET);
    }

    /* Set AES_ACTIVE.TRIGGER to start the operation. */
    aes_a_op_trigger(aes_dev);

    /* Configure and activate input / output DMA. */
    dma_to_ocs_aes_ll(aes_dev, src_dma_list);
    dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
    aes_a_dma_active_src_dst_ll_en(aes_dev);

    if (mode == OCS_MODE_CTS) {
        /*
         * For CTS mode, instruct engine to activate ciphertext
         * stealing if last block of data is incomplete.
         */
        aes_a_set_last_gcx(aes_dev);
    } else {
        /* For all other modes, just write the 'termination' bit. */
        aes_a_op_termination(aes_dev);
    }

    /* Wait for engine to complete processing. */
    rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
    if (rc)
        return rc;

    if (mode == OCS_MODE_CTR) {
        /* Read back IV for streaming mode */
        iv32[0] = ioread32(aes_dev->base_reg + AES_IV_0_OFFSET);
        iv32[1] = ioread32(aes_dev->base_reg + AES_IV_1_OFFSET);
        iv32[2] = ioread32(aes_dev->base_reg + AES_IV_2_OFFSET);
        iv32[3] = ioread32(aes_dev->base_reg + AES_IV_3_OFFSET);
    }

    return 0;
}
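
/*
 * Illustrative usage sketch (not part of the driver): AES-256-CTR encrypt
 * of a DMA-mapped buffer.  src_ll and dst_ll are assumed to be OCS DMA
 * lists built with ocs_create_linked_list_from_sg(), and iv is a 16-byte
 * counter block (updated in place on return, so streaming can continue):
 *
 *   rc = ocs_aes_set_key(aes_dev, 32, key, OCS_AES);
 *   if (rc)
 *       return rc;
 *   rc = ocs_aes_op(aes_dev, OCS_MODE_CTR, OCS_AES, OCS_ENCRYPT,
 *                   dst_ll.dma_addr, src_ll.dma_addr, src_size,
 *                   iv, AES_BLOCK_SIZE);
 */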

/* Compute and write J0 to engine registers. */
static void ocs_aes_gcm_write_j0(const struct ocs_aes_dev *aes_dev,
                 const u8 *iv)
{
    const u32 *j0 = (u32 *)iv;

    /*
     * IV must be 12 bytes; other sizes are not supported, as the Linux
     * crypto API only expects/allows a 12-byte IV for GCM.
     */
    iowrite32(0x00000001, aes_dev->base_reg + AES_IV_0_OFFSET);
    iowrite32(__swab32(j0[2]), aes_dev->base_reg + AES_IV_1_OFFSET);
    iowrite32(__swab32(j0[1]), aes_dev->base_reg + AES_IV_2_OFFSET);
    iowrite32(__swab32(j0[0]), aes_dev->base_reg + AES_IV_3_OFFSET);
}
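
/*
 * Worked example (little-endian host): for the 12-byte IV 0x00, 0x01, ...,
 * 0x0b the function above programs J0 = IV || 0x00000001 (the 96-bit IV
 * case of NIST SP 800-38D):
 *
 *   AES_IV_3 = 0x00010203   // __swab32(j0[0]), most significant word
 *   AES_IV_2 = 0x04050607   // __swab32(j0[1])
 *   AES_IV_1 = 0x08090a0b   // __swab32(j0[2])
 *   AES_IV_0 = 0x00000001   // 32-bit counter, starting at 1
 */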

/* Read GCM tag from engine registers. */
static inline void ocs_aes_gcm_read_tag(struct ocs_aes_dev *aes_dev,
                    u8 *tag, u32 tag_size)
{
    u32 tag_u32[AES_MAX_TAG_SIZE_U32];

    /*
     * The Authentication Tag T is stored in Little Endian order in the
     * registers with the most significant bytes stored from AES_T_MAC[3]
     * downward.
     */
    tag_u32[0] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_3_OFFSET));
    tag_u32[1] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_2_OFFSET));
    tag_u32[2] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_1_OFFSET));
    tag_u32[3] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_0_OFFSET));

    memcpy(tag, tag_u32, tag_size);
}

/**
 * ocs_aes_gcm_op() - Perform GCM operation.
 * @aes_dev:        The OCS AES device to use.
 * @cipher:     The Cipher to use (AES or SM4).
 * @instruction:    The instruction to perform (encrypt or decrypt).
 * @dst_dma_list:   The OCS DMA list mapping output memory.
 * @src_dma_list:   The OCS DMA list mapping input payload data.
 * @src_size:       The amount of data mapped by @src_dma_list.
 * @iv:         The input IV vector.
 * @aad_dma_list:   The OCS DMA list mapping input AAD data.
 * @aad_size:       The amount of data mapped by @aad_dma_list.
 * @out_tag:        Where to store computed tag.
 * @tag_size:       The size (in bytes) of @out_tag.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
           enum ocs_cipher cipher,
           enum ocs_instruction instruction,
           dma_addr_t dst_dma_list,
           dma_addr_t src_dma_list,
           u32 src_size,
           const u8 *iv,
           dma_addr_t aad_dma_list,
           u32 aad_size,
           u8 *out_tag,
           u32 tag_size)
{
    u64 bit_len;
    u32 val;
    int rc;

    rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
                     GCM_AES_IV_SIZE, aad_dma_list,
                     aad_size, out_tag, tag_size, cipher,
                     OCS_MODE_GCM, instruction,
                     dst_dma_list);
    if (rc)
        return rc;

    ocs_aes_init(aes_dev, OCS_MODE_GCM, cipher, instruction);

    /* Compute and write J0 to OCS HW. */
    ocs_aes_gcm_write_j0(aes_dev, iv);

    /* Write out_tag byte length */
    iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);

    /* Write the byte length of the last plaintext / ciphertext block. */
    ocs_aes_write_last_data_blk_len(aes_dev, src_size);

    /* Write ciphertext bit length */
    bit_len = (u64)src_size * 8;
    val = bit_len & 0xFFFFFFFF;
    iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
    val = bit_len >> 32;
    iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);

    /* Write aad bit length */
    bit_len = (u64)aad_size * 8;
    val = bit_len & 0xFFFFFFFF;
    iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
    val = bit_len >> 32;
    iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_3_OFFSET);

    /* Set AES_ACTIVE.TRIGGER to start the operation. */
    aes_a_op_trigger(aes_dev);

    /* Process AAD. */
    if (aad_size) {
        /* If aad present, configure DMA to feed it to the engine. */
        dma_to_ocs_aes_ll(aes_dev, aad_dma_list);
        aes_a_dma_active_src_ll_en(aes_dev);

        /* Instructs engine to pad last block of aad, if needed. */
        aes_a_set_last_gcx_and_adata(aes_dev);

        /* Wait for DMA transfer to complete. */
        rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
        if (rc)
            return rc;
    } else {
        aes_a_set_last_gcx_and_adata(aes_dev);
    }

    /* Wait until adata (if present) has been processed. */
    aes_a_wait_last_gcx(aes_dev);
    aes_a_dma_wait_input_buffer_occupancy(aes_dev);

    /* Now process payload. */
    if (src_size) {
        /* Configure and activate DMA for both input and output data. */
        dma_to_ocs_aes_ll(aes_dev, src_dma_list);
        dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
        aes_a_dma_active_src_dst_ll_en(aes_dev);
    } else {
        aes_a_dma_set_xfer_size_zero(aes_dev);
        aes_a_dma_active(aes_dev);
    }

    /* Instruct the AES/SM4 engine that payload processing is over. */
    aes_a_set_last_gcx(aes_dev);

    /* Wait for OCS AES engine to complete processing. */
    rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
    if (rc)
        return rc;

    ocs_aes_gcm_read_tag(aes_dev, out_tag, tag_size);

    return 0;
}

/* Write encrypted tag to AES/SM4 engine. */
static void ocs_aes_ccm_write_encrypted_tag(struct ocs_aes_dev *aes_dev,
                        const u8 *in_tag, u32 tag_size)
{
    int i;

    /* Ensure DMA input buffer is empty */
    aes_a_dma_wait_input_buffer_occupancy(aes_dev);

    /*
     * During CCM decrypt, the OCS block needs to finish processing the
     * ciphertext before the tag is written, so a delay is needed after the
     * DMA has completed writing the ciphertext.
     */
    aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
    aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
                        CCM_DECRYPT_DELAY_TAG_CLK_COUNT);

    /* Write encrypted tag to AES/SM4 engine. */
    for (i = 0; i < tag_size; i++) {
        iowrite8(in_tag[i], aes_dev->base_reg +
                    AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
    }
}

/*
 * Write B0 CCM block to OCS AES HW.
 *
 * Note: B0 format is documented in NIST Special Publication 800-38C
 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
 * (see Section A.2.1)
 */
static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
                const u8 *iv, u32 adata_size, u32 tag_size,
                u32 cryptlen)
{
    u8 b0[16]; /* CCM B0 block is 16 bytes long. */
    int i, q;

    /* Initialize B0 to 0. */
    memset(b0, 0, sizeof(b0));

    /*
     * B0[0] is the 'Flags Octet' and has the following structure:
     *   bit 7: Reserved
     *   bit 6: Adata flag
     *   bit 5-3: t value encoded as (t-2)/2
     *   bit 2-0: q value encoded as q - 1
     */
    /* If there is AAD data, set the Adata flag. */
    if (adata_size)
        b0[0] |= BIT(6);
    /*
     * t denotes the octet length of T.
     * t can only be an element of { 4, 6, 8, 10, 12, 14, 16} and is
     * encoded as (t - 2) / 2
     */
    b0[0] |= (((tag_size - 2) / 2) & 0x7)  << 3;
    /*
     * q is the octet length of Q.
     * q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
     * q - 1 == iv[0] & 0x7;
     */
    b0[0] |= iv[0] & 0x7;
    /*
     * Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
     * and must be copied to b0[1]..b0[15-q].
     * q == (iv[0] & 0x7) + 1
     */
    q = (iv[0] & 0x7) + 1;
    for (i = 1; i <= 15 - q; i++)
        b0[i] = iv[i];
    /*
     * The rest of B0 must contain Q, i.e., the message length.
     * Q is encoded in q octets, in big-endian order, so to write it, we
     * start from the end of B0 and we move backward.
     */
    i = sizeof(b0) - 1;
    while (q) {
        b0[i] = cryptlen & 0xff;
        cryptlen >>= 8;
        i--;
        q--;
    }
    /*
     * If cryptlen is not zero at this point, it means that its original
     * value was too big.
     */
    if (cryptlen)
        return -EOVERFLOW;
    /* Now write B0 to OCS AES input buffer. */
    for (i = 0; i < sizeof(b0); i++)
        iowrite8(b0[i], aes_dev->base_reg +
                AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
    return 0;
}
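
/*
 * Worked example for the flags octet: with adata present, a 16-byte tag
 * (t == 16) and iv[0] == 3 (so q == 4):
 *
 *   b0[0] = BIT(6)                   // 0x40, Adata flag
 *         | (((16 - 2) / 2) << 3)    // 0x38, t encoded as 7
 *         | 0x3;                     // q encoded as q - 1
 *
 * i.e. b0[0] == 0x7b, and b0[12]..b0[15] receive the 4-byte big-endian
 * message length Q.
 */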

/*
 * Write adata length to OCS AES HW.
 *
 * Note: adata len encoding is documented in NIST Special Publication 800-38C
 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
 * (see Section A.2.2)
 */
static void ocs_aes_ccm_write_adata_len(const struct ocs_aes_dev *aes_dev,
                    u64 adata_len)
{
    u8 enc_a[10]; /* Maximum encoded size: 10 octets. */
    int i, len;

    /*
     * adata_len ('a') is encoded as follows:
     * If 0 < a < 2^16 - 2^8    ==> 'a' encoded as [a]16, i.e., two octets
     *              (big endian).
     * If 2^16 - 2^8 ≤ a < 2^32 ==> 'a' encoded as 0xff || 0xfe || [a]32,
     *              i.e., six octets (big endian).
     * If 2^32 ≤ a < 2^64       ==> 'a' encoded as 0xff || 0xff || [a]64,
     *              i.e., ten octets (big endian).
     */
    if (adata_len < 65280) {
        len = 2;
        *(__be16 *)enc_a = cpu_to_be16(adata_len);
    } else if (adata_len <= 0xFFFFFFFF) {
        len = 6;
        *(__be16 *)enc_a = cpu_to_be16(0xfffe);
        *(__be32 *)&enc_a[2] = cpu_to_be32(adata_len);
    } else { /* adata_len >= 2^32 */
        len = 10;
        *(__be16 *)enc_a = cpu_to_be16(0xffff);
        *(__be64 *)&enc_a[2] = cpu_to_be64(adata_len);
    }
    for (i = 0; i < len; i++)
        iowrite8(enc_a[i],
             aes_dev->base_reg +
             AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
}
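
/*
 * Worked examples of the encoding above:
 *   adata_len == 300      -> two octets: 0x01 0x2c
 *   adata_len == 0x10000  -> six octets: 0xff 0xfe 0x00 0x01 0x00 0x00
 *   adata_len == 2^32     -> ten octets: 0xff 0xff followed by the 64-bit
 *                            big-endian value 0x0000000100000000
 */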

static int ocs_aes_ccm_do_adata(struct ocs_aes_dev *aes_dev,
                dma_addr_t adata_dma_list, u32 adata_size)
{
    int rc;

    if (!adata_size) {
        /* Since no aad the LAST_GCX bit can be set now */
        aes_a_set_last_gcx_and_adata(aes_dev);
        goto exit;
    }

    /* Adata case. */

    /*
     * Form the encoding of the Associated data length and write it
     * to the AES/SM4 input buffer.
     */
    ocs_aes_ccm_write_adata_len(aes_dev, adata_size);

    /* Configure the AES/SM4 DMA to fetch the Associated Data */
    dma_to_ocs_aes_ll(aes_dev, adata_dma_list);

    /* Activate DMA to fetch Associated data. */
    aes_a_dma_active_src_ll_en(aes_dev);

    /* Set LAST_GCX and LAST_ADATA in AES ACTIVE register. */
    aes_a_set_last_gcx_and_adata(aes_dev);

    /* Wait for DMA transfer to complete. */
    rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
    if (rc)
        return rc;

exit:
    /* Wait until adata (if present) has been processed. */
    aes_a_wait_last_gcx(aes_dev);
    aes_a_dma_wait_input_buffer_occupancy(aes_dev);

    return 0;
}

static int ocs_aes_ccm_encrypt_do_payload(struct ocs_aes_dev *aes_dev,
                      dma_addr_t dst_dma_list,
                      dma_addr_t src_dma_list,
                      u32 src_size)
{
    if (src_size) {
        /*
         * Configure and activate DMA for both input and output
         * data.
         */
        dma_to_ocs_aes_ll(aes_dev, src_dma_list);
        dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
        aes_a_dma_active_src_dst_ll_en(aes_dev);
    } else {
        /* Configure and activate DMA for output data only. */
        dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
        aes_a_dma_active_dst_ll_en(aes_dev);
    }

    /*
     * Set the LAST GCX bit in AES_ACTIVE Register to instruct
     * AES/SM4 engine to pad the last block of data.
     */
    aes_a_set_last_gcx(aes_dev);

    /* We are done, wait for IRQ and return. */
    return ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
}

static int ocs_aes_ccm_decrypt_do_payload(struct ocs_aes_dev *aes_dev,
                      dma_addr_t dst_dma_list,
                      dma_addr_t src_dma_list,
                      u32 src_size)
{
    if (!src_size) {
        /* Let engine process 0-length input. */
        aes_a_dma_set_xfer_size_zero(aes_dev);
        aes_a_dma_active(aes_dev);
        aes_a_set_last_gcx(aes_dev);

        return 0;
    }

    /*
     * Configure and activate DMA for both input and output
     * data.
     */
    dma_to_ocs_aes_ll(aes_dev, src_dma_list);
    dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
    aes_a_dma_active_src_dst_ll_en(aes_dev);
    /*
     * Set the LAST GCX bit in AES_ACTIVE Register; this allows the
     * AES/SM4 engine to differentiate between encrypted data and
     * encrypted MAC.
     */
    aes_a_set_last_gcx(aes_dev);
    /*
     * Enable DMA DONE interrupt; once DMA transfer is over,
     * interrupt handler will process the MAC/tag.
     */
    return ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
}

/*
 * Compare Tag to Yr.
 *
 * Only used at the end of CCM decrypt. If tag == yr, message authentication
 * has succeeded.
 */
static inline int ccm_compare_tag_to_yr(struct ocs_aes_dev *aes_dev,
                    u8 tag_size_bytes)
{
    u32 tag[AES_MAX_TAG_SIZE_U32];
    u32 yr[AES_MAX_TAG_SIZE_U32];
    u8 i;

    /* Read Tag and Yr from AES registers. */
    for (i = 0; i < AES_MAX_TAG_SIZE_U32; i++) {
        tag[i] = ioread32(aes_dev->base_reg +
                  AES_T_MAC_0_OFFSET + (i * sizeof(u32)));
        yr[i] = ioread32(aes_dev->base_reg +
                 AES_MULTIPURPOSE2_0_OFFSET +
                 (i * sizeof(u32)));
    }

    return memcmp(tag, yr, tag_size_bytes) ? -EBADMSG : 0;
}

/**
 * ocs_aes_ccm_op() - Perform CCM operation.
 * @aes_dev:        The OCS AES device to use.
 * @cipher:     The Cipher to use (AES or SM4).
 * @instruction:    The instruction to perform (encrypt or decrypt).
 * @dst_dma_list:   The OCS DMA list mapping output memory.
 * @src_dma_list:   The OCS DMA list mapping input payload data.
 * @src_size:       The amount of data mapped by @src_dma_list.
 * @iv:         The input IV vector.
 * @adata_dma_list: The OCS DMA list mapping input A-data.
 * @adata_size:     The amount of data mapped by @adata_dma_list.
 * @in_tag:     Input tag.
 * @tag_size:       The size (in bytes) of @in_tag.
 *
 * Note: for encrypt the tag is appended to the ciphertext (in the memory
 *   mapped by @dst_dma_list).
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
           enum ocs_cipher cipher,
           enum ocs_instruction instruction,
           dma_addr_t dst_dma_list,
           dma_addr_t src_dma_list,
           u32 src_size,
           u8 *iv,
           dma_addr_t adata_dma_list,
           u32 adata_size,
           u8 *in_tag,
           u32 tag_size)
{
    u32 *iv_32;
    u8 lprime;
    int rc;

    rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
                     AES_BLOCK_SIZE, adata_dma_list, adata_size,
                     in_tag, tag_size, cipher, OCS_MODE_CCM,
                     instruction, dst_dma_list);
    if (rc)
        return rc;

    ocs_aes_init(aes_dev, OCS_MODE_CCM, cipher, instruction);

    /*
     * Note: RFC 3610 and NIST 800-38C require a counter value of zero to
     * encrypt the auth tag, so ensure this is the case.
     */
    lprime = iv[L_PRIME_IDX];
    memset(&iv[COUNTER_START(lprime)], 0, COUNTER_LEN(lprime));

    /*
     * Nonce is already converted to ctr0 before being passed into this
     * function as iv.
     */
    iv_32 = (u32 *)iv;
    iowrite32(__swab32(iv_32[0]),
          aes_dev->base_reg + AES_MULTIPURPOSE1_3_OFFSET);
    iowrite32(__swab32(iv_32[1]),
          aes_dev->base_reg + AES_MULTIPURPOSE1_2_OFFSET);
    iowrite32(__swab32(iv_32[2]),
          aes_dev->base_reg + AES_MULTIPURPOSE1_1_OFFSET);
    iowrite32(__swab32(iv_32[3]),
          aes_dev->base_reg + AES_MULTIPURPOSE1_0_OFFSET);

    /* Write MAC/tag length in register AES_TLEN */
    iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
    /*
     * Write the byte length of the last AES/SM4 block of Payload data
     * (without zero padding and without the length of the MAC) in register
     * AES_PLEN.
     */
    ocs_aes_write_last_data_blk_len(aes_dev, src_size);

    /* Set AES_ACTIVE.TRIGGER to start the operation. */
    aes_a_op_trigger(aes_dev);

    aes_a_dma_reset_and_activate_perf_cntr(aes_dev);

    /* Form block B0 and write it to the AES/SM4 input buffer. */
    rc = ocs_aes_ccm_write_b0(aes_dev, iv, adata_size, tag_size, src_size);
    if (rc)
        return rc;
    /*
     * Ensure there has been at least CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT
     * clock cycles since TRIGGER bit was set
     */
    aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
                        CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT);

    /* Process Adata. */
    ocs_aes_ccm_do_adata(aes_dev, adata_dma_list, adata_size);

    /* For Encrypt case we just process the payload and return. */
    if (instruction == OCS_ENCRYPT) {
        return ocs_aes_ccm_encrypt_do_payload(aes_dev, dst_dma_list,
                              src_dma_list, src_size);
    }
    /* For decrypt we need to process the payload and then the tag. */
    rc = ocs_aes_ccm_decrypt_do_payload(aes_dev, dst_dma_list,
                        src_dma_list, src_size);
    if (rc)
        return rc;

    /* Process MAC/tag directly: feed tag to engine and wait for IRQ. */
    ocs_aes_ccm_write_encrypted_tag(aes_dev, in_tag, tag_size);
    rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
    if (rc)
        return rc;

    return ccm_compare_tag_to_yr(aes_dev, tag_size);
}

/**
 * ocs_create_linked_list_from_sg() - Create OCS DMA linked list from SG list.
 * @aes_dev:      The OCS AES device the list will be created for.
 * @sg:       The SG list OCS DMA linked list will be created from. When
 *        passed to this function, @sg must have been already mapped
 *        with dma_map_sg().
 * @sg_dma_count: The number of DMA-mapped entries in @sg. This must be the
 *        value returned by dma_map_sg() when @sg was mapped.
 * @dll_desc:     The OCS DMA dma_list to use to store information about the
 *        created linked list.
 * @data_size:    The size of the data (from the SG list) to be mapped into the
 *        OCS DMA linked list.
 * @data_offset:  The offset (within the SG list) of the data to be mapped.
 *
 * Return:  0 on success, negative error code otherwise.
 */
int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
                   struct scatterlist *sg,
                   int sg_dma_count,
                   struct ocs_dll_desc *dll_desc,
                   size_t data_size, size_t data_offset)
{
    struct ocs_dma_linked_list *ll = NULL;
    struct scatterlist *sg_tmp;
    unsigned int tmp;
    int dma_nents;
    int i;

    if (!dll_desc || !sg || !aes_dev)
        return -EINVAL;

    /* Default values for when no dll_desc is created. */
    dll_desc->vaddr = NULL;
    dll_desc->dma_addr = DMA_MAPPING_ERROR;
    dll_desc->size = 0;

    if (data_size == 0)
        return 0;

    /* Loop over sg_list until we reach entry at specified offset. */
    while (data_offset >= sg_dma_len(sg)) {
        data_offset -= sg_dma_len(sg);
        sg_dma_count--;
        sg = sg_next(sg);
        /* If we reach the end of the list, offset was invalid. */
        if (!sg || sg_dma_count == 0)
            return -EINVAL;
    }

    /* Compute number of DMA-mapped SG entries to add into OCS DMA list. */
    dma_nents = 0;
    tmp = 0;
    sg_tmp = sg;
    while (tmp < data_offset + data_size) {
        /* If we reach the end of the list, data_size was invalid. */
        if (!sg_tmp)
            return -EINVAL;
        tmp += sg_dma_len(sg_tmp);
        dma_nents++;
        sg_tmp = sg_next(sg_tmp);
    }
    if (dma_nents > sg_dma_count)
        return -EINVAL;

    /* Allocate the DMA list, one entry for each SG entry. */
    dll_desc->size = sizeof(struct ocs_dma_linked_list) * dma_nents;
    dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
                         &dll_desc->dma_addr, GFP_KERNEL);
    if (!dll_desc->vaddr)
        return -ENOMEM;

    /* Populate DMA linked list entries. */
    ll = dll_desc->vaddr;
    for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
        ll[i].src_addr = sg_dma_address(sg) + data_offset;
        ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
                (sg_dma_len(sg) - data_offset) : data_size;
        data_offset = 0;
        data_size -= ll[i].src_len;
        /* Current element points to the DMA address of the next one. */
        ll[i].next = dll_desc->dma_addr + (sizeof(*ll) * (i + 1));
        ll[i].ll_flags = 0;
    }
    /* Terminate last element. */
    ll[i - 1].next = 0;
    ll[i - 1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;

    return 0;
}
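
/*
 * Illustrative walk-through (not part of the driver): with a scatterlist
 * of three DMA-mapped entries of 64 bytes each, data_offset == 80 and
 * data_size == 100:
 *
 *   - the offset loop skips entry 0 (64 bytes) and enters entry 1 with
 *     data_offset == 16;
 *   - the counting loop yields dma_nents == 2 (entries 1 and 2);
 *   - the resulting list is
 *       ll[0] = { .src_addr = sg1_dma + 16, .src_len = 48 },
 *       ll[1] = { .src_addr = sg2_dma,      .src_len = 52,
 *                 .ll_flags = OCS_LL_DMA_FLAG_TERMINATE };
 *     where sg1_dma/sg2_dma stand for the DMA addresses of entries 1 and 2.
 */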