// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS HCU Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>

#include <crypto/sha2.h>

#include "ocs-hcu.h"

/* Registers. */
#define OCS_HCU_MODE                0x00
#define OCS_HCU_CHAIN               0x04
#define OCS_HCU_OPERATION           0x08
#define OCS_HCU_KEY_0               0x0C
#define OCS_HCU_ISR                 0x50
#define OCS_HCU_IER                 0x54
#define OCS_HCU_STATUS              0x58
#define OCS_HCU_MSG_LEN_LO          0x60
#define OCS_HCU_MSG_LEN_HI          0x64
#define OCS_HCU_KEY_BYTE_ORDER_CFG  0x80
#define OCS_HCU_DMA_SRC_ADDR        0x400
#define OCS_HCU_DMA_SRC_SIZE        0x408
#define OCS_HCU_DMA_DST_SIZE        0x40C
#define OCS_HCU_DMA_DMA_MODE        0x410
#define OCS_HCU_DMA_NEXT_SRC_DESCR  0x418
#define OCS_HCU_DMA_MSI_ISR         0x480
#define OCS_HCU_DMA_MSI_IER         0x484
#define OCS_HCU_DMA_MSI_MASK        0x488

/* Register bit definitions. */
#define HCU_MODE_ALGO_SHIFT         16
#define HCU_MODE_HMAC_SHIFT         22

#define HCU_STATUS_BUSY             BIT(0)

#define HCU_BYTE_ORDER_SWAP         BIT(0)

#define HCU_IRQ_HASH_DONE           BIT(2)
#define HCU_IRQ_HASH_ERR_MASK       (BIT(3) | BIT(1) | BIT(0))

#define HCU_DMA_IRQ_SRC_DONE        BIT(0)
#define HCU_DMA_IRQ_SAI_ERR         BIT(2)
#define HCU_DMA_IRQ_BAD_COMP_ERR    BIT(3)
#define HCU_DMA_IRQ_INBUF_RD_ERR    BIT(4)
#define HCU_DMA_IRQ_INBUF_WD_ERR    BIT(5)
#define HCU_DMA_IRQ_OUTBUF_WR_ERR   BIT(6)
#define HCU_DMA_IRQ_OUTBUF_RD_ERR   BIT(7)
#define HCU_DMA_IRQ_CRD_ERR         BIT(8)
#define HCU_DMA_IRQ_ERR_MASK        (HCU_DMA_IRQ_SAI_ERR | \
                                     HCU_DMA_IRQ_BAD_COMP_ERR | \
                                     HCU_DMA_IRQ_INBUF_RD_ERR | \
                                     HCU_DMA_IRQ_INBUF_WD_ERR | \
                                     HCU_DMA_IRQ_OUTBUF_WR_ERR | \
                                     HCU_DMA_IRQ_OUTBUF_RD_ERR | \
                                     HCU_DMA_IRQ_CRD_ERR)

#define HCU_DMA_SNOOP_MASK          (0x7 << 28)
#define HCU_DMA_SRC_LL_EN           BIT(25)
#define HCU_DMA_EN                  BIT(31)

#define OCS_HCU_ENDIANNESS_VALUE    0x2A

#define HCU_DMA_MSI_UNMASK          BIT(0)
#define HCU_DMA_MSI_DISABLE         0
#define HCU_IRQ_DISABLE             0

#define OCS_HCU_START               BIT(0)
#define OCS_HCU_TERMINATE           BIT(1)

#define OCS_LL_DMA_FLAG_TERMINATE   BIT(31)

#define OCS_HCU_HW_KEY_LEN_U32      (OCS_HCU_HW_KEY_LEN / sizeof(u32))

#define HCU_DATA_WRITE_ENDIANNESS_OFFSET    26

#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3   (SHA256_DIGEST_SIZE / sizeof(u32))
#define OCS_HCU_NUM_CHAINS_SHA384_512       (SHA512_DIGEST_SIZE / sizeof(u32))

/*
 * While polling a busy HCU, wait at most 200us between one check and the
 * next.
 */
#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US    200
/* Wait on a busy HCU for at most 1 second. */
#define OCS_HCU_WAIT_BUSY_TIMEOUT_US        1000000

/**
 * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
 * @src_addr:  Source address of the data.
 * @src_len:   Length of data to be fetched.
 * @nxt_desc:  Next descriptor to fetch.
 * @ll_flags:  Flags (freeze/terminate) for the DMA engine.
 */
struct ocs_hcu_dma_entry {
    u32 src_addr;
    u32 src_len;
    u32 nxt_desc;
    u32 ll_flags;
};

/**
 * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
 * @head:       The head of the list (points to the array backing the list).
 * @tail:       The current tail of the list; NULL if the list is empty.
 * @dma_addr:   The DMA address of @head (i.e., the DMA address of the backing
 *              array).
 * @max_nents:  Maximum number of entries in the list (i.e., number of
 *              elements in the backing array).
 *
 * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
 * backing the list is allocated with dma_alloc_coherent() and pointed to by
 * @head.
 */
struct ocs_hcu_dma_list {
    struct ocs_hcu_dma_entry    *head;
    struct ocs_hcu_dma_entry    *tail;
    dma_addr_t                  dma_addr;
    size_t                      max_nents;
};
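
/*
 * Illustrative usage sketch (not taken from an actual caller of this file;
 * 'hcu_dev', 'ctx', 'buf_dma' and 'buf_len' are assumed to be set up
 * elsewhere, e.g. by the ahash glue code):
 *
 *     struct ocs_hcu_dma_list *dl;
 *     int rc;
 *
 *     dl = ocs_hcu_dma_list_alloc(hcu_dev, 8);  // room for up to 8 entries
 *     if (!dl)
 *             return -ENOMEM;
 *     rc = ocs_hcu_dma_list_add_tail(hcu_dev, dl, buf_dma, buf_len);
 *     if (!rc)
 *             rc = ocs_hcu_hash_update(hcu_dev, &ctx, dl);
 *     ocs_hcu_dma_list_free(hcu_dev, dl);
 */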

static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
{
    switch (algo) {
    case OCS_HCU_ALGO_SHA224:
    case OCS_HCU_ALGO_SHA256:
    case OCS_HCU_ALGO_SM3:
        return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
    case OCS_HCU_ALGO_SHA384:
    case OCS_HCU_ALGO_SHA512:
        return OCS_HCU_NUM_CHAINS_SHA384_512;
    default:
        return 0;
    }
}

static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
{
    switch (algo) {
    case OCS_HCU_ALGO_SHA224:
        return SHA224_DIGEST_SIZE;
    case OCS_HCU_ALGO_SHA256:
    case OCS_HCU_ALGO_SM3:
        /* SM3 shares the same digest size as SHA-256. */
        return SHA256_DIGEST_SIZE;
    case OCS_HCU_ALGO_SHA384:
        return SHA384_DIGEST_SIZE;
    case OCS_HCU_ALGO_SHA512:
        return SHA512_DIGEST_SIZE;
    default:
        return 0;
    }
}

/**
 * ocs_hcu_wait_busy() - Wait for the OCS HCU hardware to become usable.
 * @hcu_dev:    OCS HCU device to wait for.
 *
 * Return: 0 if the device is free, -ETIMEDOUT if the device is busy and the
 *         internal timeout has expired.
 */
static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
{
    long val;

    return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
                              !(val & HCU_STATUS_BUSY),
                              OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
                              OCS_HCU_WAIT_BUSY_TIMEOUT_US);
}

static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
{
    /* Clear any pending interrupts. */
    writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
    hcu_dev->irq_err = false;
    /* Enable error and HCU done interrupts. */
    writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
           hcu_dev->io_base + OCS_HCU_IER);
}

static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
{
    /* Clear any pending interrupts. */
    writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
    hcu_dev->irq_err = false;
    /* Enable only the DMA source-completion and error interrupts. */
    writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
           hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
    /* Unmask the DMA MSI interrupts. */
    writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
}

static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
{
    writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
    writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
}

static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
{
    int rc;

    rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
    if (rc)
        goto exit;

    if (hcu_dev->irq_err) {
        /* Unset the error flag and return an error. */
        hcu_dev->irq_err = false;
        rc = -EIO;
    }

exit:
    ocs_hcu_irq_dis(hcu_dev);

    return rc;
}

/**
 * ocs_hcu_get_intermediate_data() - Get intermediate data.
 * @hcu_dev:    The target HCU device.
 * @data:       Where to store the intermediate data.
 * @algo:       The algorithm being used.
 *
 * This function is used to save the current state of the hashing process, so
 * that it can be resumed later.
 *
 * Note: once all data has been processed, the intermediate data actually
 * contains the hashing result. So this function is also used to retrieve the
 * final result of a hashing process.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
                                         struct ocs_hcu_idata *data,
                                         enum ocs_hcu_algo algo)
{
    const int n = ocs_hcu_num_chains(algo);
    u32 *chain;
    int rc;
    int i;

    /* Data not requested. */
    if (!data)
        return -EINVAL;

    chain = (u32 *)data->digest;

    /* Ensure that the OCS is no longer busy before reading the chains. */
    rc = ocs_hcu_wait_busy(hcu_dev);
    if (rc)
        return rc;

    /*
     * This loop is safe because data->digest is an array of
     * SHA512_DIGEST_SIZE bytes and the maximum value returned by
     * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512, which is equal
     * to SHA512_DIGEST_SIZE / sizeof(u32).
     */
    for (i = 0; i < n; i++)
        chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

    data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
    data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);

    return 0;
}

/**
 * ocs_hcu_set_intermediate_data() - Set intermediate data.
 * @hcu_dev:    The target HCU device.
 * @data:       The intermediate data to be set.
 * @algo:       The algorithm being used.
 *
 * This function is used to continue a previous hashing process.
 */
static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
                                          const struct ocs_hcu_idata *data,
                                          enum ocs_hcu_algo algo)
{
    const int n = ocs_hcu_num_chains(algo);
    u32 *chain = (u32 *)data->digest;
    int i;

    /*
     * This loop is safe because data->digest is an array of
     * SHA512_DIGEST_SIZE bytes and the maximum value returned by
     * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512, which is equal
     * to SHA512_DIGEST_SIZE / sizeof(u32).
     */
    for (i = 0; i < n; i++)
        writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);

    writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
    writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
}

static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
                              enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
{
    u32 *chain;
    int rc;
    int i;

    if (!dgst)
        return -EINVAL;

    /* Length of the output buffer must match the algo digest size. */
    if (dgst_len != ocs_hcu_digest_size(algo))
        return -EINVAL;

    /* Ensure that the OCS is no longer busy before reading the chains. */
    rc = ocs_hcu_wait_busy(hcu_dev);
    if (rc)
        return rc;

    chain = (u32 *)dgst;
    for (i = 0; i < dgst_len / sizeof(u32); i++)
        chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

    return 0;
}

/**
 * ocs_hcu_hw_cfg() - Configure the HCU hardware.
 * @hcu_dev:    The HCU device to configure.
 * @algo:       The algorithm to be used by the HCU device.
 * @use_hmac:   Whether or not HW HMAC should be used.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
                          bool use_hmac)
{
    u32 cfg;
    int rc;

    if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
        algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
        algo != OCS_HCU_ALGO_SM3)
        return -EINVAL;

    rc = ocs_hcu_wait_busy(hcu_dev);
    if (rc)
        return rc;

    /* Ensure interrupts are disabled. */
    ocs_hcu_irq_dis(hcu_dev);

    /* Configure endianness, hashing algorithm and HW HMAC (if needed). */
    cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
    cfg |= algo << HCU_MODE_ALGO_SHIFT;
    if (use_hmac)
        cfg |= BIT(HCU_MODE_HMAC_SHIFT);

    writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);

    return 0;
}

/**
 * ocs_hcu_clear_key() - Clear the key stored in the OCS HMAC KEY registers.
 * @hcu_dev:    The OCS HCU device whose key registers should be cleared.
 */
static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
{
    int reg_off;

    /* Clear OCS_HCU_KEY_[0..15]. */
    for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
        writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
}

/**
 * ocs_hcu_write_key() - Write a key to the OCS HMAC KEY registers.
 * @hcu_dev:    The OCS HCU device the key should be written to.
 * @key:        The key to be written.
 * @len:        The size of the key to write. It must be at most
 *              OCS_HCU_HW_KEY_LEN (shorter keys are zero-padded).
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
{
    u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
    int i;

    if (len > OCS_HCU_HW_KEY_LEN)
        return -EINVAL;

    /* Copy the key into a temporary u32 array. */
    memcpy(key_u32, key, len);

    /*
     * The hardware requires all the bytes of the HW key vector to be
     * written. So pad with zeros until we reach OCS_HCU_HW_KEY_LEN.
     */
    memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);

    /*
     * The OCS hardware expects the MSB of the key to be written at the
     * highest address of the HCU key vector; in other words, the key must
     * be written in reverse order.
     *
     * Therefore, we first enable byte swapping for the HCU key vector, so
     * that the bytes of each 32-bit word written to OCS_HCU_KEY_[0..15]
     * will be swapped:
     * 3 <---> 0, 2 <---> 1.
     */
    writel(HCU_BYTE_ORDER_SWAP,
           hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
    /*
     * And then we write the 32-bit words composing the key, starting from
     * the end of the key.
     */
    for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
        writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
               hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));

    /* Clear the local copy of the key. */
    memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);

    return 0;
}
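
/*
 * Byte-order illustration (assuming OCS_HCU_HW_KEY_LEN == 64, i.e.,
 * OCS_HCU_HW_KEY_LEN_U32 == 16): the loop above writes key_u32[15] (key
 * bytes k[60..63]) into OCS_HCU_KEY_0 first, and the byte swap makes the
 * hardware store them as k[63], k[62], k[61], k[60]. The net effect is that
 * the whole key ends up byte-reversed in the HW key vector, with k[0] at
 * the highest byte address, as the hardware expects.
 */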

/**
 * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA.
 * @hcu_dev:    The OCS HCU device to use.
 * @dma_list:   The OCS DMA list mapping the data to hash.
 * @finalize:   Whether or not this is the last hashing operation and,
 *              therefore, the final hash should be computed even if data is
 *              not block-aligned.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
                                const struct ocs_hcu_dma_list *dma_list,
                                bool finalize)
{
    u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;

    if (!dma_list)
        return -EINVAL;

    /*
     * For final requests we use the HCU_DONE IRQ to be notified when all
     * input data has been processed by the HCU; however, we cannot do so
     * for non-final requests, because we don't get a HCU_DONE IRQ when we
     * don't terminate the operation.
     *
     * Therefore, for non-final requests, we use the DMA IRQ, which
     * triggers when the DMA has finished feeding all the input data to
     * the HCU. The HCU may still be processing it at that point, but that
     * is fine, since we will wait for the HCU processing to complete when
     * we try to read intermediate results, in
     * ocs_hcu_get_intermediate_data().
     */
    if (finalize)
        ocs_hcu_done_irq_en(hcu_dev);
    else
        ocs_hcu_dma_irq_en(hcu_dev);

    reinit_completion(&hcu_dev->irq_done);
    writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
    writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
    writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);

    writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);

    writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

    if (finalize)
        writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

    return ocs_hcu_wait_and_disable_irq(hcu_dev);
}

struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
                                                int max_nents)
{
    struct ocs_hcu_dma_list *dma_list;

    dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
    if (!dma_list)
        return NULL;

    /* Allocate the backing array for the DMA list. */
    dma_list->head = dma_alloc_coherent(hcu_dev->dev,
                                        sizeof(*dma_list->head) * max_nents,
                                        &dma_list->dma_addr, GFP_KERNEL);
    if (!dma_list->head) {
        kfree(dma_list);
        return NULL;
    }
    dma_list->max_nents = max_nents;
    dma_list->tail = NULL;

    return dma_list;
}

void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
                           struct ocs_hcu_dma_list *dma_list)
{
    if (!dma_list)
        return;

    dma_free_coherent(hcu_dev->dev,
                      sizeof(*dma_list->head) * dma_list->max_nents,
                      dma_list->head, dma_list->dma_addr);

    kfree(dma_list);
}

/* Add a new DMA entry at the end of the OCS DMA list. */
int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
                              struct ocs_hcu_dma_list *dma_list,
                              dma_addr_t addr, u32 len)
{
    struct device *dev = hcu_dev->dev;
    struct ocs_hcu_dma_entry *old_tail;
    struct ocs_hcu_dma_entry *new_tail;

    if (!len)
        return 0;

    if (!dma_list)
        return -EINVAL;

    if (addr & ~OCS_HCU_DMA_BIT_MASK) {
        dev_err(dev,
                "Unexpected error: Invalid DMA address for OCS HCU\n");
        return -EINVAL;
    }

    old_tail = dma_list->tail;
    new_tail = old_tail ? old_tail + 1 : dma_list->head;

    /* Check if the list is full. */
    if (new_tail - dma_list->head >= dma_list->max_nents)
        return -ENOMEM;

    /*
     * If there was an old tail (i.e., this is not the first element we
     * are adding), un-terminate the old tail and make it point to the new
     * one.
     */
    if (old_tail) {
        old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
        /*
         * The old tail's 'nxt_desc' must point to the DMA address of the
         * new tail.
         */
        old_tail->nxt_desc = dma_list->dma_addr +
                             sizeof(*dma_list->tail) * (new_tail -
                                                        dma_list->head);
    }

    new_tail->src_addr = (u32)addr;
    new_tail->src_len = (u32)len;
    new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
    new_tail->nxt_desc = 0;

    /* Update the list tail with the new tail. */
    dma_list->tail = new_tail;

    return 0;
}
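
/*
 * Descriptor-layout illustration: after two successful calls to
 * ocs_hcu_dma_list_add_tail(), the coherent backing array looks like this
 * (addr0/len0 and addr1/len1 being the arguments of the two calls):
 *
 *     head[0]: src_addr = addr0, src_len = len0,
 *              nxt_desc = dma_addr + sizeof(struct ocs_hcu_dma_entry),
 *              ll_flags = 0
 *     head[1]: src_addr = addr1, src_len = len1,
 *              nxt_desc = 0,
 *              ll_flags = OCS_LL_DMA_FLAG_TERMINATE
 *
 * Only the tail carries the terminate flag, and each nxt_desc holds the DMA
 * (not CPU) address of the next descriptor.
 */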

/**
 * ocs_hcu_hash_init() - Initialize hash operation context.
 * @ctx:    The context to initialize.
 * @algo:   The hashing algorithm to use.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
{
    if (!ctx)
        return -EINVAL;

    ctx->algo = algo;
    ctx->idata.msg_len_lo = 0;
    ctx->idata.msg_len_hi = 0;
    /* No need to set idata.digest to 0. */

    return 0;
}
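
/*
 * Typical multi-step flow (illustrative; 'hcu_dev' and the DMA lists are
 * assumed to be prepared by the caller, and non-final chunks are assumed to
 * be block-aligned, cf. the @finalize note on ocs_hcu_ll_dma_start()):
 *
 *     struct ocs_hcu_hash_ctx ctx;
 *     int rc;
 *
 *     rc = ocs_hcu_hash_init(&ctx, OCS_HCU_ALGO_SHA256);
 *     // zero or more intermediate chunks:
 *     rc = ocs_hcu_hash_update(hcu_dev, &ctx, dma_list);
 *     // last chunk (may be unaligned or empty):
 *     rc = ocs_hcu_hash_finup(hcu_dev, &ctx, last_dma_list, dgst, dgst_len);
 */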

/**
 * ocs_hcu_hash_update() - Perform a hashing iteration.
 * @hcu_dev:    The OCS HCU device to use.
 * @ctx:        The OCS HCU hashing context.
 * @dma_list:   The OCS DMA list mapping the input data to process.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
                        struct ocs_hcu_hash_ctx *ctx,
                        const struct ocs_hcu_dma_list *dma_list)
{
    int rc;

    if (!hcu_dev || !ctx)
        return -EINVAL;

    /* Configure the hardware for the current request. */
    rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
    if (rc)
        return rc;

    /* If we already processed some data, idata needs to be set. */
    if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
        ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

    /* Start linked-list DMA hashing. */
    rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
    if (rc)
        return rc;

    /* Update idata and return. */
    return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
}

/**
 * ocs_hcu_hash_finup() - Update and finalize hash computation.
 * @hcu_dev:    The OCS HCU device to use.
 * @ctx:        The OCS HCU hashing context.
 * @dma_list:   The OCS DMA list mapping the input data to process.
 * @dgst:       The buffer where to save the computed digest.
 * @dgst_len:   The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
                       const struct ocs_hcu_hash_ctx *ctx,
                       const struct ocs_hcu_dma_list *dma_list,
                       u8 *dgst, size_t dgst_len)
{
    int rc;

    if (!hcu_dev || !ctx)
        return -EINVAL;

    /* Configure the hardware for the current request. */
    rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
    if (rc)
        return rc;

    /* If we already processed some data, idata needs to be set. */
    if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
        ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

    /* Start linked-list DMA hashing. */
    rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
    if (rc)
        return rc;

    /* Get digest and return. */
    return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}

/**
 * ocs_hcu_hash_final() - Finalize hash computation.
 * @hcu_dev:    The OCS HCU device to use.
 * @ctx:        The OCS HCU hashing context.
 * @dgst:       The buffer where to save the computed digest.
 * @dgst_len:   The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
                       const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
                       size_t dgst_len)
{
    int rc;

    if (!hcu_dev || !ctx)
        return -EINVAL;

    /* Configure the hardware for the current request. */
    rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
    if (rc)
        return rc;

    /* If we already processed some data, idata needs to be set. */
    if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
        ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

    /*
     * Enable HCU interrupts, so that HCU_DONE will be triggered once the
     * final hash is computed.
     */
    ocs_hcu_done_irq_en(hcu_dev);
    reinit_completion(&hcu_dev->irq_done);
    writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

    rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
    if (rc)
        return rc;

    /* Get digest and return. */
    return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}

/**
 * ocs_hcu_digest() - Compute hash digest.
 * @hcu_dev:    The OCS HCU device to use.
 * @algo:       The hash algorithm to use.
 * @data:       The input data to process.
 * @data_len:   The length of @data.
 * @dgst:       The buffer where to save the computed digest.
 * @dgst_len:   The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
                   void *data, size_t data_len, u8 *dgst, size_t dgst_len)
{
    struct device *dev = hcu_dev->dev;
    dma_addr_t dma_handle;
    u32 reg;
    int rc;

    /* Configure the hardware for the current request. */
    rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
    if (rc)
        return rc;

    dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, dma_handle))
        return -EIO;

    reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;

    ocs_hcu_done_irq_en(hcu_dev);

    reinit_completion(&hcu_dev->irq_done);

    writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
    writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
    writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
    writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

    writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

    rc = ocs_hcu_wait_and_disable_irq(hcu_dev);

    /*
     * Unmap the input buffer even if waiting failed, so that the mapping
     * is not leaked on the error path.
     */
    dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);

    if (rc)
        return rc;

    return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
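
/*
 * One-shot illustration: for a small contiguous buffer there is no need to
 * build an OCS DMA list, since ocs_hcu_digest() maps the buffer itself
 * ('hcu_dev' and a DMA-able 'buf' of 'buf_len' bytes are assumed):
 *
 *     u8 dgst[SHA256_DIGEST_SIZE];
 *     int rc;
 *
 *     rc = ocs_hcu_digest(hcu_dev, OCS_HCU_ALGO_SHA256, buf, buf_len,
 *                         dgst, sizeof(dgst));
 */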

/**
 * ocs_hcu_hmac() - Compute HMAC.
 * @hcu_dev:    The OCS HCU device to use.
 * @algo:       The hash algorithm to use with HMAC.
 * @key:        The key to use.
 * @key_len:    The length of @key.
 * @dma_list:   The OCS DMA list mapping the input data to process.
 * @dgst:       The buffer where to save the computed HMAC.
 * @dgst_len:   The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
                 const u8 *key, size_t key_len,
                 const struct ocs_hcu_dma_list *dma_list,
                 u8 *dgst, size_t dgst_len)
{
    int rc;

    /* Ensure that the key is neither NULL nor empty. */
    if (!key || key_len == 0)
        return -EINVAL;

    /* Configure the hardware for the current request. */
    rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
    if (rc)
        return rc;

    rc = ocs_hcu_write_key(hcu_dev, key, key_len);
    if (rc)
        return rc;

    rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);

    /* Clear the HW key before processing the return code. */
    ocs_hcu_clear_key(hcu_dev);

    if (rc)
        return rc;

    return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}

irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
{
    struct ocs_hcu_dev *hcu_dev = dev_id;
    u32 hcu_irq;
    u32 dma_irq;

    /* Read and clear the HCU interrupt. */
    hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
    writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);

    /* Read and clear the HCU DMA interrupt. */
    dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
    writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);

    /* Check for errors. */
    if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
        hcu_dev->irq_err = true;
        goto complete;
    }

    /* Check for DONE IRQs. */
    if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
        goto complete;

    return IRQ_NONE;

complete:
    complete(&hcu_dev->irq_done);

    return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");