/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_driver.h
 * ARM CryptoCell Linux Crypto Driver
 */

#ifndef __CC_DRIVER_H__
#define __CC_DRIVER_H__

#ifdef COMP_IN_WQ
#include <linux/workqueue.h>
#else
#include <linux/interrupt.h>
#endif
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include "cc_host_regs.h"
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"

extern bool cc_dump_desc;
extern bool cc_dump_bytes;

#define DRV_MODULE_VERSION "5.0"

enum cc_hw_rev {
    CC_HW_REV_630 = 630,
    CC_HW_REV_710 = 710,
    CC_HW_REV_712 = 712,
    CC_HW_REV_713 = 713
};

enum cc_std_body {
    CC_STD_NIST = 0x1,
    CC_STD_OSCCA = 0x2,
    CC_STD_ALL = 0x3
};

#define CC_PINS_FULL    0x0
#define CC_PINS_SLIM    0x9F

/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48
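
/* Illustrative sketch (not part of this header): a probe path would
 * typically apply this limit through the generic DMA API, e.g.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(DMA_BIT_MASK_LEN)))
 *		dev_warn(dev, "failed to set 48-bit DMA mask\n");
 */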

#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
              (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
              (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
              (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))

#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)

#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)

#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)

#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)

#define AXIM_MON_COMP_VALUE CC_GENMASK(CC_AXIM_MON_COMP_VALUE)

#define CC_CPP_AES_ABORT_MASK ( \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))

#define CC_CPP_SM4_ABORT_MASK ( \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
    BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))

/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
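
/* For example, CC_REG(HOST_IRR) token-pastes to CC_HOST_IRR_REG_OFFSET
 * (provided by cc_host_regs.h, included above), so registers can be
 * named symbolically: cc_ioread(drvdata, CC_REG(HOST_IRR)).
 */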

/* TEE FIPS status interrupt */
#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)

#define CC_CRA_PRIO 400

#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */

#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080

/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
#define AXI_ID 0
/* AXI_ID is not the actual AXI ID of the transaction, but the value of
 * the AXI_ID field in the HW descriptor. The DMA engine adds 8 to that
 * value.
 */

struct cc_cpp_req {
    bool is_cpp;
    enum cc_cpp_alg alg;
    u8 slot;
};

#define CC_MAX_IVGEN_DMA_ADDRESSES  3
struct cc_crypto_req {
    void (*user_cb)(struct device *dev, void *req, int err);
    void *user_arg;
    struct completion seq_compl; /* request completion */
    struct cc_cpp_req cpp;
};
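
/* Illustrative sketch (callback name assumed, not from this header): an
 * algorithm implementation fills a cc_crypto_req before queueing its
 * descriptor sequence, and user_cb fires on completion:
 *
 *	struct cc_crypto_req cc_req = {};
 *
 *	cc_req.user_cb = my_cipher_complete;	// hypothetical callback
 *	cc_req.user_arg = req;			// e.g. the crypto API request
 */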

/**
 * struct cc_drvdata - driver private data context
 * @cc_base:    virt address of the CC registers
 * @irq:    bitmap indicating source of last interrupt
 */
struct cc_drvdata {
    void __iomem *cc_base;
    int irq;
    struct completion hw_queue_avail; /* wait for HW queue availability */
    struct platform_device *plat_dev;
    u32 mlli_sram_addr;
    struct dma_pool *mlli_buffs_pool;
    struct list_head alg_list;
    void *hash_handle;
    void *aead_handle;
    void *request_mgr_handle;
    void *fips_handle;
    u32 sram_free_offset;   /* offset to non-allocated area in SRAM */
    struct dentry *dir; /* for debugfs */
    struct clk *clk;
    bool coherent;
    char *hw_rev_name;
    enum cc_hw_rev hw_rev;
    u32 axim_mon_offset;
    u32 sig_offset;
    u32 ver_offset;
    int std_bodies;
    bool sec_disabled;
    u32 comp_mask;
    u32 cache_params;
    u32 ace_const;
};

struct cc_crypto_alg {
    struct list_head entry;
    int cipher_mode;
    int flow_mode; /* Note: currently, refers to the cipher mode only. */
    int auth_mode;
    struct cc_drvdata *drvdata;
    struct skcipher_alg skcipher_alg;
    struct aead_alg aead_alg;
};

struct cc_alg_template {
    char name[CRYPTO_MAX_ALG_NAME];
    char driver_name[CRYPTO_MAX_ALG_NAME];
    unsigned int blocksize;
    union {
        struct skcipher_alg skcipher;
        struct aead_alg aead;
    } template_u;
    int cipher_mode;
    int flow_mode; /* Note: currently, refers to the cipher mode only. */
    int auth_mode;
    u32 min_hw_rev;
    enum cc_std_body std_body;
    bool sec_func;
    unsigned int data_unit;
    struct cc_drvdata *drvdata;
};

struct async_gen_req_ctx {
    dma_addr_t iv_dma_addr;
    u8 *iv;
    enum drv_crypto_direction op_type;
};

static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
{
    return &drvdata->plat_dev->dev;
}
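
/* For example, logging helpers can be invoked as
 * dev_err(drvdata_to_dev(drvdata), "..."), which resolves to the
 * underlying platform device's struct device.
 */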

void __dump_byte_array(const char *name, const u8 *buf, size_t len);
static inline void dump_byte_array(const char *name, const u8 *the_array,
                   size_t size)
{
    if (cc_dump_bytes)
        __dump_byte_array(name, the_array, size);
}
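
/* Illustrative usage (variables assumed): the dump is a no-op unless
 * the global cc_dump_bytes flag declared above is set, so tracing can
 * stay in place at negligible cost:
 *
 *	dump_byte_array("IV", req->iv, ivsize);
 */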

bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
int init_cc_regs(struct cc_drvdata *drvdata);
void fini_cc_regs(struct cc_drvdata *drvdata);
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);

static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
    iowrite32(val, (drvdata->cc_base + reg));
}

static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
{
    return ioread32(drvdata->cc_base + reg);
}
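
/* Illustrative read-modify-write sketch using the accessors above
 * (the particular masking shown is an assumed example):
 *
 *	u32 imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
 *
 *	cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
 */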

static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
{
    return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
            GFP_KERNEL : GFP_ATOMIC;
}
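
/* For example, a request-path allocation that may run in atomic context
 * can derive its GFP flags from the request itself (size is an assumed
 * variable):
 *
 *	gfp_t flags = cc_gfp_flags(&req->base);
 *	void *buf = kmalloc(size, flags);
 */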

static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
                      struct cc_hw_desc *pdesc)
{
    if (drvdata->hw_rev >= CC_HW_REV_712)
        set_queue_last_ind_bit(pdesc);
}
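
/* The revision guard suggests only CC 712 and later use the queue-last
 * indication. A caller would typically tag just the final descriptor of
 * a sequence, e.g. (desc and seq_len are assumed variables):
 *
 *	set_queue_last_ind(drvdata, &desc[seq_len - 1]);
 */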

#endif /*__CC_DRIVER_H__*/