0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * K3 SA2UL crypto accelerator driver
0004  *
0005  * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
0006  *
0007  * Authors: Keerthy
0008  *      Vitaly Andrianov
0009  *      Tero Kristo
0010  */
0011 #include <linux/bitfield.h>
0012 #include <linux/clk.h>
0013 #include <linux/dma-mapping.h>
0014 #include <linux/dmaengine.h>
0015 #include <linux/dmapool.h>
0016 #include <linux/kernel.h>
0017 #include <linux/module.h>
0018 #include <linux/of_device.h>
0019 #include <linux/platform_device.h>
0020 #include <linux/pm_runtime.h>
0021 
0022 #include <crypto/aes.h>
0023 #include <crypto/authenc.h>
0024 #include <crypto/des.h>
0025 #include <crypto/internal/aead.h>
0026 #include <crypto/internal/hash.h>
0027 #include <crypto/internal/skcipher.h>
0028 #include <crypto/scatterwalk.h>
0029 #include <crypto/sha1.h>
0030 #include <crypto/sha2.h>
0031 
0032 #include "sa2ul.h"
0033 
0034 /* Byte offset for key in encryption security context: 1 (mode selector) + 27 (MCI) + 4 = 32 */
0035 #define SC_ENC_KEY_OFFSET (1 + 27 + 4)
0036 /* Byte offset for Aux-1 in encryption security context */
0037 #define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
0038 
0039 #define SA_CMDL_UPD_ENC         0x0001
0040 #define SA_CMDL_UPD_AUTH        0x0002
0041 #define SA_CMDL_UPD_ENC_IV      0x0004
0042 #define SA_CMDL_UPD_AUTH_IV     0x0008
0043 #define SA_CMDL_UPD_AUX_KEY     0x0010
0044 
0045 #define SA_AUTH_SUBKEY_LEN  16
0046 #define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
0047 #define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
0048 
0049 #define MODE_CONTROL_BYTES  27
0050 #define SA_HASH_PROCESSING  0
0051 #define SA_CRYPTO_PROCESSING    0
0052 #define SA_UPLOAD_HASH_TO_TLR   BIT(6)
0053 
0054 #define SA_SW0_FLAGS_MASK   0xF0000
0055 #define SA_SW0_CMDL_INFO_MASK   0x1F00000
0056 #define SA_SW0_CMDL_PRESENT BIT(4)
0057 #define SA_SW0_ENG_ID_MASK  0x3E000000
0058 #define SA_SW0_DEST_INFO_PRESENT    BIT(30)
0059 #define SA_SW2_EGRESS_LENGTH        0xFF000000
0060 #define SA_BASIC_HASH       0x10
0061 
0062 #define SHA256_DIGEST_WORDS    8
0063 /* Make 32-bit word from 4 bytes */
0064 #define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
0065                    ((b2) << 8) | (b3))
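/* Example: SA_MK_U32(0xde, 0xad, 0xbe, 0xef) == 0xdeadbeef (b0 becomes the most significant byte). */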
0066 
0067 /* size of SCCTL structure in bytes */
0068 #define SA_SCCTL_SZ 16
0069 
0070 /* Max Authentication tag size */
0071 #define SA_MAX_AUTH_TAG_SZ 64
0072 
0073 enum sa_algo_id {
0074     SA_ALG_CBC_AES = 0,
0075     SA_ALG_EBC_AES,
0076     SA_ALG_CBC_DES3,
0077     SA_ALG_ECB_DES3,
0078     SA_ALG_SHA1,
0079     SA_ALG_SHA256,
0080     SA_ALG_SHA512,
0081     SA_ALG_AUTHENC_SHA1_AES,
0082     SA_ALG_AUTHENC_SHA256_AES,
0083 };
0084 
0085 struct sa_match_data {
0086     u8 priv;
0087     u8 priv_id;
0088     u32 supported_algos;
0089 };
0090 
0091 static struct device *sa_k3_dev;
0092 
0093 /**
0094  * struct sa_cmdl_cfg - Command label configuration descriptor
0095  * @aalg: authentication algorithm ID
0096  * @enc_eng_id: Encryption Engine ID supported by the SA hardware
0097  * @auth_eng_id: Authentication Engine ID
0098  * @iv_size: Initialization Vector size
0099  * @akey: Authentication key
0100  * @akey_len: Authentication key length
0101  * @enc: True if this is an encode request
0102  */
0103 struct sa_cmdl_cfg {
0104     int aalg;
0105     u8 enc_eng_id;
0106     u8 auth_eng_id;
0107     u8 iv_size;
0108     const u8 *akey;
0109     u16 akey_len;
0110     bool enc;
0111 };
0112 
0113 /**
0114  * struct algo_data - Crypto algorithm specific data
0115  * @enc_eng: Encryption engine info structure
0116  * @auth_eng: Authentication engine info structure
0117  * @auth_ctrl: Authentication control word
0118  * @hash_size: Size of digest
0119  * @iv_idx: iv index in psdata
0120  * @iv_out_size: iv out size
0121  * @ealg_id: Encryption Algorithm ID
0122  * @aalg_id: Authentication algorithm ID
0123  * @mci_enc: Mode Control Instruction for Encryption algorithm
0124  * @mci_dec: Mode Control Instruction for Decryption
0125  * @inv_key: Whether the encryption algorithm demands key inversion
0126  * @ctx: Pointer to the algorithm context
0127  * @keyed_mac: Whether the authentication algorithm has a key
0128  * @prep_iopad: Function pointer to generate intermediate ipad/opad
0129  */
0130 struct algo_data {
0131     struct sa_eng_info enc_eng;
0132     struct sa_eng_info auth_eng;
0133     u8 auth_ctrl;
0134     u8 hash_size;
0135     u8 iv_idx;
0136     u8 iv_out_size;
0137     u8 ealg_id;
0138     u8 aalg_id;
0139     u8 *mci_enc;
0140     u8 *mci_dec;
0141     bool inv_key;
0142     struct sa_tfm_ctx *ctx;
0143     bool keyed_mac;
0144     void (*prep_iopad)(struct algo_data *algo, const u8 *key,
0145                u16 key_sz, __be32 *ipad, __be32 *opad);
0146 };
0147 
0148 /**
0149  * struct sa_alg_tmpl - A generic template encompassing crypto/aead algorithms
0150  * @type: Type of the crypto algorithm.
0151  * @alg: Union of crypto algorithm definitions.
0152  * @registered: Flag indicating if the crypto algorithm is already registered
0153  */
0154 struct sa_alg_tmpl {
0155     u32 type;       /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
0156     union {
0157         struct skcipher_alg skcipher;
0158         struct ahash_alg ahash;
0159         struct aead_alg aead;
0160     } alg;
0161     bool registered;
0162 };
0163 
0164 /**
0165  * struct sa_mapped_sg - Scatterlist information for tx and rx
0166  * @mapped: Set to true if the @sgt is mapped
0167  * @dir: mapping direction used for @sgt
0168  * @static_sg: Static scatterlist entry for overriding data
0169  * @split_sg: Set if the sg is split and needs to be freed up
0170  * @sgt: scatterlist table for DMA API use
0171  */
0172 struct sa_mapped_sg {
0173     bool mapped;
0174     enum dma_data_direction dir;
0175     struct scatterlist static_sg;
0176     struct scatterlist *split_sg;
0177     struct sg_table sgt;
0178 };
0179 /**
0180  * struct sa_rx_data - RX packet miscellaneous data placeholder
0181  * @req: crypto request data pointer
0182  * @ddev: pointer to the DMA device
0183  * @tx_in: dma_async_tx_descriptor pointer for rx channel
0184  * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
0185  * @enc: Flag indicating either encryption or decryption
0186  * @enc_iv_size: Initialisation vector size
0187  * @iv_idx: Initialisation vector index
0188  */
0189 struct sa_rx_data {
0190     void *req;
0191     struct device *ddev;
0192     struct dma_async_tx_descriptor *tx_in;
0193     struct sa_mapped_sg mapped_sg[2];
0194     u8 enc;
0195     u8 enc_iv_size;
0196     u8 iv_idx;
0197 };
0198 
0199 /**
0200  * struct sa_req - SA request definition
0201  * @dev: device for the request
0202  * @size: total size of the data to be transmitted via DMA
0203  * @enc_offset: offset of cipher data
0204  * @enc_size: data to be passed to cipher engine
0205  * @enc_iv: cipher IV
0206  * @auth_offset: offset of the authentication data
0207  * @auth_size: size of the authentication data
0208  * @auth_iv: authentication IV
0209  * @type: algorithm type for the request
0210  * @cmdl: command label pointer
0211  * @base: pointer to the base request
0212  * @ctx: pointer to the algorithm context data
0213  * @enc: true if this is an encode request
0214  * @src: source data
0215  * @dst: destination data
0216  * @callback: DMA callback for the request
0217  * @mdata_size: metadata size passed to DMA
0218  */
0219 struct sa_req {
0220     struct device *dev;
0221     u16 size;
0222     u8 enc_offset;
0223     u16 enc_size;
0224     u8 *enc_iv;
0225     u8 auth_offset;
0226     u16 auth_size;
0227     u8 *auth_iv;
0228     u32 type;
0229     u32 *cmdl;
0230     struct crypto_async_request *base;
0231     struct sa_tfm_ctx *ctx;
0232     bool enc;
0233     struct scatterlist *src;
0234     struct scatterlist *dst;
0235     dma_async_tx_callback callback;
0236     u16 mdata_size;
0237 };
0238 
0239 /*
0240  * Mode Control Instructions for various key lengths (128, 192, 256 bits)
0241  * for CBC (Cipher Block Chaining) mode encryption
0242  */
0243 static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
0244     {   0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
0245         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0246         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0247     {   0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
0248         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0249         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0250     {   0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
0251         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0252         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0253 };
0254 
0255 /*
0256  * Mode Control Instructions for various key lengths (128, 192, 256 bits)
0257  * for CBC (Cipher Block Chaining) mode decryption
0258  */
0259 static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
0260     {   0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
0261         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0262         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0263     {   0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
0264         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0265         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0266     {   0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
0267         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0268         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0269 };
0270 
0271 /*
0272  * Mode Control Instructions for various key lengths (128, 192, 256 bits)
0273  * for CBC (Cipher Block Chaining) mode encryption, no-IV variant
0274  */
0275 static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
0276     {   0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
0277         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0278         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0279     {   0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
0280         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0281         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0282     {   0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
0283         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0284         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0285 };
0286 
0287 /*
0288  * Mode Control Instructions for various key lengths (128, 192, 256 bits)
0289  * for CBC (Cipher Block Chaining) mode decryption, no-IV variant
0290  */
0291 static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
0292     {   0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
0293         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0294         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0295     {   0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
0296         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0297         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0298     {   0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
0299         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0300         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0301 };
0302 
0303 /*
0304  * Mode Control Instructions for various key lengths (128, 192, 256 bits)
0305  * for ECB (Electronic Code Book) mode encryption
0306  */
0307 static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
0308     {   0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
0309         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0310         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0311     {   0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
0312         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0313         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0314     {   0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
0315         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0316         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0317 };
0318 
0319 /*
0320  * Mode Control Instructions for various key lengths (128, 192, 256 bits)
0321  * for ECB (Electronic Code Book) mode decryption
0322  */
0323 static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
0324     {   0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
0325         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0326         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0327     {   0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
0328         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0329         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0330     {   0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
0331         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0332         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00    },
0333 };
0334 
0335 /*
0336  * Mode Control Instructions for the 3DES algorithm,
0337  * for CBC (Cipher Block Chaining) and ECB modes,
0338  * encryption and decryption respectively
0339  */
0340 static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
0341     0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
0342     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0343     0x00, 0x00, 0x00,
0344 };
0345 
0346 static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
0347     0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
0348     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0349     0x00, 0x00, 0x00,
0350 };
0351 
0352 static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
0353     0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
0354     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0355     0x00, 0x00, 0x00,
0356 };
0357 
0358 static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
0359     0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
0360     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0361     0x00, 0x00, 0x00,
0362 };
0363 
0364 /*
0365  * Perform 16-byte (128-bit) swizzling.
0366  * The SA2UL expects the security context to be in little-endian
0367  * format, and the bus width is 128 bits (16 bytes).
0368  * Hence swap 16 bytes at a time, from higher to lower address.
0369  */
0370 static void sa_swiz_128(u8 *in, u16 len)
0371 {
0372     u8 data[16];
0373     int i, j;
0374 
0375     for (i = 0; i < len; i += 16) {
0376         memcpy(data, &in[i], 16);
0377         for (j = 0; j < 16; j++)
0378             in[i + j] = data[15 - j];
0379     }
0380 }
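/*
 * Illustration (not part of the driver): a minimal user-space sketch of the
 * same 128-bit swizzle, showing that each 16-byte group is reversed in place.
 * Hypothetical standalone code, for demonstration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void swiz_128_demo(uint8_t *in, uint16_t len)
{
	uint8_t data[16];
	int i, j;

	for (i = 0; i < len; i += 16) {
		memcpy(data, &in[i], 16);
		for (j = 0; j < 16; j++)
			in[i + j] = data[15 - j];
	}
}

int main(void)
{
	uint8_t buf[16];
	int i;

	for (i = 0; i < 16; i++)
		buf[i] = i;			/* 00 01 ... 0f */
	swiz_128_demo(buf, sizeof(buf));
	for (i = 0; i < 16; i++)
		printf("%02x ", buf[i]);	/* prints 0f 0e ... 00 */
	printf("\n");
	return 0;
}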
0381 
0382 /* Prepare the ipad and opad from the key, as per HMAC (RFC 2104) step 1 */
0383 static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
0384 {
0385     int i;
0386 
0387     for (i = 0; i < key_sz; i++)
0388         k_ipad[i] = key[i] ^ 0x36;
0389 
0390     /* Instead of XOR with 0 */
0391     for (; i < SHA1_BLOCK_SIZE; i++)
0392         k_ipad[i] = 0x36;
0393 }
0394 
0395 static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
0396 {
0397     int i;
0398 
0399     for (i = 0; i < key_sz; i++)
0400         k_opad[i] = key[i] ^ 0x5c;
0401 
0402     /* Instead of XOR with 0 */
0403     for (; i < SHA1_BLOCK_SIZE; i++)
0404         k_opad[i] = 0x5c;
0405 }
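/*
 * Context for the two pad helpers above: they implement step 1 of the HMAC
 * construction (RFC 2104), where the final MAC is computed as
 *
 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * with ipad = 0x36 repeated, opad = 0x5c repeated, and K zero-padded to the
 * hash block size.
 */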
0406 
0407 static void sa_export_shash(void *state, struct shash_desc *hash,
0408                 int digest_size, __be32 *out)
0409 {
0410     struct sha1_state *sha1;
0411     struct sha256_state *sha256;
0412     u32 *result;
0413 
0414     switch (digest_size) {
0415     case SHA1_DIGEST_SIZE:
0416         sha1 = state;
0417         result = sha1->state;
0418         break;
0419     case SHA256_DIGEST_SIZE:
0420         sha256 = state;
0421         result = sha256->state;
0422         break;
0423     default:
0424         dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
0425             digest_size);
0426         return;
0427     }
0428 
0429     crypto_shash_export(hash, state);
0430 
0431     cpu_to_be32_array(out, result, digest_size / 4);
0432 }
0433 
0434 static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
0435                   u16 key_sz, __be32 *ipad, __be32 *opad)
0436 {
0437     SHASH_DESC_ON_STACK(shash, data->ctx->shash);
0438     int block_size = crypto_shash_blocksize(data->ctx->shash);
0439     int digest_size = crypto_shash_digestsize(data->ctx->shash);
0440     union {
0441         struct sha1_state sha1;
0442         struct sha256_state sha256;
0443         u8 k_pad[SHA1_BLOCK_SIZE];
0444     } sha;
0445 
0446     shash->tfm = data->ctx->shash;
0447 
0448     prepare_kipad(sha.k_pad, key, key_sz);
0449 
0450     crypto_shash_init(shash);
0451     crypto_shash_update(shash, sha.k_pad, block_size);
0452     sa_export_shash(&sha, shash, digest_size, ipad);
0453 
0454     prepare_kopad(sha.k_pad, key, key_sz);
0455 
0456     crypto_shash_init(shash);
0457     crypto_shash_update(shash, sha.k_pad, block_size);
0458 
0459     sa_export_shash(&sha, shash, digest_size, opad);
0460 
0461     memzero_explicit(&sha, sizeof(sha));
0462 }
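/*
 * What the exports above capture is the compression-function state after
 * hashing exactly one block of k_ipad/k_opad, which lets the hardware resume
 * the HMAC without ever seeing the key. A user-space sketch of the same
 * precomputation, assuming OpenSSL's legacy SHA1 API (SHA_CTX exposes the
 * h0..h4 state words); illustrative only:
 */
#include <openssl/sha.h>
#include <stdint.h>
#include <string.h>

/* Digest one 64-byte k_ipad block and capture the intermediate state. */
static void sha1_ipad_state(const uint8_t *key, size_t key_len,
			    uint32_t state[5])
{
	uint8_t k_ipad[64];
	SHA_CTX c;
	size_t i;

	memset(k_ipad, 0x36, sizeof(k_ipad));
	for (i = 0; i < key_len && i < sizeof(k_ipad); i++)
		k_ipad[i] = key[i] ^ 0x36;

	SHA1_Init(&c);
	SHA1_Update(&c, k_ipad, sizeof(k_ipad));	/* exactly one block */
	state[0] = c.h0;
	state[1] = c.h1;
	state[2] = c.h2;
	state[3] = c.h3;
	state[4] = c.h4;
}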
0463 
0464 /* Derive the inverse key used in AES-CBC decryption operation */
0465 static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
0466 {
0467     struct crypto_aes_ctx ctx;
0468     int key_pos;
0469 
0470     if (aes_expandkey(&ctx, key, key_sz)) {
0471         dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
0472         return -EINVAL;
0473     }
0474 
0475     /* Workaround to get the right inverse for AES_KEYSIZE_192-sized keys */
0476     if (key_sz == AES_KEYSIZE_192) {
0477         ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
0478         ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
0479     }
0480 
0481     /* Based on the crypto_aes_expand_key logic */
0482     switch (key_sz) {
0483     case AES_KEYSIZE_128:
0484     case AES_KEYSIZE_192:
0485         key_pos = key_sz + 24;
0486         break;
0487 
0488     case AES_KEYSIZE_256:
0489         key_pos = key_sz + 24 - 4;
0490         break;
0491 
0492     default:
0493         dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
0494         return -EINVAL;
0495     }
0496 
0497     memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
0498     return 0;
0499 }
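/*
 * Note on the copy above: key_enc[] is a u32 array, so for AES-128
 * (key_pos = 16 + 24 = 40) this copies words 40..43 of the expanded key
 * schedule, i.e. the final (round 10) round key, which is where an
 * equivalent-inverse-cipher decryption starts.
 */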
0500 
0501 /* Set Security context for the encryption engine */
0502 static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
0503              u8 enc, u8 *sc_buf)
0504 {
0505     const u8 *mci = NULL;
0506 
0507     /* Set Encryption mode selector to crypto processing */
0508     sc_buf[0] = SA_CRYPTO_PROCESSING;
0509 
0510     if (enc)
0511         mci = ad->mci_enc;
0512     else
0513         mci = ad->mci_dec;
0514     /* Set the mode control instructions in security context */
0515     if (mci)
0516         memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
0517 
0518     /* For AES-CBC decryption get the inverse key */
0519     if (ad->inv_key && !enc) {
0520         if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
0521             return -EINVAL;
0522     /* For all other cases the key is used as-is */
0523     } else {
0524         memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
0525     }
0526 
0527     return 0;
0528 }
0529 
0530 /* Set Security context for the authentication engine */
0531 static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
0532                u8 *sc_buf)
0533 {
0534     __be32 *ipad = (void *)(sc_buf + 32);
0535     __be32 *opad = (void *)(sc_buf + 64);
0536 
0537     /* Set Authentication mode selector to hash processing */
0538     sc_buf[0] = SA_HASH_PROCESSING;
0539     /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
0540     sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
0541     sc_buf[1] |= ad->auth_ctrl;
0542 
0543     /* Copy the keys or ipad/opad */
0544     if (ad->keyed_mac)
0545         ad->prep_iopad(ad, key, key_sz, ipad, opad);
0546     else {
0547         /* basic hash */
0548         sc_buf[1] |= SA_BASIC_HASH;
0549     }
0550 }
0551 
0552 static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
0553 {
0554     int j;
0555 
0556     for (j = 0; j < ((size16) ? 4 : 2); j++) {
0557         *out = cpu_to_be32(*((u32 *)iv));
0558         iv += 4;
0559         out++;
0560     }
0561 }
0562 
0563 /* Format general command label */
0564 static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
0565                   struct sa_cmdl_upd_info *upd_info)
0566 {
0567     u8 enc_offset = 0, auth_offset = 0, total = 0;
0568     u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
0569     u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
0570     u32 *word_ptr = (u32 *)cmdl;
0571     int i;
0572 
0573     /* Clear the command label */
0574     memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
0575 
0576     /* Initialize the command update structure */
0577     memzero_explicit(upd_info, sizeof(*upd_info));
0578 
0579     if (cfg->enc_eng_id && cfg->auth_eng_id) {
0580         if (cfg->enc) {
0581             auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
0582             enc_next_eng = cfg->auth_eng_id;
0583 
0584             if (cfg->iv_size)
0585                 auth_offset += cfg->iv_size;
0586         } else {
0587             enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
0588             auth_next_eng = cfg->enc_eng_id;
0589         }
0590     }
0591 
0592     if (cfg->enc_eng_id) {
0593         upd_info->flags |= SA_CMDL_UPD_ENC;
0594         upd_info->enc_size.index = enc_offset >> 2;
0595         upd_info->enc_offset.index = upd_info->enc_size.index + 1;
0596         /* Encryption command label */
0597         cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
0598 
0599         /* Encryption modes requiring IV */
0600         if (cfg->iv_size) {
0601             upd_info->flags |= SA_CMDL_UPD_ENC_IV;
0602             upd_info->enc_iv.index =
0603                 (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
0604             upd_info->enc_iv.size = cfg->iv_size;
0605 
0606             cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
0607                 SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
0608 
0609             cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
0610                 (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
0611             total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
0612         } else {
0613             cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
0614                         SA_CMDL_HEADER_SIZE_BYTES;
0615             total += SA_CMDL_HEADER_SIZE_BYTES;
0616         }
0617     }
0618 
0619     if (cfg->auth_eng_id) {
0620         upd_info->flags |= SA_CMDL_UPD_AUTH;
0621         upd_info->auth_size.index = auth_offset >> 2;
0622         upd_info->auth_offset.index = upd_info->auth_size.index + 1;
0623         cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
0624         cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
0625             SA_CMDL_HEADER_SIZE_BYTES;
0626         total += SA_CMDL_HEADER_SIZE_BYTES;
0627     }
0628 
0629     total = roundup(total, 8);
0630 
0631     for (i = 0; i < total / 4; i++)
0632         word_ptr[i] = swab32(word_ptr[i]);
0633 
0634     return total;
0635 }
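/*
 * Worked sizing example for the function above, for an encryption-only
 * AES-CBC context (enc_eng_id set, auth_eng_id zero, iv_size 16): the label
 * is one SA_CMDL_HEADER_SIZE_BYTES header plus the 16-byte IV, rounded up to
 * a multiple of 8 bytes, after which each 32-bit word is byte-swapped.
 */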
0636 
0637 /* Update Command label */
0638 static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
0639                   struct sa_cmdl_upd_info *upd_info)
0640 {
0641     int i = 0, j;
0642 
0643     if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
0644         cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
0645         cmdl[upd_info->enc_size.index] |= req->enc_size;
0646         cmdl[upd_info->enc_offset.index] &=
0647                         ~SA_CMDL_SOP_BYPASS_LEN_MASK;
0648         cmdl[upd_info->enc_offset.index] |=
0649             FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
0650                    req->enc_offset);
0651 
0652         if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
0653             __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
0654             u32 *enc_iv = (u32 *)req->enc_iv;
0655 
0656             for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
0657                 data[j] = cpu_to_be32(*enc_iv);
0658                 enc_iv++;
0659             }
0660         }
0661     }
0662 
0663     if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
0664         cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
0665         cmdl[upd_info->auth_size.index] |= req->auth_size;
0666         cmdl[upd_info->auth_offset.index] &=
0667             ~SA_CMDL_SOP_BYPASS_LEN_MASK;
0668         cmdl[upd_info->auth_offset.index] |=
0669             FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
0670                    req->auth_offset);
0671         if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
0672             sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
0673                    req->auth_iv,
0674                    (upd_info->auth_iv.size > 8));
0675         }
0676         if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
0677             int offset = (req->auth_size & 0xF) ? 4 : 0;
0678 
0679             memcpy(&cmdl[upd_info->aux_key_info.index],
0680                    &upd_info->aux_key[offset], 16);
0681         }
0682     }
0683 }
0684 
0685 /* Format SWINFO words to be sent to SA */
0686 static
0687 void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
0688            u8 cmdl_present, u8 cmdl_offset, u8 flags,
0689            u8 hash_size, u32 *swinfo)
0690 {
0691     swinfo[0] = sc_id;
0692     swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
0693     if (likely(cmdl_present))
0694         swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
0695                     cmdl_offset | SA_SW0_CMDL_PRESENT);
0696     swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);
0697 
0698     swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
0699     swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
0700     swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
0701     swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
0702 }
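/*
 * FIELD_PREP example for the packing above: SA_SW0_ENG_ID_MASK is
 * 0x3E000000 (bits 25..29), so FIELD_PREP(SA_SW0_ENG_ID_MASK, 5)
 * evaluates to 5 << 25 == 0x0a000000.
 */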
0703 
0704 /* Dump the security context */
0705 static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
0706 {
0707 #ifdef DEBUG
0708     dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
0709     print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
0710                16, 1, buf, SA_CTX_MAX_SZ, false);
0711 #endif
0712 }
0713 
0714 static
0715 int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
0716            const u8 *enc_key, u16 enc_key_sz,
0717            const u8 *auth_key, u16 auth_key_sz,
0718            struct algo_data *ad, u8 enc, u32 *swinfo)
0719 {
0720     int enc_sc_offset = 0;
0721     int auth_sc_offset = 0;
0722     u8 *sc_buf = ctx->sc;
0723     u16 sc_id = ctx->sc_id;
0724     u8 first_engine = 0;
0725 
0726     memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
0727 
0728     if (ad->auth_eng.eng_id) {
0729         if (enc)
0730             first_engine = ad->enc_eng.eng_id;
0731         else
0732             first_engine = ad->auth_eng.eng_id;
0733 
0734         enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
0735         auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
0736         sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
0737         if (!ad->hash_size)
0738             return -EINVAL;
0739         ad->hash_size = roundup(ad->hash_size, 8);
0740 
0741     } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
0742         enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
0743         first_engine = ad->enc_eng.eng_id;
0744         sc_buf[1] = SA_SCCTL_FE_ENC;
0745         ad->hash_size = ad->iv_out_size;
0746     }
0747 
0748     /* SCCTL Owner info: 0=host, 1=CP_ACE */
0749     sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
0750     memcpy(&sc_buf[2], &sc_id, 2);
0751     sc_buf[4] = 0x0;
0752     sc_buf[5] = match_data->priv_id;
0753     sc_buf[6] = match_data->priv;
0754     sc_buf[7] = 0x0;
0755 
0756     /* Prepare context for encryption engine */
0757     if (ad->enc_eng.sc_size) {
0758         if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
0759                   &sc_buf[enc_sc_offset]))
0760             return -EINVAL;
0761     }
0762 
0763     /* Prepare context for authentication engine */
0764     if (ad->auth_eng.sc_size)
0765         sa_set_sc_auth(ad, auth_key, auth_key_sz,
0766                    &sc_buf[auth_sc_offset]);
0767 
0768     /* Set the ownership of context to CP_ACE */
0769     sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
0770 
0771     /* swizzle the security context */
0772     sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
0773 
0774     sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
0775               SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
0776 
0777     sa_dump_sc(sc_buf, ctx->sc_phys);
0778 
0779     return 0;
0780 }
0781 
0782 /* Free the per direction context memory */
0783 static void sa_free_ctx_info(struct sa_ctx_info *ctx,
0784                  struct sa_crypto_data *data)
0785 {
0786     unsigned long bn;
0787 
0788     bn = ctx->sc_id - data->sc_id_start;
0789     spin_lock(&data->scid_lock);
0790     __clear_bit(bn, data->ctx_bm);
0791     data->sc_id--;
0792     spin_unlock(&data->scid_lock);
0793 
0794     if (ctx->sc) {
0795         dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
0796         ctx->sc = NULL;
0797     }
0798 }
0799 
0800 static int sa_init_ctx_info(struct sa_ctx_info *ctx,
0801                 struct sa_crypto_data *data)
0802 {
0803     unsigned long bn;
0804     int err;
0805 
0806     spin_lock(&data->scid_lock);
0807     bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
0808     __set_bit(bn, data->ctx_bm);
0809     data->sc_id++;
0810     spin_unlock(&data->scid_lock);
0811 
0812     ctx->sc_id = (u16)(data->sc_id_start + bn);
0813 
0814     ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
0815     if (!ctx->sc) {
0816         dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
0817         err = -ENOMEM;
0818         goto scid_rollback;
0819     }
0820 
0821     return 0;
0822 
0823 scid_rollback:
0824     spin_lock(&data->scid_lock);
0825     __clear_bit(bn, data->ctx_bm);
0826     data->sc_id--;
0827     spin_unlock(&data->scid_lock);
0828 
0829     return err;
0830 }
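/*
 * Illustration (not part of the driver): the allocate/rollback pattern of
 * sa_init_ctx_info() above, sketched with a plain 64-bit word standing in
 * for the driver's ctx_bm bitmap. Hypothetical user-space code.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_CTX 64

static uint64_t ctx_bm;		/* one bit per security context */

/* Returns the allocated index, or -1 when the bitmap is full. */
static int ctx_alloc(void)
{
	int bn;

	for (bn = 0; bn < MAX_CTX; bn++)
		if (!(ctx_bm & (1ULL << bn)))
			break;
	if (bn == MAX_CTX)
		return -1;
	ctx_bm |= 1ULL << bn;		/* __set_bit() equivalent */
	return bn;
}

static void ctx_free(int bn)
{
	ctx_bm &= ~(1ULL << bn);	/* __clear_bit() equivalent */
}

int main(void)
{
	int a = ctx_alloc();		/* 0 */
	int b = ctx_alloc();		/* 1 */

	ctx_free(a);			/* rollback, as on dma_pool_alloc() failure */
	printf("%d %d %d\n", a, b, ctx_alloc());	/* prints: 0 1 0 */
	return 0;
}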
0831 
0832 static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
0833 {
0834     struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
0835     struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
0836 
0837     dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
0838         __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
0839         ctx->dec.sc_id, &ctx->dec.sc_phys);
0840 
0841     sa_free_ctx_info(&ctx->enc, data);
0842     sa_free_ctx_info(&ctx->dec, data);
0843 
0844     crypto_free_skcipher(ctx->fallback.skcipher);
0845 }
0846 
0847 static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
0848 {
0849     struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
0850     struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
0851     const char *name = crypto_tfm_alg_name(&tfm->base);
0852     struct crypto_skcipher *child;
0853     int ret;
0854 
0855     memzero_explicit(ctx, sizeof(*ctx));
0856     ctx->dev_data = data;
0857 
0858     ret = sa_init_ctx_info(&ctx->enc, data);
0859     if (ret)
0860         return ret;
0861     ret = sa_init_ctx_info(&ctx->dec, data);
0862     if (ret) {
0863         sa_free_ctx_info(&ctx->enc, data);
0864         return ret;
0865     }
0866 
0867     child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
0868 
0869     if (IS_ERR(child)) {
0870         dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
0871         return PTR_ERR(child);
0872     }
0873 
0874     ctx->fallback.skcipher = child;
0875     crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
0876                      sizeof(struct skcipher_request));
0877 
0878     dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
0879         __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
0880         ctx->dec.sc_id, &ctx->dec.sc_phys);
0881     return 0;
0882 }
0883 
0884 static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
0885                 unsigned int keylen, struct algo_data *ad)
0886 {
0887     struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
0888     struct crypto_skcipher *child = ctx->fallback.skcipher;
0889     int cmdl_len;
0890     struct sa_cmdl_cfg cfg;
0891     int ret;
0892 
0893     if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
0894         keylen != AES_KEYSIZE_256)
0895         return -EINVAL;
0896 
0897     ad->enc_eng.eng_id = SA_ENG_ID_EM1;
0898     ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
0899 
0900     memzero_explicit(&cfg, sizeof(cfg));
0901     cfg.enc_eng_id = ad->enc_eng.eng_id;
0902     cfg.iv_size = crypto_skcipher_ivsize(tfm);
0903 
0904     crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
0905     crypto_skcipher_set_flags(child, tfm->base.crt_flags &
0906                      CRYPTO_TFM_REQ_MASK);
0907     ret = crypto_skcipher_setkey(child, key, keylen);
0908     if (ret)
0909         return ret;
0910 
0911     /* Setup Encryption Security Context & Command label template */
0912     if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
0913                ad, 1, &ctx->enc.epib[1]))
0914         goto badkey;
0915 
0916     cmdl_len = sa_format_cmdl_gen(&cfg,
0917                       (u8 *)ctx->enc.cmdl,
0918                       &ctx->enc.cmdl_upd_info);
0919     if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
0920         goto badkey;
0921 
0922     ctx->enc.cmdl_size = cmdl_len;
0923 
0924     /* Setup Decryption Security Context & Command label template */
0925     if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
0926                ad, 0, &ctx->dec.epib[1]))
0927         goto badkey;
0928 
0929     cfg.enc_eng_id = ad->enc_eng.eng_id;
0930     cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
0931                       &ctx->dec.cmdl_upd_info);
0932 
0933     if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
0934         goto badkey;
0935 
0936     ctx->dec.cmdl_size = cmdl_len;
0937     ctx->iv_idx = ad->iv_idx;
0938 
0939     return 0;
0940 
0941 badkey:
0942     dev_err(sa_k3_dev, "%s: badkey\n", __func__);
0943     return -EINVAL;
0944 }
0945 
0946 static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
0947                  unsigned int keylen)
0948 {
0949     struct algo_data ad = { 0 };
0950     /* Convert the key size (16/24/32) to the key size index (0/1/2) */
0951     int key_idx = (keylen >> 3) - 2;
0952 
0953     if (key_idx >= 3)
0954         return -EINVAL;
0955 
0956     ad.mci_enc = mci_cbc_enc_array[key_idx];
0957     ad.mci_dec = mci_cbc_dec_array[key_idx];
0958     ad.inv_key = true;
0959     ad.ealg_id = SA_EALG_ID_AES_CBC;
0960     ad.iv_idx = 4;
0961     ad.iv_out_size = 16;
0962 
0963     return sa_cipher_setkey(tfm, key, keylen, &ad);
0964 }
0965 
0966 static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
0967                  unsigned int keylen)
0968 {
0969     struct algo_data ad = { 0 };
0970     /* Convert the key size (16/24/32) to the key size index (0/1/2) */
0971     int key_idx = (keylen >> 3) - 2;
0972 
0973     if (key_idx >= 3)
0974         return -EINVAL;
0975 
0976     ad.mci_enc = mci_ecb_enc_array[key_idx];
0977     ad.mci_dec = mci_ecb_dec_array[key_idx];
0978     ad.inv_key = true;
0979     ad.ealg_id = SA_EALG_ID_AES_ECB;
0980 
0981     return sa_cipher_setkey(tfm, key, keylen, &ad);
0982 }
0983 
0984 static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
0985                   unsigned int keylen)
0986 {
0987     struct algo_data ad = { 0 };
0988 
0989     ad.mci_enc = mci_cbc_3des_enc_array;
0990     ad.mci_dec = mci_cbc_3des_dec_array;
0991     ad.ealg_id = SA_EALG_ID_3DES_CBC;
0992     ad.iv_idx = 6;
0993     ad.iv_out_size = 8;
0994 
0995     return sa_cipher_setkey(tfm, key, keylen, &ad);
0996 }
0997 
0998 static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
0999                   unsigned int keylen)
1000 {
1001     struct algo_data ad = { 0 };
1002 
1003     ad.mci_enc = mci_ecb_3des_enc_array;
1004     ad.mci_dec = mci_ecb_3des_dec_array;
1005 
1006     return sa_cipher_setkey(tfm, key, keylen, &ad);
1007 }
1008 
1009 static void sa_sync_from_device(struct sa_rx_data *rxd)
1010 {
1011     struct sg_table *sgt;
1012 
1013     if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
1014         sgt = &rxd->mapped_sg[0].sgt;
1015     else
1016         sgt = &rxd->mapped_sg[1].sgt;
1017 
1018     dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
1019 }
1020 
1021 static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
1022 {
1023     int i;
1024 
1025     for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
1026         struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
1027 
1028         if (mapped_sg->mapped) {
1029             dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
1030                       mapped_sg->dir, 0);
1031             kfree(mapped_sg->split_sg);
1032         }
1033     }
1034 
1035     kfree(rxd);
1036 }
1037 
1038 static void sa_aes_dma_in_callback(void *data)
1039 {
1040     struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1041     struct skcipher_request *req;
1042     u32 *result;
1043     __be32 *mdptr;
1044     size_t ml, pl;
1045     int i;
1046 
1047     sa_sync_from_device(rxd);
1048     req = container_of(rxd->req, struct skcipher_request, base);
1049 
1050     if (req->iv) {
1051         mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
1052                                    &ml);
1053         result = (u32 *)req->iv;
1054 
1055         for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1056             result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1057     }
1058 
1059     sa_free_sa_rx_data(rxd);
1060 
1061     skcipher_request_complete(req, 0);
1062 }
1063 
1064 static void
1065 sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1066 {
1067     u32 *out, *in;
1068     int i;
1069 
1070     for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1071         *out++ = *in++;
1072 
1073     mdptr[4] = (0xFFFF << 16);
1074     for (out = &mdptr[5], in = psdata, i = 0;
1075          i < pslen / sizeof(u32); i++)
1076         *out++ = *in++;
1077 }
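/*
 * Metadata layout produced above: the EPIB words occupy the start of the
 * buffer, word 4 carries the 0xFFFF marker in its upper halfword, and the
 * command label words (psdata) follow from word 5 onwards.
 */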
1078 
1079 static int sa_run(struct sa_req *req)
1080 {
1081     struct sa_rx_data *rxd;
1082     gfp_t gfp_flags;
1083     u32 cmdl[SA_MAX_CMDL_WORDS];
1084     struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
1085     struct device *ddev;
1086     struct dma_chan *dma_rx;
1087     int sg_nents, src_nents, dst_nents;
1088     struct scatterlist *src, *dst;
1089     size_t pl, ml, split_size;
1090     struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1091     int ret;
1092     struct dma_async_tx_descriptor *tx_out;
1093     u32 *mdptr;
1094     bool diff_dst;
1095     enum dma_data_direction dir_src;
1096     struct sa_mapped_sg *mapped_sg;
1097 
1098     gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1099         GFP_KERNEL : GFP_ATOMIC;
1100 
1101     rxd = kzalloc(sizeof(*rxd), gfp_flags);
1102     if (!rxd)
1103         return -ENOMEM;
1104 
1105     if (req->src != req->dst) {
1106         diff_dst = true;
1107         dir_src = DMA_TO_DEVICE;
1108     } else {
1109         diff_dst = false;
1110         dir_src = DMA_BIDIRECTIONAL;
1111     }
1112 
1113     /*
1114      * SA2UL has an interesting feature where the receive DMA channel
1115      * is selected based on the size of the data passed to the engine.
1116      * Within the transition range there is also a window where it is
1117      * impossible to determine where the data will end up, and this
1118      * should be avoided. That case is handled by the SW fallback
1119      * mechanism in the individual algorithm implementations (see the
1120      * note below).
1121      */
1121     if (req->size >= 256)
1122         dma_rx = pdata->dma_rx2;
1123     else
1124         dma_rx = pdata->dma_rx1;
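	/*
	 * Note: the SW fallback in the algorithm implementations (e.g.
	 * sa_cipher_run(), sa_sha_run()) keeps request sizes out of the
	 * SA_UNSAFE_DATA_SZ_MIN..SA_UNSAFE_DATA_SZ_MAX window, so any size
	 * reaching this point selects dma_rx1 or dma_rx2 unambiguously.
	 */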
1125 
1126     ddev = dmaengine_get_dma_device(pdata->dma_tx);
1127     rxd->ddev = ddev;
1128 
1129     memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1130 
1131     sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
1132 
1133     if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1134         if (req->enc)
1135             req->type |=
1136                 (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1137         else
1138             req->type |=
1139                 (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1140     }
1141 
1142     cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1143 
1144     /*
1145      * Map the packets. First we check whether the data fits into a
1146      * single sg entry and use that if possible. If it does not fit, we
1147      * check whether we need to do sg_split to align the scatterlist
1148      * data to the actual data size being processed by the crypto engine.
1149      */
1150     src = req->src;
1151     sg_nents = sg_nents_for_len(src, req->size);
1152 
1153     split_size = req->size;
1154 
1155     mapped_sg = &rxd->mapped_sg[0];
1156     if (sg_nents == 1 && split_size <= req->src->length) {
1157         src = &mapped_sg->static_sg;
1158         src_nents = 1;
1159         sg_init_table(src, 1);
1160         sg_set_page(src, sg_page(req->src), split_size,
1161                 req->src->offset);
1162 
1163         mapped_sg->sgt.sgl = src;
1164         mapped_sg->sgt.orig_nents = src_nents;
1165         ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1166         if (ret) {
1167             kfree(rxd);
1168             return ret;
1169         }
1170 
1171         mapped_sg->dir = dir_src;
1172         mapped_sg->mapped = true;
1173     } else {
1174         mapped_sg->sgt.sgl = req->src;
1175         mapped_sg->sgt.orig_nents = sg_nents;
1176         ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1177         if (ret) {
1178             kfree(rxd);
1179             return ret;
1180         }
1181 
1182         mapped_sg->dir = dir_src;
1183         mapped_sg->mapped = true;
1184 
1185         ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
1186                    &split_size, &src, &src_nents, gfp_flags);
1187         if (ret) {
1188             src_nents = mapped_sg->sgt.nents;
1189             src = mapped_sg->sgt.sgl;
1190         } else {
1191             mapped_sg->split_sg = src;
1192         }
1193     }
1194 
1195     dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
1196 
1197     if (!diff_dst) {
1198         dst_nents = src_nents;
1199         dst = src;
1200     } else {
1201         dst_nents = sg_nents_for_len(req->dst, req->size);
1202         mapped_sg = &rxd->mapped_sg[1];
1203 
1204         if (dst_nents == 1 && split_size <= req->dst->length) {
1205             dst = &mapped_sg->static_sg;
1206             dst_nents = 1;
1207             sg_init_table(dst, 1);
1208             sg_set_page(dst, sg_page(req->dst), split_size,
1209                     req->dst->offset);
1210 
1211             mapped_sg->sgt.sgl = dst;
1212             mapped_sg->sgt.orig_nents = dst_nents;
1213             ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1214                           DMA_FROM_DEVICE, 0);
1215             if (ret)
1216                 goto err_cleanup;
1217 
1218             mapped_sg->dir = DMA_FROM_DEVICE;
1219             mapped_sg->mapped = true;
1220         } else {
1221             mapped_sg->sgt.sgl = req->dst;
1222             mapped_sg->sgt.orig_nents = dst_nents;
1223             ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1224                           DMA_FROM_DEVICE, 0);
1225             if (ret)
1226                 goto err_cleanup;
1227 
1228             mapped_sg->dir = DMA_FROM_DEVICE;
1229             mapped_sg->mapped = true;
1230 
1231             ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
1232                        0, 1, &split_size, &dst, &dst_nents,
1233                        gfp_flags);
1234             if (ret) {
1235                 dst_nents = mapped_sg->sgt.nents;
1236                 dst = mapped_sg->sgt.sgl;
1237             } else {
1238                 mapped_sg->split_sg = dst;
1239             }
1240         }
1241     }
1242 
1243     rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
1244                          DMA_DEV_TO_MEM,
1245                          DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1246     if (!rxd->tx_in) {
1247         dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1248         ret = -EINVAL;
1249         goto err_cleanup;
1250     }
1251 
1252     rxd->req = (void *)req->base;
1253     rxd->enc = req->enc;
1254     rxd->iv_idx = req->ctx->iv_idx;
1255     rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1256     rxd->tx_in->callback = req->callback;
1257     rxd->tx_in->callback_param = rxd;
1258 
1259     tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
1260                      src_nents, DMA_MEM_TO_DEV,
1261                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1262 
1263     if (!tx_out) {
1264         dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1265         ret = -EINVAL;
1266         goto err_cleanup;
1267     }
1268 
1269     /*
1270      * Prepare metadata for DMA engine. This essentially describes the
1271      * crypto algorithm to be used, data sizes, different keys etc.
1272      */
1273     mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
1274 
1275     sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1276                    sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
1277                sa_ctx->epib);
1278 
1279     ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1280     dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
1281 
1282     dmaengine_submit(tx_out);
1283     dmaengine_submit(rxd->tx_in);
1284 
1285     dma_async_issue_pending(dma_rx);
1286     dma_async_issue_pending(pdata->dma_tx);
1287 
1288     return -EINPROGRESS;
1289 
1290 err_cleanup:
1291     sa_free_sa_rx_data(rxd);
1292 
1293     return ret;
1294 }
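/*
 * Dataflow summary for sa_run() above: a TX descriptor carries the command
 * label metadata and the source data into SA2UL (DMA_MEM_TO_DEV), the engine
 * processes the packet, and a separate RX descriptor returns the result
 * (DMA_DEV_TO_MEM); completion is signalled through the RX callback, which
 * also frees the sa_rx_data bookkeeping.
 */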
1295 
1296 static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1297 {
1298     struct sa_tfm_ctx *ctx =
1299         crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1300     struct crypto_alg *alg = req->base.tfm->__crt_alg;
1301     struct sa_req sa_req = { 0 };
1302 
1303     if (!req->cryptlen)
1304         return 0;
1305 
1306     if (req->cryptlen % alg->cra_blocksize)
1307         return -EINVAL;
1308 
1309     /* Use SW fallback if the data size is not supported */
1310     if (req->cryptlen > SA_MAX_DATA_SZ ||
1311         (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1312          req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1313         struct skcipher_request *subreq = skcipher_request_ctx(req);
1314 
1315         skcipher_request_set_tfm(subreq, ctx->fallback.skcipher);
1316         skcipher_request_set_callback(subreq, req->base.flags,
1317                           req->base.complete,
1318                           req->base.data);
1319         skcipher_request_set_crypt(subreq, req->src, req->dst,
1320                        req->cryptlen, req->iv);
1321         if (enc)
1322             return crypto_skcipher_encrypt(subreq);
1323         else
1324             return crypto_skcipher_decrypt(subreq);
1325     }
1326 
1327     sa_req.size = req->cryptlen;
1328     sa_req.enc_size = req->cryptlen;
1329     sa_req.src = req->src;
1330     sa_req.dst = req->dst;
1331     sa_req.enc_iv = iv;
1332     sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1333     sa_req.enc = enc;
1334     sa_req.callback = sa_aes_dma_in_callback;
1335     sa_req.mdata_size = 44;
1336     sa_req.base = &req->base;
1337     sa_req.ctx = ctx;
1338 
1339     return sa_run(&sa_req);
1340 }
1341 
1342 static int sa_encrypt(struct skcipher_request *req)
1343 {
1344     return sa_cipher_run(req, req->iv, 1);
1345 }
1346 
1347 static int sa_decrypt(struct skcipher_request *req)
1348 {
1349     return sa_cipher_run(req, req->iv, 0);
1350 }
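/*
 * Usage sketch (not part of the driver): exercising a kernel skcipher such
 * as the "cbc(aes)" instance registered by this driver through the AF_ALG
 * socket interface. Error handling omitted; a hedged example following the
 * kernel's userspace-if documentation, not a reference implementation.
 */
#include <linux/if_alg.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	unsigned char key[16] = { 0 };		/* demo key: all zeroes */
	unsigned char pt[16]  = "sixteen bytes!!";
	unsigned char ct[16];
	char cbuf[CMSG_SPACE(sizeof(int)) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
	struct af_alg_iv *ivp;
	struct cmsghdr *cmsg;
	struct msghdr msg = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, NULL);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
	ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
	ivp->ivlen = 16;
	memset(ivp->iv, 0, 16);			/* demo IV: all zeroes */

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));

	for (i = 0; i < 16; i++)
		printf("%02x", ct[i]);		/* one AES-CBC block */
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}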
1351 
1352 static void sa_sha_dma_in_callback(void *data)
1353 {
1354     struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1355     struct ahash_request *req;
1356     struct crypto_ahash *tfm;
1357     unsigned int authsize;
1358     int i;
1359     size_t ml, pl;
1360     u32 *result;
1361     __be32 *mdptr;
1362 
1363     sa_sync_from_device(rxd);
1364     req = container_of(rxd->req, struct ahash_request, base);
1365     tfm = crypto_ahash_reqtfm(req);
1366     authsize = crypto_ahash_digestsize(tfm);
1367 
1368     mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1369     result = (u32 *)req->result;
1370 
1371     for (i = 0; i < (authsize / 4); i++)
1372         result[i] = be32_to_cpu(mdptr[i + 4]);
1373 
1374     sa_free_sa_rx_data(rxd);
1375 
1376     ahash_request_complete(req, 0);
1377 }
1378 
1379 static int zero_message_process(struct ahash_request *req)
1380 {
1381     struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1382     int sa_digest_size = crypto_ahash_digestsize(tfm);
1383 
1384     switch (sa_digest_size) {
1385     case SHA1_DIGEST_SIZE:
1386         memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1387         break;
1388     case SHA256_DIGEST_SIZE:
1389         memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1390         break;
1391     case SHA512_DIGEST_SIZE:
1392         memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1393         break;
1394     default:
1395         return -EINVAL;
1396     }
1397 
1398     return 0;
1399 }
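/*
 * For reference, the precomputed constants used above include:
 *   SHA1("")   = da39a3ee5e6b4b0d3255bfef95601890afd80709
 *   SHA256("") = e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
 */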
1400 
1401 static int sa_sha_run(struct ahash_request *req)
1402 {
1403     struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1404     struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1405     struct sa_req sa_req = { 0 };
1406     size_t auth_len;
1407 
1408     auth_len = req->nbytes;
1409 
1410     if (!auth_len)
1411         return zero_message_process(req);
1412 
1413     if (auth_len > SA_MAX_DATA_SZ ||
1414         (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1415          auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1416         struct ahash_request *subreq = &rctx->fallback_req;
1417         int ret = 0;
1418 
1419         ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1420         subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1421 
1422         crypto_ahash_init(subreq);
1423 
1424         subreq->nbytes = auth_len;
1425         subreq->src = req->src;
1426         subreq->result = req->result;
1427 
1428         ret |= crypto_ahash_update(subreq);
1429 
1430         subreq->nbytes = 0;
1431 
1432         ret |= crypto_ahash_final(subreq);
1433 
1434         return ret;
1435     }
1436 
1437     sa_req.size = auth_len;
1438     sa_req.auth_size = auth_len;
1439     sa_req.src = req->src;
1440     sa_req.dst = req->src;
1441     sa_req.enc = true;
1442     sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1443     sa_req.callback = sa_sha_dma_in_callback;
1444     sa_req.mdata_size = 28;
1445     sa_req.ctx = ctx;
1446     sa_req.base = &req->base;
1447 
1448     return sa_run(&sa_req);
1449 }
1450 
1451 static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct  algo_data *ad)
1452 {
1453     int bs = crypto_shash_blocksize(ctx->shash);
1454     int cmdl_len;
1455     struct sa_cmdl_cfg cfg;
1456 
1457     ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1458     ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1459     ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1460 
1461     memset(ctx->authkey, 0, bs);
1462     memset(&cfg, 0, sizeof(cfg));
1463     cfg.aalg = ad->aalg_id;
1464     cfg.enc_eng_id = ad->enc_eng.eng_id;
1465     cfg.auth_eng_id = ad->auth_eng.eng_id;
1466     cfg.iv_size = 0;
1467     cfg.akey = NULL;
1468     cfg.akey_len = 0;
1469 
1470     ctx->dev_data = dev_get_drvdata(sa_k3_dev);
1471     /* Setup Encryption Security Context & Command label template */
1472     if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
1473                ad, 0, &ctx->enc.epib[1]))
1474         goto badkey;
1475 
1476     cmdl_len = sa_format_cmdl_gen(&cfg,
1477                       (u8 *)ctx->enc.cmdl,
1478                       &ctx->enc.cmdl_upd_info);
1479     if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1480         goto badkey;
1481 
1482     ctx->enc.cmdl_size = cmdl_len;
1483 
1484     return 0;
1485 
1486 badkey:
1487     dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1488     return -EINVAL;
1489 }
1490 
1491 static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1492 {
1493     struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1494     struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1495     int ret;
1496 
1497     memset(ctx, 0, sizeof(*ctx));
1498     ctx->dev_data = data;
1499     ret = sa_init_ctx_info(&ctx->enc, data);
1500     if (ret)
1501         return ret;
1502 
1503     if (alg_base) {
1504         ctx->shash = crypto_alloc_shash(alg_base, 0,
1505                         CRYPTO_ALG_NEED_FALLBACK);
1506         if (IS_ERR(ctx->shash)) {
1507             dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1508                 alg_base);
1509             return PTR_ERR(ctx->shash);
1510         }
1511         /* for fallback */
1512         ctx->fallback.ahash =
1513             crypto_alloc_ahash(alg_base, 0,
1514                        CRYPTO_ALG_NEED_FALLBACK);
1515         if (IS_ERR(ctx->fallback.ahash)) {
1516             dev_err(ctx->dev_data->dev,
1517                 "Could not load fallback driver\n");
1518             return PTR_ERR(ctx->fallback.ahash);
1519         }
1520     }
1521 
1522     dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1523         __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1524         ctx->dec.sc_id, &ctx->dec.sc_phys);
1525 
1526     crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1527                  sizeof(struct sa_sha_req_ctx) +
1528                  crypto_ahash_reqsize(ctx->fallback.ahash));
1529 
1530     return 0;
1531 }
1532 
1533 static int sa_sha_digest(struct ahash_request *req)
1534 {
1535     return sa_sha_run(req);
1536 }
1537 
1538 static int sa_sha_init(struct ahash_request *req)
1539 {
1540     struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1541     struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1542     struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1543 
1544     dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1545         crypto_ahash_digestsize(tfm), rctx);
1546 
1547     ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1548     rctx->fallback_req.base.flags =
1549         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1550 
1551     return crypto_ahash_init(&rctx->fallback_req);
1552 }
1553 
1554 static int sa_sha_update(struct ahash_request *req)
1555 {
1556     struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1557     struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1558     struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1559 
1560     ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1561     rctx->fallback_req.base.flags =
1562         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1563     rctx->fallback_req.nbytes = req->nbytes;
1564     rctx->fallback_req.src = req->src;
1565 
1566     return crypto_ahash_update(&rctx->fallback_req);
1567 }
1568 
1569 static int sa_sha_final(struct ahash_request *req)
1570 {
1571     struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1572     struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1573     struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1574 
1575     ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1576     rctx->fallback_req.base.flags =
1577         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1578     rctx->fallback_req.result = req->result;
1579 
1580     return crypto_ahash_final(&rctx->fallback_req);
1581 }
1582 
1583 static int sa_sha_finup(struct ahash_request *req)
1584 {
1585     struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1586     struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1587     struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1588 
1589     ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1590     rctx->fallback_req.base.flags =
1591         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1592 
1593     rctx->fallback_req.nbytes = req->nbytes;
1594     rctx->fallback_req.src = req->src;
1595     rctx->fallback_req.result = req->result;
1596 
1597     return crypto_ahash_finup(&rctx->fallback_req);
1598 }
1599 
1600 static int sa_sha_import(struct ahash_request *req, const void *in)
1601 {
1602     struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1603     struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1604     struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1605 
1606     ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1607     rctx->fallback_req.base.flags = req->base.flags &
1608         CRYPTO_TFM_REQ_MAY_SLEEP;
1609 
1610     return crypto_ahash_import(&rctx->fallback_req, in);
1611 }
1612 
1613 static int sa_sha_export(struct ahash_request *req, void *out)
1614 {
1615     struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1616     struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1617     struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1618     struct ahash_request *subreq = &rctx->fallback_req;
1619 
1620     ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1621     subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1622 
1623     return crypto_ahash_export(subreq, out);
1624 }
1625 
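/*
 * The three digest-size variants below differ only in the algo_data they
 * feed to sa_sha_setup(): the authentication algorithm ID, the digest
 * size and the SA_AUTH_SW_CTRL_* mode-control word for the auth engine.
 */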
1626 static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1627 {
1628     struct algo_data ad = { 0 };
1629     struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1630 
1631     sa_sha_cra_init_alg(tfm, "sha1");
1632 
1633     ad.aalg_id = SA_AALG_ID_SHA1;
1634     ad.hash_size = SHA1_DIGEST_SIZE;
1635     ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1636 
1637     sa_sha_setup(ctx, &ad);
1638 
1639     return 0;
1640 }
1641 
1642 static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1643 {
1644     struct algo_data ad = { 0 };
1645     struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1646 
1647     sa_sha_cra_init_alg(tfm, "sha256");
1648 
1649     ad.aalg_id = SA_AALG_ID_SHA2_256;
1650     ad.hash_size = SHA256_DIGEST_SIZE;
1651     ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1652 
1653     sa_sha_setup(ctx, &ad);
1654 
1655     return 0;
1656 }
1657 
1658 static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1659 {
1660     struct algo_data ad = { 0 };
1661     struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1662 
1663     sa_sha_cra_init_alg(tfm, "sha512");
1664 
1665     ad.aalg_id = SA_AALG_ID_SHA2_512;
1666     ad.hash_size = SHA512_DIGEST_SIZE;
1667     ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1668 
1669     sa_sha_setup(ctx, &ad);
1670 
1671     return 0;
1672 }
1673 
1674 static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1675 {
1676     struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1677     struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1678 
1679     dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1680         __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1681         ctx->dec.sc_id, &ctx->dec.sc_phys);
1682 
1683     if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1684         sa_free_ctx_info(&ctx->enc, data);
1685 
1686     crypto_free_shash(ctx->shash);
1687     crypto_free_ahash(ctx->fallback.ahash);
1688 }
1689 
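/*
 * AEAD completion runs from the RX DMA callback.  The engine hands back
 * the computed tag in the descriptor metadata (mdptr[4] onwards) in the
 * opposite byte order, hence the swab32() pass.  On encryption the tag is
 * appended to the destination scatterlist; on decryption it is compared
 * against the tag trailing the source data, and a mismatch completes the
 * request with -EBADMSG.  (memcmp() is not constant-time; a hardened
 * variant would use crypto_memneq() for the tag comparison.)
 */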
1690 static void sa_aead_dma_in_callback(void *data)
1691 {
1692     struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1693     struct aead_request *req;
1694     struct crypto_aead *tfm;
1695     unsigned int start;
1696     unsigned int authsize;
1697     u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1698     size_t pl, ml;
1699     int i;
1700     int err = 0;
1701     u32 *mdptr;
1702 
1703     sa_sync_from_device(rxd);
1704     req = container_of(rxd->req, struct aead_request, base);
1705     tfm = crypto_aead_reqtfm(req);
1706     start = req->assoclen + req->cryptlen;
1707     authsize = crypto_aead_authsize(tfm);
1708 
1709     mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1710     for (i = 0; i < (authsize / 4); i++)
1711         mdptr[i + 4] = swab32(mdptr[i + 4]);
1712 
1713     if (rxd->enc) {
1714         scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1715                      1);
1716     } else {
1717         start -= authsize;
1718         scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1719                      0);
1720 
1721         err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1722     }
1723 
1724     sa_free_sa_rx_data(rxd);
1725 
1726     aead_request_complete(req, err);
1727 }
1728 
1729 static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1730                 const char *fallback)
1731 {
1732     struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1733     struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1734     int ret;
1735 
1736     memzero_explicit(ctx, sizeof(*ctx));
1737     ctx->dev_data = data;
1738 
1739     ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1740     if (IS_ERR(ctx->shash)) {
1741         dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1742         return PTR_ERR(ctx->shash);
1743     }
1744 
1745     ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1746                            CRYPTO_ALG_NEED_FALLBACK);
1747 
1748     if (IS_ERR(ctx->fallback.aead)) {
1749         dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1750             fallback);
1751         ret = PTR_ERR(ctx->fallback.aead);
             goto free_shash;
1752     }
1753 
1754     crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1755                 crypto_aead_reqsize(ctx->fallback.aead));
1756 
1757     ret = sa_init_ctx_info(&ctx->enc, data);
1758     if (ret)
1759         goto free_aead;
1760 
1761     ret = sa_init_ctx_info(&ctx->dec, data);
1762     if (ret)
1763         goto free_enc;
1766 
1767     dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1768         __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1769         ctx->dec.sc_id, &ctx->dec.sc_phys);
1770 
1771     return 0;
 
     /* Unwind so an error here does not leak the transforms taken above */
 free_enc:
         sa_free_ctx_info(&ctx->enc, data);
 free_aead:
         crypto_free_aead(ctx->fallback.aead);
 free_shash:
         crypto_free_shash(ctx->shash);
         return ret;
1772 }
1773 
1774 static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1775 {
1776     return sa_cra_init_aead(tfm, "sha1",
1777                 "authenc(hmac(sha1-ce),cbc(aes-ce))");
1778 }
1779 
1780 static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1781 {
1782     return sa_cra_init_aead(tfm, "sha256",
1783                 "authenc(hmac(sha256-ce),cbc(aes-ce))");
1784 }
1785 
1786 static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1787 {
1788     struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1789     struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1790 
1791     crypto_free_shash(ctx->shash);
1792     crypto_free_aead(ctx->fallback.aead);
1793 
1794     sa_free_ctx_info(&ctx->enc, data);
1795     sa_free_ctx_info(&ctx->dec, data);
1796 }
1797 
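/*
 * The setkey path below takes the usual authenc() key blob, an rtattr
 * header carrying the encryption key length followed by the auth key and
 * then the cipher key, and lets crypto_authenc_extractkeys() split it.
 * From there it builds two security contexts (encrypt and decrypt) plus
 * their command-label templates, and finally feeds the unmodified blob to
 * the fallback aead so both paths stay keyed identically.
 */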
1798 /* AEAD algorithm configuration interface function */
1799 static int sa_aead_setkey(struct crypto_aead *authenc,
1800               const u8 *key, unsigned int keylen,
1801               struct algo_data *ad)
1802 {
1803     struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1804     struct crypto_authenc_keys keys;
1805     int cmdl_len;
1806     struct sa_cmdl_cfg cfg;
1807     int key_idx;
1808 
1809     if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1810         return -EINVAL;
1811 
1812     /* Convert the key size (16/24/32) to the key size index (0/1/2) */
1813     key_idx = (keys.enckeylen >> 3) - 2;
         /* Short keys would drive key_idx negative and index out of bounds */
1814     if (key_idx < 0 || key_idx >= 3)
1815         return -EINVAL;
1816 
1817     ad->ctx = ctx;
1818     ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1819     ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1820     ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1821     ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1822     ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1823     ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1824     ad->inv_key = true;
1825     ad->keyed_mac = true;
1826     ad->ealg_id = SA_EALG_ID_AES_CBC;
1827     ad->prep_iopad = sa_prepare_iopads;
1828 
1829     memset(&cfg, 0, sizeof(cfg));
1830     cfg.enc = true;
1831     cfg.aalg = ad->aalg_id;
1832     cfg.enc_eng_id = ad->enc_eng.eng_id;
1833     cfg.auth_eng_id = ad->auth_eng.eng_id;
1834     cfg.iv_size = crypto_aead_ivsize(authenc);
1835     cfg.akey = keys.authkey;
1836     cfg.akey_len = keys.authkeylen;
1837 
1838     /* Setup Encryption Security Context & Command label template */
1839     if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
1840                keys.enckeylen, keys.authkey, keys.authkeylen,
1841                ad, 1, &ctx->enc.epib[1]))
1842         return -EINVAL;
1843 
1844     cmdl_len = sa_format_cmdl_gen(&cfg,
1845                       (u8 *)ctx->enc.cmdl,
1846                       &ctx->enc.cmdl_upd_info);
1847     if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1848         return -EINVAL;
1849 
1850     ctx->enc.cmdl_size = cmdl_len;
1851 
1852     /* Setup Decryption Security Context & Command label template */
1853     if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
1854                keys.enckeylen, keys.authkey, keys.authkeylen,
1855                ad, 0, &ctx->dec.epib[1]))
1856         return -EINVAL;
1857 
1858     cfg.enc = false;
1859     cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1860                       &ctx->dec.cmdl_upd_info);
1861 
1862     if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1863         return -EINVAL;
1864 
1865     ctx->dec.cmdl_size = cmdl_len;
1866 
1867     crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1868     crypto_aead_set_flags(ctx->fallback.aead,
1869                   crypto_aead_get_flags(authenc) &
1870                   CRYPTO_TFM_REQ_MASK);
1871 
         /* Key the fallback as well; don't swallow its verdict on the key */
1873     return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1874 }
1875 
1876 static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1877 {
1878     struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1879 
1880     return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1881 }
1882 
1883 static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1884                    const u8 *key, unsigned int keylen)
1885 {
1886     struct algo_data ad = { 0 };
1887 
1888     ad.ealg_id = SA_EALG_ID_AES_CBC;
1889     ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1890     ad.hash_size = SHA1_DIGEST_SIZE;
1891     ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1892 
1893     return sa_aead_setkey(authenc, key, keylen, &ad);
1894 }
1895 
1896 static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1897                      const u8 *key, unsigned int keylen)
1898 {
1899     struct algo_data ad = { 0 };
1900 
1901     ad.ealg_id = SA_EALG_ID_AES_CBC;
1902     ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1903     ad.hash_size = SHA256_DIGEST_SIZE;
1904     ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1905 
1906     return sa_aead_setkey(authenc, key, keylen, &ad);
1907 }
1908 
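/*
 * Requests are punted to the fallback aead when the payload is either too
 * large for the engine (> SA_MAX_DATA_SZ) or inside the size window the
 * driver treats as unsafe (SA_UNSAFE_DATA_SZ_MIN..MAX).  The sub-request
 * is carved out of the request context itself, which sa_cra_init_aead()
 * sized to hold a full fallback request, so this path allocates nothing.
 * Otherwise the request is packed into a struct sa_req (the 52-byte
 * mdata_size presumably covers the command label plus the returned tag
 * words) and queued through sa_run().
 */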
1909 static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1910 {
1911     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1912     struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1913     struct sa_req sa_req = { 0 };
1914     size_t auth_size, enc_size;
1915 
1916     enc_size = req->cryptlen;
1917     auth_size = req->assoclen + req->cryptlen;
1918 
1919     if (!enc) {
1920         enc_size -= crypto_aead_authsize(tfm);
1921         auth_size -= crypto_aead_authsize(tfm);
1922     }
1923 
1924     if (auth_size > SA_MAX_DATA_SZ ||
1925         (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1926          auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1927         struct aead_request *subreq = aead_request_ctx(req);
1928         int ret;
1929 
1930         aead_request_set_tfm(subreq, ctx->fallback.aead);
1931         aead_request_set_callback(subreq, req->base.flags,
1932                       req->base.complete, req->base.data);
1933         aead_request_set_crypt(subreq, req->src, req->dst,
1934                        req->cryptlen, req->iv);
1935         aead_request_set_ad(subreq, req->assoclen);
1936 
1937         ret = enc ? crypto_aead_encrypt(subreq) :
1938             crypto_aead_decrypt(subreq);
1939         return ret;
1940     }
1941 
1942     sa_req.enc_offset = req->assoclen;
1943     sa_req.enc_size = enc_size;
1944     sa_req.auth_size = auth_size;
1945     sa_req.size = auth_size;
1946     sa_req.enc_iv = iv;
1947     sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1948     sa_req.enc = enc;
1949     sa_req.callback = sa_aead_dma_in_callback;
1950     sa_req.mdata_size = 52;
1951     sa_req.base = &req->base;
1952     sa_req.ctx = ctx;
1953     sa_req.src = req->src;
1954     sa_req.dst = req->dst;
1955 
1956     return sa_run(&sa_req);
1957 }
1958 
1959 /* AEAD algorithm encrypt interface function */
1960 static int sa_aead_encrypt(struct aead_request *req)
1961 {
1962     return sa_aead_run(req, req->iv, 1);
1963 }
1964 
1965 /* AEAD algorithm decrypt interface function */
1966 static int sa_aead_decrypt(struct aead_request *req)
1967 {
1968     return sa_aead_run(req, req->iv, 0);
1969 }
1970 
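/*
 * Template table for every algorithm the driver can expose, indexed by
 * enum sa_algo_id so that the per-SoC supported_algos bitmap can gate
 * each entry at registration time.  Consumers reach these through the
 * generic cra_name; a minimal kernel-side sketch (error handling elided,
 * nothing assumed beyond the standard skcipher API):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	... build and submit an skcipher_request ...
 *	crypto_free_skcipher(tfm);
 *
 * With this driver registered, its cra_priority of 30000 makes
 * "cbc-aes-sa2ul" the preferred implementation.
 */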
1971 static struct sa_alg_tmpl sa_algs[] = {
1972     [SA_ALG_CBC_AES] = {
1973         .type = CRYPTO_ALG_TYPE_SKCIPHER,
1974         .alg.skcipher = {
1975             .base.cra_name      = "cbc(aes)",
1976             .base.cra_driver_name   = "cbc-aes-sa2ul",
1977             .base.cra_priority  = 30000,
1978             .base.cra_flags     = CRYPTO_ALG_TYPE_SKCIPHER |
1979                           CRYPTO_ALG_KERN_DRIVER_ONLY |
1980                           CRYPTO_ALG_ASYNC |
1981                           CRYPTO_ALG_NEED_FALLBACK,
1982             .base.cra_blocksize = AES_BLOCK_SIZE,
1983             .base.cra_ctxsize   = sizeof(struct sa_tfm_ctx),
1984             .base.cra_module    = THIS_MODULE,
1985             .init           = sa_cipher_cra_init,
1986             .exit           = sa_cipher_cra_exit,
1987             .min_keysize        = AES_MIN_KEY_SIZE,
1988             .max_keysize        = AES_MAX_KEY_SIZE,
1989             .ivsize         = AES_BLOCK_SIZE,
1990             .setkey         = sa_aes_cbc_setkey,
1991             .encrypt        = sa_encrypt,
1992             .decrypt        = sa_decrypt,
1993         }
1994     },
1995     [SA_ALG_EBC_AES] = {
1996         .type = CRYPTO_ALG_TYPE_SKCIPHER,
1997         .alg.skcipher = {
1998             .base.cra_name      = "ecb(aes)",
1999             .base.cra_driver_name   = "ecb-aes-sa2ul",
2000             .base.cra_priority  = 30000,
2001             .base.cra_flags     = CRYPTO_ALG_TYPE_SKCIPHER |
2002                           CRYPTO_ALG_KERN_DRIVER_ONLY |
2003                           CRYPTO_ALG_ASYNC |
2004                           CRYPTO_ALG_NEED_FALLBACK,
2005             .base.cra_blocksize = AES_BLOCK_SIZE,
2006             .base.cra_ctxsize   = sizeof(struct sa_tfm_ctx),
2007             .base.cra_module    = THIS_MODULE,
2008             .init           = sa_cipher_cra_init,
2009             .exit           = sa_cipher_cra_exit,
2010             .min_keysize        = AES_MIN_KEY_SIZE,
2011             .max_keysize        = AES_MAX_KEY_SIZE,
2012             .setkey         = sa_aes_ecb_setkey,
2013             .encrypt        = sa_encrypt,
2014             .decrypt        = sa_decrypt,
2015         }
2016     },
2017     [SA_ALG_CBC_DES3] = {
2018         .type = CRYPTO_ALG_TYPE_SKCIPHER,
2019         .alg.skcipher = {
2020             .base.cra_name      = "cbc(des3_ede)",
2021             .base.cra_driver_name   = "cbc-des3-sa2ul",
2022             .base.cra_priority  = 30000,
2023             .base.cra_flags     = CRYPTO_ALG_TYPE_SKCIPHER |
2024                           CRYPTO_ALG_KERN_DRIVER_ONLY |
2025                           CRYPTO_ALG_ASYNC |
2026                           CRYPTO_ALG_NEED_FALLBACK,
2027             .base.cra_blocksize = DES_BLOCK_SIZE,
2028             .base.cra_ctxsize   = sizeof(struct sa_tfm_ctx),
2029             .base.cra_module    = THIS_MODULE,
2030             .init           = sa_cipher_cra_init,
2031             .exit           = sa_cipher_cra_exit,
2032             .min_keysize        = 3 * DES_KEY_SIZE,
2033             .max_keysize        = 3 * DES_KEY_SIZE,
2034             .ivsize         = DES_BLOCK_SIZE,
2035             .setkey         = sa_3des_cbc_setkey,
2036             .encrypt        = sa_encrypt,
2037             .decrypt        = sa_decrypt,
2038         }
2039     },
2040     [SA_ALG_ECB_DES3] = {
2041         .type = CRYPTO_ALG_TYPE_SKCIPHER,
2042         .alg.skcipher = {
2043             .base.cra_name      = "ecb(des3_ede)",
2044             .base.cra_driver_name   = "ecb-des3-sa2ul",
2045             .base.cra_priority  = 30000,
2046             .base.cra_flags     = CRYPTO_ALG_TYPE_SKCIPHER |
2047                           CRYPTO_ALG_KERN_DRIVER_ONLY |
2048                           CRYPTO_ALG_ASYNC |
2049                           CRYPTO_ALG_NEED_FALLBACK,
2050             .base.cra_blocksize = DES_BLOCK_SIZE,
2051             .base.cra_ctxsize   = sizeof(struct sa_tfm_ctx),
2052             .base.cra_module    = THIS_MODULE,
2053             .init           = sa_cipher_cra_init,
2054             .exit           = sa_cipher_cra_exit,
2055             .min_keysize        = 3 * DES_KEY_SIZE,
2056             .max_keysize        = 3 * DES_KEY_SIZE,
2057             .setkey         = sa_3des_ecb_setkey,
2058             .encrypt        = sa_encrypt,
2059             .decrypt        = sa_decrypt,
2060         }
2061     },
2062     [SA_ALG_SHA1] = {
2063         .type = CRYPTO_ALG_TYPE_AHASH,
2064         .alg.ahash = {
2065             .halg.base = {
2066                 .cra_name   = "sha1",
2067                 .cra_driver_name    = "sha1-sa2ul",
2068                 .cra_priority   = 400,
2069                 .cra_flags  = CRYPTO_ALG_TYPE_AHASH |
2070                           CRYPTO_ALG_ASYNC |
2071                           CRYPTO_ALG_KERN_DRIVER_ONLY |
2072                           CRYPTO_ALG_NEED_FALLBACK,
2073                 .cra_blocksize  = SHA1_BLOCK_SIZE,
2074                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2075                 .cra_module = THIS_MODULE,
2076                 .cra_init   = sa_sha1_cra_init,
2077                 .cra_exit   = sa_sha_cra_exit,
2078             },
2079             .halg.digestsize    = SHA1_DIGEST_SIZE,
2080             .halg.statesize     = sizeof(struct sa_sha_req_ctx) +
2081                           sizeof(struct sha1_state),
2082             .init           = sa_sha_init,
2083             .update         = sa_sha_update,
2084             .final          = sa_sha_final,
2085             .finup          = sa_sha_finup,
2086             .digest         = sa_sha_digest,
2087             .export         = sa_sha_export,
2088             .import         = sa_sha_import,
2089         },
2090     },
2091     [SA_ALG_SHA256] = {
2092         .type = CRYPTO_ALG_TYPE_AHASH,
2093         .alg.ahash = {
2094             .halg.base = {
2095                 .cra_name   = "sha256",
2096                 .cra_driver_name    = "sha256-sa2ul",
2097                 .cra_priority   = 400,
2098                 .cra_flags  = CRYPTO_ALG_TYPE_AHASH |
2099                           CRYPTO_ALG_ASYNC |
2100                           CRYPTO_ALG_KERN_DRIVER_ONLY |
2101                           CRYPTO_ALG_NEED_FALLBACK,
2102                 .cra_blocksize  = SHA256_BLOCK_SIZE,
2103                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2104                 .cra_module = THIS_MODULE,
2105                 .cra_init   = sa_sha256_cra_init,
2106                 .cra_exit   = sa_sha_cra_exit,
2107             },
2108             .halg.digestsize    = SHA256_DIGEST_SIZE,
2109             .halg.statesize     = sizeof(struct sa_sha_req_ctx) +
2110                           sizeof(struct sha256_state),
2111             .init           = sa_sha_init,
2112             .update         = sa_sha_update,
2113             .final          = sa_sha_final,
2114             .finup          = sa_sha_finup,
2115             .digest         = sa_sha_digest,
2116             .export         = sa_sha_export,
2117             .import         = sa_sha_import,
2118         },
2119     },
2120     [SA_ALG_SHA512] = {
2121         .type = CRYPTO_ALG_TYPE_AHASH,
2122         .alg.ahash = {
2123             .halg.base = {
2124                 .cra_name   = "sha512",
2125                 .cra_driver_name    = "sha512-sa2ul",
2126                 .cra_priority   = 400,
2127                 .cra_flags  = CRYPTO_ALG_TYPE_AHASH |
2128                           CRYPTO_ALG_ASYNC |
2129                           CRYPTO_ALG_KERN_DRIVER_ONLY |
2130                           CRYPTO_ALG_NEED_FALLBACK,
2131                 .cra_blocksize  = SHA512_BLOCK_SIZE,
2132                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2133                 .cra_module = THIS_MODULE,
2134                 .cra_init   = sa_sha512_cra_init,
2135                 .cra_exit   = sa_sha_cra_exit,
2136             },
2137             .halg.digestsize    = SHA512_DIGEST_SIZE,
2138             .halg.statesize     = sizeof(struct sa_sha_req_ctx) +
2139                           sizeof(struct sha512_state),
2140             .init           = sa_sha_init,
2141             .update         = sa_sha_update,
2142             .final          = sa_sha_final,
2143             .finup          = sa_sha_finup,
2144             .digest         = sa_sha_digest,
2145             .export         = sa_sha_export,
2146             .import         = sa_sha_import,
2147         },
2148     },
2149     [SA_ALG_AUTHENC_SHA1_AES] = {
2150         .type   = CRYPTO_ALG_TYPE_AEAD,
2151         .alg.aead = {
2152             .base = {
2153                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2154                 .cra_driver_name =
2155                     "authenc(hmac(sha1),cbc(aes))-sa2ul",
2156                 .cra_blocksize = AES_BLOCK_SIZE,
2157                 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2158                     CRYPTO_ALG_KERN_DRIVER_ONLY |
2159                     CRYPTO_ALG_ASYNC |
2160                     CRYPTO_ALG_NEED_FALLBACK,
2161                 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2162                 .cra_module = THIS_MODULE,
2163                 .cra_priority = 3000,
2164             },
2165             .ivsize = AES_BLOCK_SIZE,
2166             .maxauthsize = SHA1_DIGEST_SIZE,
2167 
2168             .init = sa_cra_init_aead_sha1,
2169             .exit = sa_exit_tfm_aead,
2170             .setkey = sa_aead_cbc_sha1_setkey,
2171             .setauthsize = sa_aead_setauthsize,
2172             .encrypt = sa_aead_encrypt,
2173             .decrypt = sa_aead_decrypt,
2174         },
2175     },
2176     [SA_ALG_AUTHENC_SHA256_AES] = {
2177         .type   = CRYPTO_ALG_TYPE_AEAD,
2178         .alg.aead = {
2179             .base = {
2180                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2181                 .cra_driver_name =
2182                     "authenc(hmac(sha256),cbc(aes))-sa2ul",
2183                 .cra_blocksize = AES_BLOCK_SIZE,
2184                 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2185                     CRYPTO_ALG_KERN_DRIVER_ONLY |
2186                     CRYPTO_ALG_ASYNC |
2187                     CRYPTO_ALG_NEED_FALLBACK,
2188                 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2189                 .cra_module = THIS_MODULE,
2190                 .cra_alignmask = 0,
2191                 .cra_priority = 3000,
2192             },
2193             .ivsize = AES_BLOCK_SIZE,
2194             .maxauthsize = SHA256_DIGEST_SIZE,
2195 
2196             .init = sa_cra_init_aead_sha256,
2197             .exit = sa_exit_tfm_aead,
2198             .setkey = sa_aead_cbc_sha256_setkey,
2199             .setauthsize = sa_aead_setauthsize,
2200             .encrypt = sa_aead_encrypt,
2201             .decrypt = sa_aead_decrypt,
2202         },
2203     },
2204 };
2205 
2206 /* Register the algorithms in crypto framework */
2207 static void sa_register_algos(struct sa_crypto_data *dev_data)
2208 {
2209     const struct sa_match_data *match_data = dev_data->match_data;
2210     struct device *dev = dev_data->dev;
2211     char *alg_name;
2212     u32 type;
2213     int i, err;
2214 
2215     for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2216         /* Skip unsupported algos */
2217         if (!(match_data->supported_algos & BIT(i)))
2218             continue;
2219 
2220         type = sa_algs[i].type;
2221         if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2222             alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2223             err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2224         } else if (type == CRYPTO_ALG_TYPE_AHASH) {
2225             alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2226             err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2227         } else if (type == CRYPTO_ALG_TYPE_AEAD) {
2228             alg_name = sa_algs[i].alg.aead.base.cra_name;
2229             err = crypto_register_aead(&sa_algs[i].alg.aead);
2230         } else {
2231             dev_err(dev,
2232                 "unsupported crypto algorithm (%d)\n",
2233                 sa_algs[i].type);
2234             continue;
2235         }
2236 
2237         if (err)
2238             dev_err(dev, "Failed to register '%s'\n", alg_name);
2239         else
2240             sa_algs[i].registered = true;
2241     }
2242 }
2243 
2244 /* Unregister the algorithms in crypto framework */
2245 static void sa_unregister_algos(const struct device *dev)
2246 {
2247     u32 type;
2248     int i;
2249 
2250     for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2251         type = sa_algs[i].type;
2252         if (!sa_algs[i].registered)
2253             continue;
2254         if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2255             crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2256         else if (type == CRYPTO_ALG_TYPE_AHASH)
2257             crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2258         else if (type == CRYPTO_ALG_TYPE_AEAD)
2259             crypto_unregister_aead(&sa_algs[i].alg.aead);
2260 
2261         sa_algs[i].registered = false;
2262     }
2263 }
2264 
2265 static int sa_init_mem(struct sa_crypto_data *dev_data)
2266 {
2267     struct device *dev = &dev_data->pdev->dev;
2268     /* Setup dma pool for security context buffers */
2269     dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2270                         SA_CTX_MAX_SZ, 64, 0);
2271     if (!dev_data->sc_pool) {
2272         dev_err(dev, "Failed to create dma pool\n");
2273         return -ENOMEM;
2274     }
2275 
2276     return 0;
2277 }
2278 
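/*
 * One TX channel feeds requests into the engine and two RX channels bring
 * results back; rx1 and rx2 appear to correspond to the two SA2UL receive
 * threads.  The 48-bit DMA mask matches what the K3 DMA fabric can
 * address, and all three channels share the same 4-byte, burst-of-4 slave
 * configuration.  The error path releases the channels in reverse order
 * of acquisition.
 */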
2279 static int sa_dma_init(struct sa_crypto_data *dd)
2280 {
2281     int ret;
2282     struct dma_slave_config cfg;
2283 
2284     dd->dma_rx1 = NULL;
2285     dd->dma_tx = NULL;
2286     dd->dma_rx2 = NULL;
2287 
2288     ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2289     if (ret)
2290         return ret;
2291 
2292     dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2293     if (IS_ERR(dd->dma_rx1))
2294         return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
2295                      "Unable to request rx1 DMA channel\n");
2296 
2297     dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2298     if (IS_ERR(dd->dma_rx2)) {
2299         ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
2300                     "Unable to request rx2 DMA channel\n");
2301         goto err_dma_rx2;
2302     }
2303 
2304     dd->dma_tx = dma_request_chan(dd->dev, "tx");
2305     if (IS_ERR(dd->dma_tx)) {
2306         ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
2307                     "Unable to request tx DMA channel\n");
2308         goto err_dma_tx;
2309     }
2310 
2311     memzero_explicit(&cfg, sizeof(cfg));
2312 
2313     cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2314     cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2315     cfg.src_maxburst = 4;
2316     cfg.dst_maxburst = 4;
2317 
2318     ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
2319     if (ret) {
2320         dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2321             ret);
2322         goto err_dma_config;
2323     }
2324 
2325     ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
2326     if (ret) {
2327         dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2328             ret);
2329         goto err_dma_config;
2330     }
2331 
2332     ret = dmaengine_slave_config(dd->dma_tx, &cfg);
2333     if (ret) {
2334         dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
2335             ret);
2336         goto err_dma_config;
2337     }
2338 
2339     return 0;
2340 
2341 err_dma_config:
2342     dma_release_channel(dd->dma_tx);
2343 err_dma_tx:
2344     dma_release_channel(dd->dma_rx2);
2345 err_dma_rx2:
2346     dma_release_channel(dd->dma_rx1);
2347 
2348     return ret;
2349 }
2350 
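/*
 * Called for each child device populated in probe (typically the RNG
 * subdevice described under the sa2ul node); the link keeps the child's
 * probing and power management coupled to this parent device.
 */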
2351 static int sa_link_child(struct device *dev, void *data)
2352 {
2353     struct device *parent = data;
2354 
2355     device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2356 
2357     return 0;
2358 }
2359 
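/*
 * Per-SoC match data: priv/priv_id end up in the security-context
 * headers, while supported_algos masks off the sa_algs[] entries the
 * given SoC does not offer.  The AM64 variant, for instance, drops DES3,
 * SHA1 and the SHA1-based authenc mode.
 */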
2360 static struct sa_match_data am654_match_data = {
2361     .priv = 1,
2362     .priv_id = 1,
2363     .supported_algos = BIT(SA_ALG_CBC_AES) |
2364                BIT(SA_ALG_EBC_AES) |
2365                BIT(SA_ALG_CBC_DES3) |
2366                BIT(SA_ALG_ECB_DES3) |
2367                BIT(SA_ALG_SHA1) |
2368                BIT(SA_ALG_SHA256) |
2369                BIT(SA_ALG_SHA512) |
2370                BIT(SA_ALG_AUTHENC_SHA1_AES) |
2371                BIT(SA_ALG_AUTHENC_SHA256_AES),
2372 };
2373 
2374 static struct sa_match_data am64_match_data = {
2375     .priv = 0,
2376     .priv_id = 0,
2377     .supported_algos = BIT(SA_ALG_CBC_AES) |
2378                BIT(SA_ALG_EBC_AES) |
2379                BIT(SA_ALG_SHA256) |
2380                BIT(SA_ALG_SHA512) |
2381                BIT(SA_ALG_AUTHENC_SHA256_AES),
2382 };
2383 
2384 static const struct of_device_id of_match[] = {
2385     { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
2386     { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
2387     { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
2388     { .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
2389     {},
2390 };
2391 MODULE_DEVICE_TABLE(of, of_match);
2392 
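/*
 * Probe order: map the registers, enable runtime PM, create the
 * security-context DMA pool, request the DMA channels, switch on any
 * engine that is not already running, register the algorithms, and
 * finally populate and link the child nodes.  The error labels unwind in
 * reverse.
 */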
2393 static int sa_ul_probe(struct platform_device *pdev)
2394 {
2395     struct device *dev = &pdev->dev;
2396     struct device_node *node = dev->of_node;
2397     void __iomem *saul_base;
2398     struct sa_crypto_data *dev_data;
2399     u32 status, val;
2400     int ret;
2401 
2402     dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2403     if (!dev_data)
2404         return -ENOMEM;
2405 
2406     dev_data->match_data = of_device_get_match_data(dev);
2407     if (!dev_data->match_data)
2408         return -ENODEV;
2409 
2410     saul_base = devm_platform_ioremap_resource(pdev, 0);
2411     if (IS_ERR(saul_base))
2412         return PTR_ERR(saul_base);
2413 
2414     sa_k3_dev = dev;
2415     dev_data->dev = dev;
2416     dev_data->pdev = pdev;
2417     dev_data->base = saul_base;
2418     platform_set_drvdata(pdev, dev_data);
2419     dev_set_drvdata(sa_k3_dev, dev_data);
2420 
2421     pm_runtime_enable(dev);
2422     ret = pm_runtime_resume_and_get(dev);
2423     if (ret < 0) {
2424         dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
2425         pm_runtime_disable(dev);
2426         return ret;
2427     }
2428 
2429     ret = sa_init_mem(dev_data);
         if (ret)
             goto destroy_dma_pool;    /* dma_pool_destroy(NULL) is a no-op */
 
2430     ret = sa_dma_init(dev_data);
2431     if (ret)
2432         goto destroy_dma_pool;
2433 
2434     spin_lock_init(&dev_data->scid_lock);
2435 
2436     val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2437           SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2438           SA_EEC_TRNG_EN;
2439     status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
2440     /* Write the enable register only if some engine still needs enabling */
2441     if (val & ~status)
2442         writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2443 
2444     sa_register_algos(dev_data);
2445 
2446     ret = of_platform_populate(node, NULL, NULL, dev);
2447     if (ret)
2448         goto release_dma;
2449 
2450     device_for_each_child(dev, dev, sa_link_child);
2451 
2452     return 0;
2453 
2454 release_dma:
2455     sa_unregister_algos(dev);
2456 
2457     dma_release_channel(dev_data->dma_rx2);
2458     dma_release_channel(dev_data->dma_rx1);
2459     dma_release_channel(dev_data->dma_tx);
2460 
2461 destroy_dma_pool:
2462     dma_pool_destroy(dev_data->sc_pool);
2463 
2464     pm_runtime_put_sync(dev);
2465     pm_runtime_disable(dev);
2466 
2467     return ret;
2468 }
2469 
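/*
 * Teardown mirrors probe in reverse: depopulate the children, unregister
 * the algorithms, release the DMA channels and the context pool, then
 * drop the runtime PM references taken in probe.
 */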
2470 static int sa_ul_remove(struct platform_device *pdev)
2471 {
2472     struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2473 
2474     of_platform_depopulate(&pdev->dev);
2475 
2476     sa_unregister_algos(&pdev->dev);
2477 
2478     dma_release_channel(dev_data->dma_rx2);
2479     dma_release_channel(dev_data->dma_rx1);
2480     dma_release_channel(dev_data->dma_tx);
2481 
2482     dma_pool_destroy(dev_data->sc_pool);
2483 
2484     platform_set_drvdata(pdev, NULL);
2485 
2486     pm_runtime_put_sync(&pdev->dev);
2487     pm_runtime_disable(&pdev->dev);
2488 
2489     return 0;
2490 }
2491 
2492 static struct platform_driver sa_ul_driver = {
2493     .probe = sa_ul_probe,
2494     .remove = sa_ul_remove,
2495     .driver = {
2496            .name = "saul-crypto",
2497            .of_match_table = of_match,
2498            },
2499 };
2500 module_platform_driver(sa_ul_driver);
2501 MODULE_LICENSE("GPL v2");