// SPDX-License-Identifier: GPL-2.0-only
/*
 * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/module.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

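/*
 * SHA-512 state kept across calls: the hash words in the big-endian layout
 * the NX coprocessor produces, a running byte count, and a buffer holding
 * any partial block left over between updates.
 */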
struct sha512_state_be {
	__be64 state[SHA512_DIGEST_SIZE / 8];
	u64 count[2];
	u8 buf[SHA512_BLOCK_SIZE];
};

static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
	int err;

	err = nx_crypto_ctx_sha_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);

	return 0;
}

static int nx_sha512_init(struct shash_desc *desc)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);

	memset(sctx, 0, sizeof *sctx);

	sctx->state[0] = __cpu_to_be64(SHA512_H0);
	sctx->state[1] = __cpu_to_be64(SHA512_H1);
	sctx->state[2] = __cpu_to_be64(SHA512_H2);
	sctx->state[3] = __cpu_to_be64(SHA512_H3);
	sctx->state[4] = __cpu_to_be64(SHA512_H4);
	sctx->state[5] = __cpu_to_be64(SHA512_H5);
	sctx->state[6] = __cpu_to_be64(SHA512_H6);
	sctx->state[7] = __cpu_to_be64(SHA512_H7);
	sctx->count[0] = 0;

	return 0;
}

static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *out_sg;
	u64 to_process, leftover = 0, total;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u32 max_sg_len;
	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 *  1: < SHA512_BLOCK_SIZE: copy into state, return 0
	 *  2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
	if (total < SHA512_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);
		sctx->count[0] += len;
		goto out;
	}

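	/*
	 * Seed the coprocessor with the current intermediate hash, and flag
	 * this pass as an intermediate continuation of a larger message.
	 */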
	memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len / sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen / NX_PAGE_SIZE);

	data_len = SHA512_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, max_sg_len);
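	/*
	 * The negative (base - end) result is intentional: a negative length
	 * in struct vio_pfo_op tells the hypervisor that the buffer is a
	 * scatter/gather list rather than flat data (see asm/vio.h).
	 */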
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (data_len != SHA512_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	do {
		int used_sgs = 0;
		struct nx_sg *in_sg = nx_ctx->in_sg;

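		/* bytes buffered by a previous update go into the sg list first */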
		if (buf_len) {
			data_len = buf_len;
			in_sg = nx_build_sg_list(in_sg,
						 (u8 *) sctx->buf,
						 &data_len, max_sg_len);

			if (data_len != buf_len) {
				rc = -EINVAL;
				goto out;
			}
			used_sgs = in_sg - nx_ctx->in_sg;
		}

		/* to_process: SHA512_BLOCK_SIZE aligned chunk to be
		 * processed in this iteration. This value is restricted
		 * by the sg list limits and number of sgs we already used
		 * for leftover data. (see above)
		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
		 * but because data may not be aligned, we need to account
		 * for that too. */
		to_process = min_t(u64, total,
				   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);

		data_len = to_process - buf_len;
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 &data_len, max_sg_len);

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

		if (data_len != (to_process - buf_len)) {
			rc = -EINVAL;
			goto out;
		}

		to_process = data_len + buf_len;
		leftover = total - to_process;

		/*
		 * we've hit the nx chip previously and we're updating
		 * again, so copy over the partial digest.
		 */
		memcpy(csbcpb->cpb.sha512.input_partial_digest,
		       csbcpb->cpb.sha512.message_digest,
		       SHA512_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

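		/* hand the sg lists to the coprocessor and wait for the result */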
		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha512_ops));

		total -= to_process;
		data += to_process - buf_len;
		buf_len = 0;

	} while (leftover >= SHA512_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data, leftover);

	sctx->count[0] += len;
	memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u32 max_sg_len;
	u64 count0;
	unsigned long irq_flags;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len / sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen / NX_PAGE_SIZE);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
		       SHA512_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

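	/* the CPB wants the message length in bits; only count[0] is passed,
	 * so the high half of SHA-512's 128-bit length stays unused here */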
	count0 = sctx->count[0] * 8;

	csbcpb->cpb.sha512.message_bit_length_lo = count0;

	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
				 max_sg_len);

	if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA512_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  max_sg_len);

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha512_ops));
	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));

	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

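/* export/import copy the raw hash state, so a partially-hashed message can
 * be suspended and resumed through the shash export/import interface */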
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

struct shash_alg nx_shash_sha512_alg = {
	.digestsize = SHA512_DIGEST_SIZE,
	.init       = nx_sha512_init,
	.update     = nx_sha512_update,
	.final      = nx_sha512_final,
	.export     = nx_sha512_export,
	.import     = nx_sha512_import,
	.descsize   = sizeof(struct sha512_state_be),
	.statesize  = sizeof(struct sha512_state_be),
	.base       = {
		.cra_name        = "sha512",
		.cra_driver_name = "sha512-nx",
		.cra_priority    = 300,
		.cra_blocksize   = SHA512_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha512_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};