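// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * LRW: a tweakable block cipher mode, as defined by Cyril Guyot for the
 * IEEE P1619 (SISWG) effort.
 *
 * Each LRW_BLOCK_SIZE block at index I is processed as
 *
 *	C = E(key1, P xor T) xor T,  where  T = I * key2  in GF(2^128)
 *
 * This template wraps an ECB-capable cipher (keyed with key1) and applies
 * the tweak in separate pre- and post-processing passes around a single
 * inner 'ecb(...)' request; key2 occupies the last LRW_BLOCK_SIZE bytes
 * of the supplied key.
 */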
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BLOCK_SIZE 16

struct lrw_tfm_ctx {
	struct crypto_skcipher *child;
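
	/*
	 * Precomputed table for multiplying an arbitrary 128-bit value
	 * by key2 in GF(2^128); used to derive the first tweak of each
	 * request from the block index given in the IV.
	 */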
	struct gf128mul_64k *table;
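
	/*
	 * mulinc[i] is key2 multiplied by the 128-bit value whose bits
	 * 0..i are all set.  When the block counter is incremented and
	 * its lowest clear bit is at position i, exactly bits 0..i flip,
	 * so XOR-ing mulinc[i] into the current tweak yields the tweak
	 * for the next block without another full multiplication.
	 */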
	be128 mulinc[128];
};

struct lrw_request_ctx {
	be128 t;
	struct skcipher_request subreq;
};
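
/*
 * Set bit number @bit of the 128-bit value at @b, with bits counted in
 * the big-endian ("bbe") convention used by gf128mul; the XOR translates
 * the index into the native bit numbering that __set_bit() expects.
 */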
static inline void lrw_setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}
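
/*
 * The last LRW_BLOCK_SIZE bytes of the key are the tweak key (key2);
 * everything before them is handed to the underlying cipher as key1.
 */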
static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
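
	/* initialize multiplication table for key2 */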
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;
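
	/* initialize the mulinc[] optimization table (see above) */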
	for (i = 0; i < 128; i++) {
		lrw_setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
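
/*
 * Returns the number of trailing one bits in the 128-bit counter (held
 * as four u32 words, least significant word first) and increments the
 * counter by one as a side effect.  The result indexes mulinc[]: it is
 * the highest bit position that flips during the increment.
 *
 * For example, with u32 counter[4] = { 0xffffffff, 1, 0, 0 },
 * lrw_next_index(counter) returns 33 and leaves { 0, 2, 0, 0 }.
 */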
static int lrw_next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);

		counter[i] = 0;
		res += 32;
	}
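
	/*
	 * The counter wrapped from all ones to all zeros: every bit
	 * flipped, so the difference is mulinc[127].
	 */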
	return 127;
}
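
/*
 * The tweak masks are computed twice, once before and once after the
 * inner ECB pass (lrw_xor_tweak_pre() and lrw_xor_tweak_post()).
 * Recomputing them is usually cheaper than allocating a temporary
 * buffer to carry the masks across the inner encryption or decryption.
 */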
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
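		/* set to our TFM to enforce correct alignment: */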
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);
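
			/* T <- I*key2, stepping to the next block index
			 * via the precomputed mulinc[] deltas */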
			be128_xor(&t, &t,
				  &ctx->mulinc[lrw_next_index(counter)]);
		} while ((avail -= bs) >= bs);
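
		/*
		 * On the final chunk of the second pass, write the
		 * incremented counter back so the IV reflects the next
		 * block index.
		 */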
		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int lrw_xor_tweak_pre(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, false);
}

static int lrw_xor_tweak_post(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, true);
}

static void lrw_crypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
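
		/*
		 * The completion callback may run in atomic context, so
		 * the post-processing xor pass must not sleep.
		 */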
		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = lrw_xor_tweak_post(req);
	}

	skcipher_request_complete(req, err);
}

static void lrw_init_crypt(struct skcipher_request *req)
{
	const struct lrw_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
				      req);
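	/* pass req->iv as IV (it is used by xor_tweak; ECB will ignore it) */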
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen, req->iv);
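
	/* calculate the first value of T: load the block index I from the IV */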
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
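
	/* T <- I*key2 */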
	gf128mul_64k_bbe(&rctx->t, ctx->table);
}

static int lrw_encrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_encrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_decrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_decrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct lrw_request_ctx));

	return 0;
}

static void lrw_exit_tfm(struct crypto_skcipher *tfm)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void lrw_free_instance(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
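	/* fall back to "ecb(name)" if the bare name cannot be grabbed */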
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn,
					   skcipher_crypto_instance(inst),
					   ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;
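
	/*
	 * The inner algorithm must be an ECB instance, "ecb(x)"; derive
	 * this instance's cra_name, "lrw(x)", from the inner name.
	 */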
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned int len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(be128) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);

	inst->alg.init = lrw_init_tfm;
	inst->alg.exit = lrw_exit_tfm;

	inst->alg.setkey = lrw_setkey;
	inst->alg.encrypt = lrw_encrypt;
	inst->alg.decrypt = lrw_decrypt;

	inst->free = lrw_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		lrw_free_instance(inst);
	}
	return err;
}

static struct crypto_template lrw_tmpl = {
	.name = "lrw",
	.create = lrw_create,
	.module = THIS_MODULE,
};

static int __init lrw_module_init(void)
{
	return crypto_register_template(&lrw_tmpl);
}

static void __exit lrw_module_exit(void)
{
	crypto_unregister_template(&lrw_tmpl);
}

subsys_initcall(lrw_module_init);
module_exit(lrw_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
MODULE_SOFTDEP("pre: ecb");