0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023 #include <linux/pagemap.h>
0024 #include <linux/mempool.h>
0025 #include <linux/module.h>
0026 #include <linux/scatterlist.h>
0027 #include <linux/ratelimit.h>
0028 #include <crypto/skcipher.h>
0029 #include "fscrypt_private.h"
0030
/* Number of pages preallocated in the bounce-page mempool; read-only param. */
static unsigned int num_prealloc_crypto_pages = 32;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");

/* Pool of pages used as ciphertext bounce buffers; created lazily in
 * fscrypt_initialize(). */
static mempool_t *fscrypt_bounce_page_pool = NULL;

/* Workqueue on which post-read decryption work items run; allocated in
 * fscrypt_init(). */
static struct workqueue_struct *fscrypt_read_workqueue;
/* Serializes lazy creation of fscrypt_bounce_page_pool. */
static DEFINE_MUTEX(fscrypt_init_mutex);

/* Slab cache for struct fscrypt_info; allocated in fscrypt_init(). */
struct kmem_cache *fscrypt_info_cachep;
0043
/**
 * fscrypt_enqueue_decrypt_work() - enqueue a work item on the fscrypt
 *				    read workqueue
 * @work: the work item to enqueue
 *
 * The workqueue is the WQ_UNBOUND | WQ_HIGHPRI one allocated in
 * fscrypt_init().
 */
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
0049
/*
 * Allocate a ciphertext bounce page from the mempool.  May return NULL
 * depending on @gfp_flags (per mempool_alloc() semantics); callers must
 * check.  Free with fscrypt_free_bounce_page().
 */
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}
0054
0055
0056
0057
0058
0059
0060
0061
/**
 * fscrypt_free_bounce_page() - free a ciphertext bounce page
 * @bounce_page: the bounce page to free, or NULL (no-op)
 *
 * Clears the page_private link (set by fscrypt_encrypt_pagecache_blocks())
 * and the PagePrivate flag before returning the page to the mempool.
 */
void fscrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);
0071
0072
0073
0074
0075
0076
0077
0078
0079
/*
 * Generate the IV for the given logical block number within the given file.
 * The IV is zeroed to the mode's ivsize, then filled per the policy flags:
 *
 * - IV_INO_LBLK_64: low 32 bits are the logical block number, high 32 bits
 *   are the inode number.  Both must fit in 32 bits (hence the WARNs).
 * - IV_INO_LBLK_32: IV is the 32-bit truncated sum of the hashed inode
 *   number and the logical block number.
 * - DIRECT_KEY: the per-file nonce is copied into the IV (the key may be
 *   shared across files), and the block number is stored as usual.
 * - otherwise: IV is just the little-endian 64-bit logical block number.
 */
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			const struct fscrypt_info *ci)
{
	u8 flags = fscrypt_policy_flags(&ci->ci_policy);

	memset(iv, 0, ci->ci_mode->ivsize);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
		WARN_ON_ONCE(lblk_num > U32_MAX);
		WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
		lblk_num |= (u64)ci->ci_inode->i_ino << 32;
	} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
		WARN_ON_ONCE(lblk_num > U32_MAX);
		lblk_num = (u32)(ci->ci_hashed_ino + lblk_num);
	} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
		memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
	}
	iv->lblk_num = cpu_to_le64(lblk_num);
}
0099
0100
/*
 * fscrypt_crypt_block() - en/decrypt one contiguous region of a page
 * @inode:     the inode whose key is used
 * @rw:        FS_ENCRYPT or FS_DECRYPT
 * @lblk_num:  logical block number of the region, used to derive the IV
 * @src_page:  page containing the input data
 * @dest_page: page to receive the output (may equal @src_page for in-place)
 * @len:       length of the region in bytes; must be a nonzero multiple of
 *             FSCRYPT_CONTENTS_ALIGNMENT
 * @offs:      byte offset of the region within both pages
 * @gfp_flags: allocation flags for the skcipher request
 *
 * Performs the crypto synchronously (waits for async completion via
 * crypto_wait_req()).  Return: 0 on success, -errno on failure.
 */
int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
			u64 lblk_num, struct page *src_page,
			struct page *dest_page, unsigned int len,
			unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
	int res = 0;

	/* len is unsigned, so this is effectively a check for len == 0 */
	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
		return -EINVAL;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	/* Allow sleeping and backlogging; completion signaled via &wait. */
	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode, "%scryption failed for block %llu: %d",
			(rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
		return res;
	}
	return 0;
}
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
/**
 * fscrypt_encrypt_pagecache_blocks() - encrypt filesystem blocks from a
 *					pagecache page into a bounce page
 * @page:      the locked pagecache page containing the data to encrypt
 * @len:       size of the data to encrypt, in bytes; must be a nonzero
 *	       multiple of the filesystem block size
 * @offs:      byte offset within @page of the data to encrypt; must be
 *	       block-aligned
 * @gfp_flags: memory allocation flags
 *
 * Each filesystem block in the region is encrypted separately, with its own
 * IV derived from the block's logical number (computed from page->index and
 * @offs).  The ciphertext is written at the same offsets into a freshly
 * allocated bounce page, whose page_private is set to point back at the
 * pagecache page so writeback completion can find it.
 *
 * Return: the ciphertext bounce page on success, or an ERR_PTR() on failure.
 *	   The caller must free it with fscrypt_free_bounce_page().
 */
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
				unsigned int len,
				unsigned int offs,
				gfp_t gfp_flags)

{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	struct page *ciphertext_page;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		(offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return ERR_PTR(-EINVAL);

	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	/* Encrypt one filesystem block at a time. */
	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
					page, ciphertext_page,
					blocksize, i, gfp_flags);
		if (err) {
			fscrypt_free_bounce_page(ciphertext_page);
			return ERR_PTR(err);
		}
	}
	/* Link the bounce page back to the pagecache page. */
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
/**
 * fscrypt_encrypt_block_inplace() - encrypt one filesystem block in place
 * @inode:    the file's inode
 * @page:     the page containing the block to encrypt
 * @len:      size of the block to encrypt, in bytes
 * @offs:     byte offset within @page of the block
 * @lblk_num: the block's logical number, used to derive the IV
 * @gfp_flags: memory allocation flags
 *
 * Thin wrapper: encrypts with @page as both source and destination.
 * Return: 0 on success, -errno on failure.
 */
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
				unsigned int len, unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)
{
	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
				len, offs, gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
/**
 * fscrypt_decrypt_pagecache_blocks() - decrypt filesystem blocks in a
 *					pagecache page, in place
 * @page: the locked pagecache page containing the data to decrypt
 * @len:  size of the data to decrypt, in bytes; must be a nonzero multiple
 *	  of the filesystem block size
 * @offs: byte offset within @page of the data; must be block-aligned
 *
 * Each filesystem block is decrypted separately with its own IV, derived
 * from the logical block number computed from page->index and @offs.
 *
 * Return: 0 on success, -errno on failure.
 */
int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
				unsigned int offs)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		(offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return -EINVAL;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return -EINVAL;

	/* Decrypt one filesystem block at a time, in place. */
	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
					page, blocksize, i, GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
/**
 * fscrypt_decrypt_block_inplace() - decrypt one filesystem block in place
 * @inode:    the file's inode
 * @page:     the page containing the block to decrypt
 * @len:      size of the block to decrypt, in bytes
 * @offs:     byte offset within @page of the block
 * @lblk_num: the block's logical number, used to derive the IV
 *
 * Thin wrapper: decrypts with @page as both source and destination,
 * using GFP_NOFS for the request allocation.
 * Return: 0 on success, -errno on failure.
 */
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
				unsigned int len, unsigned int offs,
				u64 lblk_num)
{
	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
				len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316 int fscrypt_initialize(unsigned int cop_flags)
0317 {
0318 int err = 0;
0319
0320
0321 if (cop_flags & FS_CFLG_OWN_PAGES)
0322 return 0;
0323
0324 mutex_lock(&fscrypt_init_mutex);
0325 if (fscrypt_bounce_page_pool)
0326 goto out_unlock;
0327
0328 err = -ENOMEM;
0329 fscrypt_bounce_page_pool =
0330 mempool_create_page_pool(num_prealloc_crypto_pages, 0);
0331 if (!fscrypt_bounce_page_pool)
0332 goto out_unlock;
0333
0334 err = 0;
0335 out_unlock:
0336 mutex_unlock(&fscrypt_init_mutex);
0337 return err;
0338 }
0339
/*
 * fscrypt_msg() - print a ratelimited fscrypt message
 * @inode: the inode the message pertains to, or NULL
 * @level: printk level prefix string (e.g. KERN_ERR)
 * @fmt:   printf-style format string
 *
 * The message is prefixed with "fscrypt" plus, when available, the
 * filesystem id and inode number.  A single ratelimit state is shared by
 * all callers, so a burst from one site can suppress messages from others.
 */
void fscrypt_msg(const struct inode *inode, const char *level,
		const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* i_ino == 0 can occur before the inode is fully set up; omit it. */
	if (inode && inode->i_ino)
		printk("%sfscrypt (%s, inode %lu): %pV\n",
			level, inode->i_sb->s_id, inode->i_ino, &vaf);
	else if (inode)
		printk("%sfscrypt (%s): %pV\n", level, inode->i_sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
0363
0364
0365
0366
0367
0368
/*
 * fscrypt_init() - module/boot-time initialization
 *
 * Allocates the read workqueue and the fscrypt_info slab cache, then
 * initializes the keyring.  On failure, already-acquired resources are
 * released in reverse order via the goto chain.  Runs as a late_initcall.
 */
static int __init fscrypt_init(void)
{
	int err = -ENOMEM;

	/*
	 * Reads complete on this workqueue; WQ_UNBOUND | WQ_HIGHPRI so
	 * decryption work isn't pinned to the submitting CPU and runs at
	 * elevated priority.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						WQ_UNBOUND | WQ_HIGHPRI,
						num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_queue;

	err = fscrypt_init_keyring();
	if (err)
		goto fail_free_info;

	return 0;

fail_free_info:
	kmem_cache_destroy(fscrypt_info_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return err;
}
late_initcall(fscrypt_init)