/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *name; /* name of this mode, shown in sysfs */
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

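/*
 * Register/unregister the request_queue's "crypto" sysfs attributes, which
 * publish the queue's inline encryption capabilities.
 */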
int blk_crypto_sysfs_register(struct request_queue *q);

void blk_crypto_sysfs_unregister(struct request_queue *q);

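/*
 * Increment the data unit number @dun (an array of u64s, least significant
 * word first) by @inc, carrying between the words as needed.
 */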
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

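/*
 * Return true if @rq and @bio use the same encryption context (or neither has
 * one); DUN contiguity is checked separately by the merge helpers below.
 */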
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

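/*
 * Return true if data protected by @bc1, spanning @bc1_bytes bytes, may be
 * immediately followed by data protected by @bc2: the contexts must use the
 * same key, and @bc2's DUN must follow contiguously from @bc1's.
 */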
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

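/* Return true if @bio may be merged at the back of @req. */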
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

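/* Return true if @bio may be merged at the front of @req. */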
static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

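/* Return true if @next may be merged onto the back of @req. */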
static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct request_queue *q)
{
	return 0;
}

static inline void blk_crypto_sysfs_unregister(struct request_queue *q) { }

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

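/*
 * Advance @bio's encryption context by @bytes of data: the DUN is incremented
 * by the number of data units spanned.
 */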
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

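/* Free @bio's encryption context, if it has one. */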
void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

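/*
 * When @bio is front-merged into @rq, the merged request now starts at @bio's
 * data unit number, so copy it into @rq's encryption context.
 */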
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

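/*
 * Check that @bio's encryption context is valid and supported before the bio
 * is submitted, possibly redirecting it to the crypto fallback (which may
 * replace *bio_ptr).  On failure, the bio is ended and false is returned.
 */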
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

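/*
 * Acquire a keyslot for an encrypted request's key, returning a blk_status_t
 * suitable for failing the request on error.
 */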
blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

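/* Release the keyslot and free the crypt context of an encrypted request. */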
void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible
 *	   if @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

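/*
 * Ensure the crypto API fallback is ready to service @mode_num, allocating
 * its transforms on first use.
 */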
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */