0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025 #define pr_fmt(fmt) "blk-crypto: " fmt
0026
0027 #include <linux/blk-crypto-profile.h>
0028 #include <linux/device.h>
0029 #include <linux/atomic.h>
0030 #include <linux/mutex.h>
0031 #include <linux/pm_runtime.h>
0032 #include <linux/wait.h>
0033 #include <linux/blkdev.h>
0034 #include <linux/blk-integrity.h>
0035
/*
 * A single keyslot of a crypto profile.  A slot with slot_refs == 0 sits on
 * the profile's idle_slots LRU list; a slot currently programmed with a key
 * is also linked into the profile's slot_hashtable so the key can be found
 * again quickly.
 */
struct blk_crypto_keyslot {
	atomic_t slot_refs;			/* active users; 0 means idle */
	struct list_head idle_slot_node;	/* entry in profile->idle_slots, valid while idle */
	struct hlist_node hash_node;		/* entry in profile->slot_hashtable, valid while key != NULL */
	const struct blk_crypto_key *key;	/* key currently programmed into this slot, or NULL */
	struct blk_crypto_profile *profile;	/* back-pointer to the owning profile */
};
0043
/*
 * Take exclusive access to the crypto profile for a hardware operation
 * (programming or evicting a keyslot).
 *
 * The device is runtime-resumed *before* taking the write lock, so the lock
 * is never held across the resume itself.
 */
static inline void blk_crypto_hw_enter(struct blk_crypto_profile *profile)
{
	/*
	 * Pin the device active: the upcoming call into the driver's ll_ops
	 * may need the hardware powered up.
	 * NOTE(review): the return value of pm_runtime_get_sync() is ignored
	 * here — presumably resume failures are treated as benign; confirm.
	 */
	if (profile->dev)
		pm_runtime_get_sync(profile->dev);
	down_write(&profile->lock);
}
0055
/*
 * Release exclusive access taken by blk_crypto_hw_enter(): drop the write
 * lock first, then release the runtime-PM reference on the device.
 */
static inline void blk_crypto_hw_exit(struct blk_crypto_profile *profile)
{
	up_write(&profile->lock);
	if (profile->dev)
		pm_runtime_put_sync(profile->dev);
}
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073 int blk_crypto_profile_init(struct blk_crypto_profile *profile,
0074 unsigned int num_slots)
0075 {
0076 unsigned int slot;
0077 unsigned int i;
0078 unsigned int slot_hashtable_size;
0079
0080 memset(profile, 0, sizeof(*profile));
0081 init_rwsem(&profile->lock);
0082
0083 if (num_slots == 0)
0084 return 0;
0085
0086
0087
0088 profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
0089 GFP_KERNEL);
0090 if (!profile->slots)
0091 return -ENOMEM;
0092
0093 profile->num_slots = num_slots;
0094
0095 init_waitqueue_head(&profile->idle_slots_wait_queue);
0096 INIT_LIST_HEAD(&profile->idle_slots);
0097
0098 for (slot = 0; slot < num_slots; slot++) {
0099 profile->slots[slot].profile = profile;
0100 list_add_tail(&profile->slots[slot].idle_slot_node,
0101 &profile->idle_slots);
0102 }
0103
0104 spin_lock_init(&profile->idle_slots_lock);
0105
0106 slot_hashtable_size = roundup_pow_of_two(num_slots);
0107
0108
0109
0110
0111 if (slot_hashtable_size < 2)
0112 slot_hashtable_size = 2;
0113
0114 profile->log_slot_ht_size = ilog2(slot_hashtable_size);
0115 profile->slot_hashtable =
0116 kvmalloc_array(slot_hashtable_size,
0117 sizeof(profile->slot_hashtable[0]), GFP_KERNEL);
0118 if (!profile->slot_hashtable)
0119 goto err_destroy;
0120 for (i = 0; i < slot_hashtable_size; i++)
0121 INIT_HLIST_HEAD(&profile->slot_hashtable[i]);
0122
0123 return 0;
0124
0125 err_destroy:
0126 blk_crypto_profile_destroy(profile);
0127 return -ENOMEM;
0128 }
0129 EXPORT_SYMBOL_GPL(blk_crypto_profile_init);
0130
/* devm action callback: destroy the profile when its device is released. */
static void blk_crypto_profile_destroy_callback(void *profile)
{
	blk_crypto_profile_destroy(profile);
}
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147 int devm_blk_crypto_profile_init(struct device *dev,
0148 struct blk_crypto_profile *profile,
0149 unsigned int num_slots)
0150 {
0151 int err = blk_crypto_profile_init(profile, num_slots);
0152
0153 if (err)
0154 return err;
0155
0156 return devm_add_action_or_reset(dev,
0157 blk_crypto_profile_destroy_callback,
0158 profile);
0159 }
0160 EXPORT_SYMBOL_GPL(devm_blk_crypto_profile_init);
0161
0162 static inline struct hlist_head *
0163 blk_crypto_hash_bucket_for_key(struct blk_crypto_profile *profile,
0164 const struct blk_crypto_key *key)
0165 {
0166 return &profile->slot_hashtable[
0167 hash_ptr(key, profile->log_slot_ht_size)];
0168 }
0169
0170 static void
0171 blk_crypto_remove_slot_from_lru_list(struct blk_crypto_keyslot *slot)
0172 {
0173 struct blk_crypto_profile *profile = slot->profile;
0174 unsigned long flags;
0175
0176 spin_lock_irqsave(&profile->idle_slots_lock, flags);
0177 list_del(&slot->idle_slot_node);
0178 spin_unlock_irqrestore(&profile->idle_slots_lock, flags);
0179 }
0180
0181 static struct blk_crypto_keyslot *
0182 blk_crypto_find_keyslot(struct blk_crypto_profile *profile,
0183 const struct blk_crypto_key *key)
0184 {
0185 const struct hlist_head *head =
0186 blk_crypto_hash_bucket_for_key(profile, key);
0187 struct blk_crypto_keyslot *slotp;
0188
0189 hlist_for_each_entry(slotp, head, hash_node) {
0190 if (slotp->key == key)
0191 return slotp;
0192 }
0193 return NULL;
0194 }
0195
/*
 * Find the keyslot programmed with @key, if any, and take a reference to it.
 * Returns NULL if the key is not in any slot.
 */
static struct blk_crypto_keyslot *
blk_crypto_find_and_grab_keyslot(struct blk_crypto_profile *profile,
				 const struct blk_crypto_key *key)
{
	struct blk_crypto_keyslot *slot;

	slot = blk_crypto_find_keyslot(profile, key);
	if (!slot)
		return NULL;
	if (atomic_inc_return(&slot->slot_refs) == 1) {
		/*
		 * We took the first reference, i.e. the slot was idle until
		 * now, so pull it off the idle (LRU) list.
		 */
		blk_crypto_remove_slot_from_lru_list(slot);
	}
	return slot;
}
0211
0212
0213
0214
0215
0216
0217
0218 unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot)
0219 {
0220 return slot - slot->profile->slots;
0221 }
0222 EXPORT_SYMBOL_GPL(blk_crypto_keyslot_index);
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
/**
 * blk_crypto_get_keyslot() - Get a keyslot for a key, if needed.
 * @profile: the device's crypto profile
 * @key: the key that will be used
 * @slot_ptr: on success, set to the keyslot that was acquired, or to NULL if
 *	      the device has no keyslots (num_slots == 0)
 *
 * If the key is already programmed into a slot, an additional reference to
 * that slot is taken.  Otherwise an idle slot is claimed (waiting for one to
 * become idle if necessary), the driver's ->keyslot_program() is called, and
 * the slot is moved to the hash bucket for @key.
 *
 * Return: BLK_STS_OK on success, or the blk_status_t translation of the
 *	   driver's ->keyslot_program() error.
 */
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr)
{
	struct blk_crypto_keyslot *slot;
	int slot_idx;
	int err;

	*slot_ptr = NULL;

	/*
	 * A profile with no keyslots requires no per-key programming;
	 * the caller proceeds with *slot_ptr == NULL.
	 */
	if (profile->num_slots == 0)
		return BLK_STS_OK;

	/* Fast path: the key is already programmed into a keyslot. */
	down_read(&profile->lock);
	slot = blk_crypto_find_and_grab_keyslot(profile, key);
	up_read(&profile->lock);
	if (slot)
		goto success;

	for (;;) {
		blk_crypto_hw_enter(profile);
		/*
		 * Re-check under the write lock: another task may have
		 * programmed the key while we were waiting for the lock.
		 */
		slot = blk_crypto_find_and_grab_keyslot(profile, key);
		if (slot) {
			blk_crypto_hw_exit(profile);
			goto success;
		}

		/*
		 * The key isn't programmed anywhere; we need an idle slot to
		 * program it into.  If none is available, drop the lock and
		 * sleep until blk_crypto_put_keyslot() idles one.
		 */
		if (!list_empty(&profile->idle_slots))
			break;

		blk_crypto_hw_exit(profile);
		wait_event(profile->idle_slots_wait_queue,
			   !list_empty(&profile->idle_slots));
	}

	/* Reclaim the least-recently-used idle slot for the new key. */
	slot = list_first_entry(&profile->idle_slots, struct blk_crypto_keyslot,
				idle_slot_node);
	slot_idx = blk_crypto_keyslot_index(slot);

	err = profile->ll_ops.keyslot_program(profile, key, slot_idx);
	if (err) {
		/* The slot is still idle; wake any other waiters and fail. */
		wake_up(&profile->idle_slots_wait_queue);
		blk_crypto_hw_exit(profile);
		return errno_to_blk_status(err);
	}

	/* Re-hash the slot from its old key (if any) to the new key. */
	if (slot->key)
		hlist_del(&slot->hash_node);
	slot->key = key;
	hlist_add_head(&slot->hash_node,
		       blk_crypto_hash_bucket_for_key(profile, key));

	atomic_set(&slot->slot_refs, 1);

	blk_crypto_remove_slot_from_lru_list(slot);

	blk_crypto_hw_exit(profile);
success:
	*slot_ptr = slot;
	return BLK_STS_OK;
}
0311
0312
0313
0314
0315
0316
0317
/**
 * blk_crypto_put_keyslot() - Release a reference to a keyslot
 * @slot: the keyslot to release a reference of (may be NULL, in which case
 *	  this is a no-op)
 */
void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot)
{
	struct blk_crypto_profile *profile;
	unsigned long flags;

	if (!slot)
		return;

	profile = slot->profile;

	/*
	 * If this was the last reference, atomically (w.r.t. idle_slots_lock)
	 * put the slot back on the idle LRU list, then wake up any task in
	 * blk_crypto_get_keyslot() that is waiting for an idle slot.
	 */
	if (atomic_dec_and_lock_irqsave(&slot->slot_refs,
					&profile->idle_slots_lock, flags)) {
		list_add_tail(&slot->idle_slot_node, &profile->idle_slots);
		spin_unlock_irqrestore(&profile->idle_slots_lock, flags);
		wake_up(&profile->idle_slots_wait_queue);
	}
}
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344 bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
0345 const struct blk_crypto_config *cfg)
0346 {
0347 if (!profile)
0348 return false;
0349 if (!(profile->modes_supported[cfg->crypto_mode] & cfg->data_unit_size))
0350 return false;
0351 if (profile->max_dun_bytes_supported < cfg->dun_bytes)
0352 return false;
0353 return true;
0354 }
0355
0356
0357
0358
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
/**
 * __blk_crypto_evict_key() - Evict a key from a device's crypto profile
 * @profile: the device's crypto profile
 * @key: the key to evict
 *
 * If the key occupies a keyslot, the slot is unhashed and its key cleared
 * after the driver's ->keyslot_evict() succeeds.  A key that is still in use
 * (slot_refs != 0) is not evicted.
 *
 * Return: 0 on success (including when the key wasn't programmed anywhere),
 *	   -EBUSY if the keyslot is still referenced, or the error from the
 *	   driver's ->keyslot_evict().
 */
int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key)
{
	struct blk_crypto_keyslot *slot;
	int err = 0;

	/*
	 * Profiles without keyslots have no slot to clear, but the driver may
	 * still want to know about the eviction; pass slot index -1.
	 */
	if (profile->num_slots == 0) {
		if (profile->ll_ops.keyslot_evict) {
			blk_crypto_hw_enter(profile);
			err = profile->ll_ops.keyslot_evict(profile, key, -1);
			blk_crypto_hw_exit(profile);
			return err;
		}
		return 0;
	}

	blk_crypto_hw_enter(profile);
	slot = blk_crypto_find_keyslot(profile, key);
	if (!slot)
		goto out_unlock;

	/* Evicting a key that is still in use indicates a caller bug. */
	if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) {
		err = -EBUSY;
		goto out_unlock;
	}
	err = profile->ll_ops.keyslot_evict(profile, key,
					    blk_crypto_keyslot_index(slot));
	if (err)
		goto out_unlock;

	/* The hardware forgot the key; forget it on our side too. */
	hlist_del(&slot->hash_node);
	slot->key = NULL;
	err = 0;
out_unlock:
	blk_crypto_hw_exit(profile);
	return err;
}
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
/**
 * blk_crypto_reprogram_all_keys() - Re-program all keyslots
 * @profile: the crypto profile whose keyslots to re-program
 *
 * Re-programs every keyslot that currently holds a key, via the driver's
 * ->keyslot_program().  A failure to re-program a slot only triggers a
 * WARN_ON; it is not otherwise reported.
 *
 * NOTE(review): intended for when the hardware may have lost its keyslot
 * contents, e.g. across a power-management event — confirm with callers.
 */
void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile)
{
	unsigned int slot;

	if (profile->num_slots == 0)
		return;

	/* Hold the write lock so no keyslot changes under us. */
	down_write(&profile->lock);
	for (slot = 0; slot < profile->num_slots; slot++) {
		const struct blk_crypto_key *key = profile->slots[slot].key;
		int err;

		if (!key)
			continue;

		err = profile->ll_ops.keyslot_program(profile, key, slot);
		WARN_ON(err);
	}
	up_write(&profile->lock);
}
EXPORT_SYMBOL_GPL(blk_crypto_reprogram_all_keys);
0442
0443 void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
0444 {
0445 if (!profile)
0446 return;
0447 kvfree(profile->slot_hashtable);
0448 kvfree_sensitive(profile->slots,
0449 sizeof(profile->slots[0]) * profile->num_slots);
0450 memzero_explicit(profile, sizeof(*profile));
0451 }
0452 EXPORT_SYMBOL_GPL(blk_crypto_profile_destroy);
0453
/*
 * Attach @profile to request queue @q so the block layer uses it for inline
 * encryption.  Refuses (returns false) if the queue supports blk-integrity,
 * since the two features are not supported together here.
 */
bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q)
{
	if (blk_integrity_queue_supports_integrity(q)) {
		pr_warn("Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n");
		return false;
	}
	q->crypto_profile = profile;
	return true;
}
EXPORT_SYMBOL_GPL(blk_crypto_register);
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
0475
0476
0477
0478 void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
0479 const struct blk_crypto_profile *child)
0480 {
0481 if (child) {
0482 unsigned int i;
0483
0484 parent->max_dun_bytes_supported =
0485 min(parent->max_dun_bytes_supported,
0486 child->max_dun_bytes_supported);
0487 for (i = 0; i < ARRAY_SIZE(child->modes_supported); i++)
0488 parent->modes_supported[i] &= child->modes_supported[i];
0489 } else {
0490 parent->max_dun_bytes_supported = 0;
0491 memset(parent->modes_supported, 0,
0492 sizeof(parent->modes_supported));
0493 }
0494 }
0495 EXPORT_SYMBOL_GPL(blk_crypto_intersect_capabilities);
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505 bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
0506 const struct blk_crypto_profile *reference)
0507 {
0508 int i;
0509
0510 if (!reference)
0511 return true;
0512
0513 if (!target)
0514 return false;
0515
0516 for (i = 0; i < ARRAY_SIZE(target->modes_supported); i++) {
0517 if (reference->modes_supported[i] & ~target->modes_supported[i])
0518 return false;
0519 }
0520
0521 if (reference->max_dun_bytes_supported >
0522 target->max_dun_bytes_supported)
0523 return false;
0524
0525 return true;
0526 }
0527 EXPORT_SYMBOL_GPL(blk_crypto_has_capabilities);
0528
0529
0530
0531
0532
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552 void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
0553 const struct blk_crypto_profile *src)
0554 {
0555 memcpy(dst->modes_supported, src->modes_supported,
0556 sizeof(dst->modes_supported));
0557
0558 dst->max_dun_bytes_supported = src->max_dun_bytes_supported;
0559 }
0560 EXPORT_SYMBOL_GPL(blk_crypto_update_capabilities);