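/* net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption */
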
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/rng.h>
#include "crypto.h"
#include "msg.h"
#include "bcast.h"

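/* TX/RX key lifetimes and grace periods */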
#define TIPC_TX_GRACE_PERIOD	msecs_to_jiffies(5000)
#define TIPC_TX_LASTING_TIME	msecs_to_jiffies(10000)
#define TIPC_RX_ACTIVE_LIM	msecs_to_jiffies(3000)
#define TIPC_RX_PASSIVE_LIM	msecs_to_jiffies(15000)

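/* Default & upper limit for the number of TFMs allocated per key */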
#define TIPC_MAX_TFMS_DEF	10
#define TIPC_MAX_TFMS_LIM	1000

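/* Default rekeying interval: one day (the unit is minutes) */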
#define TIPC_REKEYING_INTV_DEF	(60 * 24)

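/* TIPC key ids */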
enum {
	KEY_MASTER = 0,
	KEY_MIN = KEY_MASTER,
	KEY_1 = 1,
	KEY_2,
	KEY_3,
	KEY_MAX = KEY_3,
};

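/* TIPC crypto statistics' counters */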
enum {
	STAT_OK,
	STAT_NOK,
	STAT_ASYNC,
	STAT_ASYNC_OK,
	STAT_ASYNC_NOK,
	STAT_BADKEYS,
	STAT_BADMSGS = STAT_BADKEYS,
	STAT_NOKEYS,
	STAT_SWITCHES,

	MAX_STATS,
};

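/* Human-readable names for the statistics counters above */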
static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
					"async_nok", "badmsgs", "nokeys",
					"switches"};

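/* Max number of TFMs allowed per key, configurable via sysctl */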
int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;

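/* Key exchange switch, default: enabled */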
int sysctl_tipc_key_exchange_enabled __read_mostly = 1;

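/**
 * struct tipc_key - TIPC keys' status indicator
 *
 *         7     6     5     4     3     2     1     0
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 * key: | (reserved)|passive idx| active idx|pending idx|
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 */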
struct tipc_key {
#define KEY_BITS (2)
#define KEY_MASK ((1 << KEY_BITS) - 1)
	union {
		struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
			u8 pending:2,
			   active:2,
			   passive:2,
			   reserved:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:2,
			   passive:2,
			   active:2,
			   pending:2;
#else
#error "Please fix <asm/byteorder.h>"
#endif
		} __packed;
		u8 keys;
	};
};

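/**
 * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
 * @tfm: cipher handle/key
 * @list: linked list of TFMs
 */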
struct tipc_tfm {
	struct crypto_aead *tfm;
	struct list_head list;
};

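/**
 * struct tipc_aead - TIPC AEAD key structure
 * @tfm_entry: per-cpu pointer to one entry in the TFM list
 * @crypto: TIPC crypto that owns this key
 * @cloned: reference to the source key, in case this one was cloned
 * @users: the number of the key users (TX/RX)
 * @salt: the key's SALT value
 * @authsize: authentication tag size (max = 16)
 * @mode: the key's mode (cluster or per-node)
 * @hint: a hint for the user key, the last bytes as hex string
 * @rcu: struct rcu_head
 * @key: the user key data
 * @gen: the key's generation
 * @seqno: the key's sequence number counter
 * @refcnt: the bare refcount of the key
 */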
struct tipc_aead {
#define TIPC_AEAD_HINT_LEN (5)
	struct tipc_tfm * __percpu *tfm_entry;
	struct tipc_crypto *crypto;
	struct tipc_aead *cloned;
	atomic_t users;
	u32 salt;
	u8 authsize;
	u8 mode;
	char hint[2 * TIPC_AEAD_HINT_LEN + 1];
	struct rcu_head rcu;
	struct tipc_aead_key *key;
	u16 gen;

	atomic64_t seqno ____cacheline_aligned;
	refcount_t refcnt ____cacheline_aligned;

} ____cacheline_aligned;

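/**
 * struct tipc_crypto_stats - TIPC per-cpu crypto statistics
 * @stat: array of crypto statistics counters
 */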
struct tipc_crypto_stats {
	unsigned int stat[MAX_STATS];
};

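/**
 * struct tipc_crypto - TIPC TX/RX crypto structure
 * @net: struct net
 * @node: TIPC node (RX)
 * @aead: array of pointers to AEAD keys for encryption/decryption
 * @peer_rx_active: replicated peer RX active key index
 * @key_gen: TX/RX key generation
 * @key: the key states
 * @skey_mode: session key's mode
 * @skey: received session key
 * @wq: common workqueue on TX crypto
 * @work: delayed work scheduled for TX/RX
 * @key_distr: key distributing state
 * @rekeying_intv: rekeying interval (in minutes)
 * @stats: the crypto statistics
 * @name: the crypto name
 * @sndnxt: the per-peer sndnxt (TX)
 * @timer1: general timer 1 (jiffies)
 * @timer2: general timer 2 (jiffies)
 * @working: the crypto is working or not
 * @key_master: flag indicates if master key exists
 * @legacy_user: flag indicates if a peer joins w/o master key (for bwd comp.)
 * @nokey: no key indication
 * @flags: the union of the flag bits above
 * @lock: tipc_key lock
 */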
struct tipc_crypto {
	struct net *net;
	struct tipc_node *node;
	struct tipc_aead __rcu *aead[KEY_MAX + 1];
	atomic_t peer_rx_active;
	u16 key_gen;
	struct tipc_key key;
	u8 skey_mode;
	struct tipc_aead_key *skey;
	struct workqueue_struct *wq;
	struct delayed_work work;
#define KEY_DISTR_SCHED		1
#define KEY_DISTR_COMPL		2
	atomic_t key_distr;
	u32 rekeying_intv;

	struct tipc_crypto_stats __percpu *stats;
	char name[48];

	atomic64_t sndnxt ____cacheline_aligned;
	unsigned long timer1;
	unsigned long timer2;
	union {
		struct {
			u8 working:1;
			u8 key_master:1;
			u8 legacy_user:1;
			u8 nokey:1;
		};
		u8 flags;
	};
	spinlock_t lock;

} ____cacheline_aligned;

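/* struct tipc_crypto_tx_ctx - TX context for callbacks */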
struct tipc_crypto_tx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
	struct tipc_media_addr dst;
};

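/* struct tipc_crypto_rx_ctx - RX context for callbacks */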
struct tipc_crypto_rx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
};

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
static inline void tipc_aead_put(struct tipc_aead *aead);
static void tipc_aead_free(struct rcu_head *rp);
static int tipc_aead_users(struct tipc_aead __rcu *aead);
static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode);
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg);
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode);
static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err);
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b);
static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err);
static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx);
static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending);
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key);
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key);
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type);
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err);
static void tipc_crypto_do_cmd(struct net *net, int cmd);
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf);
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode);
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
static void tipc_crypto_work_tx(struct work_struct *work);
static void tipc_crypto_work_rx(struct work_struct *work);
static int tipc_aead_key_generate(struct tipc_aead_key *skey);

#define is_tx(crypto) (!(crypto)->node)
#define is_rx(crypto) (!is_tx(crypto))

#define key_next(cur) ((cur) % KEY_MAX + 1)

#define tipc_aead_rcu_ptr(rcu_ptr, lock)				\
	rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))

#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock)			\
do {									\
	struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr),	\
						lockdep_is_held(lock));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	tipc_aead_put(__tmp);						\
} while (0)

#define tipc_crypto_key_detach(rcu_ptr, lock)				\
	tipc_aead_rcu_replace((rcu_ptr), NULL, lock)

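/**
 * tipc_aead_key_validate - Validate a AEAD user key
 * @ukey: the user key to be validated
 * @info: the netlink info to set an error message to, if any
 */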
int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
{
	int keylen;

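	/* Check if the algorithm exists */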
	if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
		GENL_SET_ERR_MSG(info, "unable to load the algorithm (is the module present?)");
		return -ENODEV;
	}

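	/* Currently, only the "gcm(aes)" cipher algorithm is supported */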
	if (strcmp(ukey->alg_name, "gcm(aes)")) {
		GENL_SET_ERR_MSG(info, "the algorithm is not supported yet");
		return -ENOTSUPP;
	}

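	/* Check the key length */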
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
	if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
		GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
		return -EKEYREJECTED;
	}

	return 0;
}

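/**
 * tipc_aead_key_generate - Generate new session key
 * @skey: input/output key with new content
 *
 * Return: 0 in case of success, otherwise < 0
 */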
static int tipc_aead_key_generate(struct tipc_aead_key *skey)
{
	int rc = 0;

	rc = crypto_get_default_rng();
	if (likely(!rc)) {
		rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
					  skey->keylen);
		crypto_put_default_rng();
	}

	return rc;
}

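/* tipc_aead_get - Get an AEAD key reference under RCU, or NULL if the key is
 * gone or its refcount has already dropped to zero.
 */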
static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
		tmp = NULL;
	rcu_read_unlock();

	return tmp;
}

static inline void tipc_aead_put(struct tipc_aead *aead)
{
	if (aead && refcount_dec_and_test(&aead->refcnt))
		call_rcu(&aead->rcu, tipc_aead_free);
}

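/**
 * tipc_aead_free - Release AEAD key and its TFM list, called as RCU callback
 * @rp: rcu head pointer
 */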
static void tipc_aead_free(struct rcu_head *rp)
{
	struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
	struct tipc_tfm *tfm_entry, *head, *tmp;

	if (aead->cloned) {
		tipc_aead_put(aead->cloned);
	} else {
		head = *get_cpu_ptr(aead->tfm_entry);
		put_cpu_ptr(aead->tfm_entry);
		list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
			crypto_free_aead(tfm_entry->tfm);
			list_del(&tfm_entry->list);
			kfree(tfm_entry);
		}

		crypto_free_aead(head->tfm);
		list_del(&head->list);
		kfree(head);
	}
	free_percpu(aead->tfm_entry);
	kfree_sensitive(aead->key);
	kfree(aead);
}

static int tipc_aead_users(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;
	int users = 0;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		users = atomic_read(&tmp->users);
	rcu_read_unlock();

	return users;
}

static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, 1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, -1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
{
	struct tipc_aead *tmp;
	int cur;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp) {
		do {
			cur = atomic_read(&tmp->users);
			if (cur == val)
				break;
		} while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
	}
	rcu_read_unlock();
}

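/**
 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
 * @aead: the AEAD key pointer
 */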
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
	struct tipc_tfm **tfm_entry;
	struct crypto_aead *tfm;

	tfm_entry = get_cpu_ptr(aead->tfm_entry);
	*tfm_entry = list_next_entry(*tfm_entry, list);
	tfm = (*tfm_entry)->tfm;
	put_cpu_ptr(tfm_entry);

	return tfm;
}

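/**
 * tipc_aead_init - Initiate TIPC AEAD
 * @aead: returned new TIPC AEAD key handle pointer
 * @ukey: pointer to user key data
 * @mode: the key mode
 *
 * Allocate a (list of) new cipher transformation (TFM) with the specific user
 * key data if valid. The number of the allocated TFMs can be set via the
 * sysctl "net.tipc.max_tfms" first.
 *
 * Return: 0 if the initiation is successful, otherwise: < 0
 */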
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode)
{
	struct tipc_tfm *tfm_entry, *head;
	struct crypto_aead *tfm;
	struct tipc_aead *tmp;
	int keylen, err, cpu;
	int tfm_cnt = 0;

	if (unlikely(*aead))
		return -EEXIST;

	tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
	if (unlikely(!tmp))
		return -ENOMEM;

	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;

	tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
	if (!tmp->tfm_entry) {
		kfree_sensitive(tmp);
		return -ENOMEM;
	}

	do {
		tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
		if (IS_ERR(tfm)) {
			err = PTR_ERR(tfm);
			break;
		}

		if (unlikely(!tfm_cnt &&
			     crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
			crypto_free_aead(tfm);
			err = -ENOTSUPP;
			break;
		}

		err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
		err |= crypto_aead_setkey(tfm, ukey->key, keylen);
		if (unlikely(err)) {
			crypto_free_aead(tfm);
			break;
		}

		tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
		if (unlikely(!tfm_entry)) {
			crypto_free_aead(tfm);
			err = -ENOMEM;
			break;
		}
		INIT_LIST_HEAD(&tfm_entry->list);
		tfm_entry->tfm = tfm;

		if (!tfm_cnt) {
			head = tfm_entry;
			for_each_possible_cpu(cpu) {
				*per_cpu_ptr(tmp->tfm_entry, cpu) = head;
			}
		} else {
			list_add_tail(&tfm_entry->list, &head->list);
		}

	} while (++tfm_cnt < sysctl_tipc_max_tfms);

	if (!tfm_cnt) {
		free_percpu(tmp->tfm_entry);
		kfree_sensitive(tmp);
		return err;
	}

	bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
		TIPC_AEAD_HINT_LEN);

	tmp->mode = mode;
	tmp->cloned = NULL;
	tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
	tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
	if (!tmp->key) {
		tipc_aead_free(&tmp->rcu);
		return -ENOMEM;
	}
	memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
	atomic_set(&tmp->users, 0);
	atomic64_set(&tmp->seqno, 0);
	refcount_set(&tmp->refcnt, 1);

	*aead = tmp;
	return 0;
}

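/**
 * tipc_aead_clone - Clone a TIPC AEAD key
 * @dst: dest key for the cloning
 * @src: source key to clone from
 *
 * Make a "copy" of the source AEAD key data to the dest: the TFM list is
 * shared with the source, while the key users and the reference counter are
 * initiated from scratch (= 0 and 1 respectively).
 *
 * Return: 0 in case of success, otherwise < 0
 */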
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
{
	struct tipc_aead *aead;
	int cpu;

	if (!src)
		return -ENOKEY;

	if (src->mode != CLUSTER_KEY)
		return -EINVAL;

	if (unlikely(*dst))
		return -EEXIST;

	aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
	if (unlikely(!aead))
		return -ENOMEM;

	aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
	if (unlikely(!aead->tfm_entry)) {
		kfree_sensitive(aead);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		*per_cpu_ptr(aead->tfm_entry, cpu) =
				*per_cpu_ptr(src->tfm_entry, cpu);
	}

	memcpy(aead->hint, src->hint, sizeof(src->hint));
	aead->mode = src->mode;
	aead->salt = src->salt;
	aead->authsize = src->authsize;
	atomic_set(&aead->users, 0);
	atomic64_set(&aead->seqno, 0);
	refcount_set(&aead->refcnt, 1);

	WARN_ON(!refcount_inc_not_zero(&src->refcnt));
	aead->cloned = src;

	*dst = aead;
	return 0;
}

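/**
 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
 * @tfm: cipher handle to be registered with the request
 * @crypto_ctx_size: size of crypto context for callback
 * @iv: returned pointer to IV data
 * @req: returned pointer to AEAD request data
 * @sg: returned pointer to SG lists
 * @nsg: number of SG lists to be allocated
 *
 * Allocate one contiguous memory block holding the crypto context, IV, AEAD
 * request and SG array, each laid out with its proper alignment.
 *
 * Return: the allocated memory in case of success, otherwise NULL
 */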
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg)
{
	unsigned int iv_size, req_size;
	unsigned int len;
	u8 *mem;

	iv_size = crypto_aead_ivsize(tfm);
	req_size = sizeof(**req) + crypto_aead_reqsize(tfm);

	len = crypto_ctx_size;
	len += iv_size;
	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());
	len += req_size;
	len = ALIGN(len, __alignof__(struct scatterlist));
	len += nsg * sizeof(**sg);

	mem = kmalloc(len, GFP_ATOMIC);
	if (!mem)
		return NULL;

	*iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
			      crypto_aead_alignmask(tfm) + 1);
	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
						crypto_tfm_ctx_alignment());
	*sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
					      __alignof__(struct scatterlist));

	return (void *)mem;
}

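/**
 * tipc_aead_encrypt - Encrypt a message
 * @aead: TIPC AEAD key for the message encryption
 * @skb: the message/packet to be encrypted
 * @b: TIPC bearer where the message will be delivered after the encryption
 * @dst: the destination media address
 * @__dnode: TIPC dest node if "known"
 *
 * Return:
 * * 0                   : the encryption has completed
 * * -EINPROGRESS/-EBUSY : a callback will be performed
 * * < 0                 : the encryption has failed
 */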
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode)
{
	struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
	struct tipc_crypto_tx_ctx *tx_ctx;
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, len, tailen, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

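	/* Make sure message len at least 4-byte aligned */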
	len = ALIGN(skb->len, 4);
	tailen = len - skb->len + aead->authsize;

	SKB_LINEAR_ASSERT(skb);
	if (tailen > skb_tailroom(skb)) {
		pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n",
			 skb_tailroom(skb), tailen);
	}

	nsg = skb_cow_data(skb, tailen, &trailer);
	if (unlikely(nsg < 0)) {
		pr_err("TX: skb_cow_data() returned %d\n", nsg);
		return nsg;
	}

	pskb_put(skb, trailer, tailen);

	ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
		goto exit;
	}

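	/* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
	 * In case of a 'cluster key' mode, SALT is varied by xor-ing with the
	 * dest address, otherwise with the dest node address if it is known.
	 */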
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (__dnode)
		salt ^= tipc_node_get_addr(__dnode);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, len - ehsz, iv);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_encrypt_done, skb);
	tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
	tx_ctx->aead = aead;
	tx_ctx->bearer = b;
	memcpy(&tx_ctx->dst, dst, sizeof(*dst));

	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	rc = crypto_aead_encrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}

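/* Asynchronous encrypt complete callback: deliver the packet if the bearer is
 * still up, otherwise drop it; then release all held references.
 */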
static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = tx_ctx->bearer;
	struct tipc_aead *aead = tx_ctx->aead;
	struct tipc_crypto *tx = aead->crypto;
	struct net *net = tx->net;

	switch (err) {
	case 0:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
		rcu_read_lock();
		if (likely(test_bit(0, &b->up)))
			b->media->send_msg(net, skb, b, &tx_ctx->dst);
		else
			kfree_skb(skb);
		rcu_read_unlock();
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
		kfree_skb(skb);
		break;
	}

	kfree(tx_ctx);
	tipc_bearer_put(b);
	tipc_aead_put(aead);
}

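/**
 * tipc_aead_decrypt - Decrypt an encrypted message
 * @net: struct net
 * @aead: TIPC AEAD for the message decryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message has been received
 *
 * Return:
 * * 0                   : the decryption has completed
 * * -EINPROGRESS/-EBUSY : a callback will be performed
 * * < 0                 : the decryption has failed
 */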
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b)
{
	struct tipc_crypto_rx_ctx *rx_ctx;
	struct aead_request *req;
	struct crypto_aead *tfm;
	struct sk_buff *unused;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	if (unlikely(!aead))
		return -ENOKEY;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		pr_err("RX: skb_cow_data() returned %d\n", nsg);
		return nsg;
	}

	tfm = tipc_aead_tfm_next(aead);
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
		goto exit;
	}

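	/* Reconstruct IV: [SALT (4 octets)][SEQNO (8 octets)]
	 * In case of a 'cluster key' mode, SALT is varied by xor-ing with the
	 * header's address field, otherwise with the own node address if the
	 * message was destined for us.
	 */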
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (ehdr->destined)
		salt ^= tipc_own_addr(net);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_decrypt_done, skb);
	rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
	rx_ctx->aead = aead;
	rx_ctx->bearer = b;

	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	rc = crypto_aead_decrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}

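/* Asynchronous decrypt complete callback: update stats, then finalize via
 * tipc_crypto_rcv_complete() and dispatch the clear packet if still valid.
 */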
static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = rx_ctx->bearer;
	struct tipc_aead *aead = rx_ctx->aead;
	struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
	struct net *net = aead->crypto->net;

	switch (err) {
	case 0:
		this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
		break;
	}

	kfree(rx_ctx);
	tipc_crypto_rcv_complete(net, aead, b, &skb, err);
	if (likely(skb)) {
		if (likely(test_bit(0, &b->up)))
			tipc_rcv(net, skb, b);
		else
			kfree_skb(skb);
	}

	tipc_bearer_put(b);
}

static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
{
	return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
}

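/**
 * tipc_ehdr_validate - Validate an encryption message
 * @skb: the message buffer
 *
 * Return: "true" if this is a valid encryption message, otherwise "false"
 */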
bool tipc_ehdr_validate(struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr;
	int ehsz;

	if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
		return false;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (unlikely(ehdr->version != TIPC_EVERSION))
		return false;
	ehsz = tipc_ehdr_size(ehdr);
	if (unlikely(!pskb_may_pull(skb, ehsz)))
		return false;
	if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
		return false;

	return true;
}

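/**
 * tipc_ehdr_build - Build TIPC encryption message header
 * @net: struct net
 * @aead: TX AEAD key to be used for the message encryption
 * @tx_key: key id used for the message encryption
 * @skb: input/output message skb
 * @__rx: RX crypto handle if dest is "known"
 *
 * Return: the header size if the building is successful, otherwise < 0
 */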
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_ehdr *ehdr;
	u32 user = msg_user(hdr);
	u64 seqno;
	int ehsz;

	ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
	WARN_ON(skb_headroom(skb) < ehsz);
	ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);

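	/* Obtain a seqno: the key's own counter is used for cluster keys or
	 * when the destination is unknown, otherwise the per-peer TX counter
	 */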
	if (!__rx || aead->mode == CLUSTER_KEY)
		seqno = atomic64_inc_return(&aead->seqno);
	else
		seqno = atomic64_inc_return(&__rx->sndnxt);

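	/* Revoke the key if seqno is wrapped around */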
	if (unlikely(!seqno))
		return tipc_crypto_key_revoke(net, tx_key);

	ehdr->seqno = cpu_to_be64(seqno);

	ehdr->version = TIPC_EVERSION;
	ehdr->user = 0;
	ehdr->keepalive = 0;
	ehdr->tx_key = tx_key;
	ehdr->destined = (__rx) ? 1 : 0;
	ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
	ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
	ehdr->master_key = aead->crypto->key_master;
	ehdr->reserved_1 = 0;
	ehdr->reserved_2 = 0;

	switch (user) {
	case LINK_CONFIG:
		ehdr->user = LINK_CONFIG;
		memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
		break;
	default:
		if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
			ehdr->user = LINK_PROTOCOL;
			ehdr->keepalive = msg_is_keepalive(hdr);
		}
		ehdr->addr = hdr->hdr[3];
		break;
	}

	return ehsz;
}

static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending)
{
	struct tipc_key old = c->key;
	char buf[32];

	c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
		      ((new_active  & KEY_MASK) << (KEY_BITS)) |
		      ((new_pending & KEY_MASK));

	pr_debug("%s: key changing %s ::%pS\n", c->name,
		 tipc_key_change_dump(old, c->key, buf),
		 __builtin_return_address(0));
}

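/**
 * tipc_crypto_key_init - Initiate a new user / AEAD key
 * @c: TIPC crypto to which the new key is attached
 * @ukey: the user key
 * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
 * @master_key: specify this is a cluster master key
 *
 * A new TIPC AEAD key will be allocated and initiated with the specified user
 * key, then attached to the TIPC crypto.
 *
 * Return: new key id in case of success, otherwise: < 0
 */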
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
			 u8 mode, bool master_key)
{
	struct tipc_aead *aead = NULL;
	int rc = 0;

	rc = tipc_aead_init(&aead, ukey, mode);

	if (likely(!rc)) {
		rc = tipc_crypto_key_attach(c, aead, 0, master_key);
		if (rc < 0)
			tipc_aead_free(&aead->rcu);
	}

	return rc;
}

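/**
 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
 * @c: TIPC crypto to which the new key is attached
 * @aead: the new AEAD key pointer
 * @pos: desired slot in the crypto key array, = 0 if any!
 * @master_key: specify this is a cluster master key
 *
 * Return: new key id in case of success, otherwise: -EBUSY
 */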
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key)
{
	struct tipc_key key;
	int rc = -EBUSY;
	u8 new_key;

	spin_lock_bh(&c->lock);
	key = c->key;
	if (master_key) {
		new_key = KEY_MASTER;
		goto attach;
	}
	if (key.active && key.passive)
		goto exit;
	if (key.pending) {
		if (tipc_aead_users(c->aead[key.pending]) > 0)
			goto exit;
		new_key = key.pending;
	} else {
		if (pos) {
			if (key.active && pos != key_next(key.active)) {
				key.passive = pos;
				new_key = pos;
				goto attach;
			} else if (!key.active && !key.passive) {
				key.pending = pos;
				new_key = pos;
				goto attach;
			}
		}
		key.pending = key_next(key.active ?: key.passive);
		new_key = key.pending;
	}

attach:
	aead->crypto = c;
	aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
	tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
	if (likely(c->key.keys != key.keys))
		tipc_crypto_key_set_state(c, key.passive, key.active,
					  key.pending);
	c->working = 1;
	c->nokey = 0;
	c->key_master |= master_key;
	rc = new_key;

exit:
	spin_unlock_bh(&c->lock);
	return rc;
}

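/* tipc_crypto_key_flush - Detach all keys & reset the crypto states */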
void tipc_crypto_key_flush(struct tipc_crypto *c)
{
	struct tipc_crypto *tx, *rx;
	int k;

	spin_lock_bh(&c->lock);
	if (is_rx(c)) {
		rx = c;
		tx = tipc_net(rx->net)->crypto_tx;
		if (cancel_delayed_work(&rx->work)) {
			kfree(rx->skey);
			rx->skey = NULL;
			atomic_xchg(&rx->key_distr, 0);
			tipc_node_put(rx->node);
		}

		k = atomic_xchg(&rx->peer_rx_active, 0);
		if (k) {
			tipc_aead_users_dec(tx->aead[k], 0);
			tx->timer1 = jiffies;
		}
	}

	c->flags = 0;
	tipc_crypto_key_set_state(c, 0, 0, 0);
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_crypto_key_detach(c->aead[k], &c->lock);
	atomic64_set(&c->sndnxt, 0);
	spin_unlock_bh(&c->lock);
}

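/**
 * tipc_crypto_key_try_align - Align RX keys to a new pending slot
 * @rx: RX crypto handle
 * @new_pending: new pending slot (i.e. the peer's TX key id)
 *
 * Peer has used a key slot unknown to us; if the RX pending key is not yet in
 * use, move it (and the passive key, if any) so the slots match the peer's.
 *
 * Return: "true" if the RX keys are now aligned with the new pending slot,
 * otherwise "false"
 */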
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
{
	struct tipc_aead *tmp1, *tmp2 = NULL;
	struct tipc_key key;
	bool aligned = false;
	u8 new_passive = 0;
	int x;

	spin_lock(&rx->lock);
	key = rx->key;
	if (key.pending == new_pending) {
		aligned = true;
		goto exit;
	}
	if (key.active)
		goto exit;
	if (!key.pending)
		goto exit;
	if (tipc_aead_users(rx->aead[key.pending]) > 0)
		goto exit;

	tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
	if (!refcount_dec_if_one(&tmp1->refcnt))
		goto exit;
	rcu_assign_pointer(rx->aead[key.pending], NULL);

	if (key.passive) {
		tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2,
					   lockdep_is_held(&rx->lock));
		x = (key.passive - key.pending + new_pending) % KEY_MAX;
		new_passive = (x <= 0) ? x + KEY_MAX : x;
	}

	tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
	rcu_assign_pointer(rx->aead[new_pending], tmp1);
	if (new_passive)
		rcu_assign_pointer(rx->aead[new_passive], tmp2);
	refcount_set(&tmp1->refcnt, 1);
	aligned = true;
	pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
			    new_pending);

exit:
	spin_unlock(&rx->lock);
	return aligned;
}

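/**
 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
 * @tx: TX crypto handle
 * @rx: RX crypto handle (can be NULL)
 * @skb: the message skb which will be decrypted later
 * @tx_key: peer TX key id
 *
 * This function looks up the existing TX keys and picks one which is suitable
 * for the message decryption: it must be a cluster key and not used before on
 * the same message (i.e. recursive).
 *
 * Return: the TX AEAD key handle in case of success, otherwise NULL
 */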
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
	struct tipc_aead *aead = NULL;
	struct tipc_key key = tx->key;
	u8 k, i = 0;

	if (!skb_cb->tx_clone_deferred) {
		skb_cb->tx_clone_deferred = 1;
		memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	}

	skb_cb->tx_clone_ctx.rx = rx;
	if (++skb_cb->tx_clone_ctx.recurs > 2)
		return NULL;

	spin_lock(&tx->lock);
	if (tx_key == KEY_MASTER) {
		aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
		goto done;
	}
	do {
		k = (i == 0) ? key.pending :
			((i == 1) ? key.active : key.passive);
		if (!k)
			continue;
		aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
		if (!aead)
			continue;
		if (aead->mode != CLUSTER_KEY ||
		    aead == skb_cb->tx_clone_ctx.last) {
			aead = NULL;
			continue;
		}
		skb_cb->tx_clone_ctx.last = aead;
		WARN_ON(skb->next);
		skb->next = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb->next))
			pr_warn("Failed to clone skb for next round if any\n");
		break;
	} while (++i < 3);

done:
	if (likely(aead))
		WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
	spin_unlock(&tx->lock);

	return aead;
}

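/**
 * tipc_crypto_key_synch - Synch the key states with a peer
 * @rx: RX crypto handle
 * @skb: the message skb just received from the peer
 *
 * Mirror the peer's master-key capability, schedule a key distribution if the
 * peer reports "no key", and replicate the peer's RX active key index,
 * switching the TX key users accordingly.
 */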
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_msg *hdr = buf_msg(skb);
	u32 self = tipc_own_addr(rx->net);
	u8 cur, new;
	unsigned long delay;

	rx->key_master = ehdr->master_key;
	if (!rx->key_master)
		tx->legacy_user = 1;

	if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
		return;

	if (ehdr->rx_nokey) {
		tx->timer2 = jiffies;
		if (tx->key.keys &&
		    !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
			get_random_bytes(&delay, 2);
			delay %= 5;
			delay = msecs_to_jiffies(500 * ++delay);
			if (queue_delayed_work(tx->wq, &rx->work, delay))
				tipc_node_get(rx->node);
		}
	} else {
		atomic_xchg(&rx->key_distr, 0);
	}

	cur = atomic_read(&rx->peer_rx_active);
	new = ehdr->rx_key_active;
	if (tx->key.keys &&
	    cur != new &&
	    atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
		if (new)
			tipc_aead_users_inc(tx->aead[new], INT_MAX);
		if (cur)
			tipc_aead_users_dec(tx->aead[cur], 0);

		atomic64_set(&rx->sndnxt, 0);
		tx->timer1 = jiffies;

		pr_debug("%s: key users changed %d-- %d++, peer %s\n",
			 tx->name, cur, new, rx->name);
	}
}

static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_key key;

	spin_lock(&tx->lock);
	key = tx->key;
	WARN_ON(!key.active || tx_key != key.active);

	tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
	tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	spin_unlock(&tx->lock);

	pr_warn("%s: key is revoked\n", tx->name);
	return -EKEYREVOKED;
}

int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
		      struct tipc_node *node)
{
	struct tipc_crypto *c;

	if (*crypto)
		return -EEXIST;

	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c)
		return -ENOMEM;

	if (!node) {
		c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
		if (!c->wq) {
			kfree(c);
			return -ENOMEM;
		}
	}

	c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
	if (!c->stats) {
		if (c->wq)
			destroy_workqueue(c->wq);
		kfree_sensitive(c);
		return -ENOMEM;
	}

	c->flags = 0;
	c->net = net;
	c->node = node;
	get_random_bytes(&c->key_gen, 2);
	tipc_crypto_key_set_state(c, 0, 0, 0);
	atomic_set(&c->key_distr, 0);
	atomic_set(&c->peer_rx_active, 0);
	atomic64_set(&c->sndnxt, 0);
	c->timer1 = jiffies;
	c->timer2 = jiffies;
	c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
	spin_lock_init(&c->lock);
	scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
		  (is_rx(c)) ? tipc_node_get_id_str(c->node) :
			       tipc_own_id_string(c->net));

	if (is_rx(c))
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
	else
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);

	*crypto = c;
	return 0;
}

void tipc_crypto_stop(struct tipc_crypto **crypto)
{
	struct tipc_crypto *c = *crypto;
	u8 k;

	if (!c)
		return;

	if (is_tx(c)) {
		c->rekeying_intv = 0;
		cancel_delayed_work_sync(&c->work);
		destroy_workqueue(c->wq);
	}

	rcu_read_lock();
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_aead_put(rcu_dereference(c->aead[k]));
	rcu_read_unlock();
	pr_debug("%s: has been stopped\n", c->name);

	free_percpu(c->stats);

	*crypto = NULL;
	kfree_sensitive(c);
}

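/* tipc_crypto_timeout - Drive the TX/RX key state machine: activate a pending
 * key once its users are ready, retire unused or passive keys when their time
 * limits expire, and clear the TX "legacy user" flag after the grace period.
 */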
void tipc_crypto_timeout(struct tipc_crypto *rx)
{
	struct tipc_net *tn = tipc_net(rx->net);
	struct tipc_crypto *tx = tn->crypto_tx;
	struct tipc_key key;
	int cmd;

	spin_lock(&tx->lock);
	key = tx->key;
	if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
		goto s1;
	if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
		goto s1;
	if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
		goto s1;

	tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
	if (key.active)
		tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", tx->name, key.pending);

s1:
	spin_unlock(&tx->lock);

	spin_lock(&rx->lock);
	key = rx->key;
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
		goto s2;

	if (key.active)
		key.passive = key.active;
	key.active = key.pending;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
	goto s5;

s2:
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
		goto s3;

	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
	pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
	goto s5;

s3:
	if (!key.active)
		goto s4;
	if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.active]) > 0)
		goto s4;

	if (key.pending)
		key.passive = key.active;
	else
		key.pending = key.active;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
	tipc_aead_users_set(rx->aead[key.pending], 0);
	pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
	goto s5;

s4:
	if (!key.passive)
		goto s5;
	if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.passive]) > -10)
		goto s5;

	tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
	tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
	pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);

s5:
	spin_unlock(&rx->lock);

	if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
		tx->legacy_user = 0;

	if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
		return;

	cmd = sysctl_tipc_max_tfms;
	sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
	tipc_crypto_do_cmd(rx->net, cmd);
}

static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type)
{
	struct sk_buff *skb;

	skb = skb_clone(_skb, GFP_ATOMIC);
	if (skb) {
		TIPC_SKB_CB(skb)->xmit_type = type;
		tipc_crypto_xmit(net, &skb, b, dst, __dnode);
		if (skb)
			b->media->send_msg(net, skb, b, dst);
	}
}

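/**
 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
 * @net: struct net
 * @skb: input/output message skb pointer
 * @b: bearer used for xmit later
 * @dst: destination media address
 * @__dnode: destination node for reference if any
 *
 * First, build an encryption message header on the top of the message, then
 * encrypt the original TIPC message by using the pending, master or active
 * key, in this preference order. If the encryption is successful, the
 * encrypted skb is returned directly or via the callback; otherwise, the skb
 * is freed!
 *
 * Return:
 * * 0                   : the encryption has succeeded (or no encryption)
 * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
 * * -ENOKEY             : the encryption has failed due to no key
 * * -EKEYREVOKED        : the encryption has failed due to key revoked
 * * < 0                 : the encryption has failed due to other reasons
 */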
int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dst,
		     struct tipc_node *__dnode)
{
	struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats = tx->stats;
	struct tipc_msg *hdr = buf_msg(*skb);
	struct tipc_key key = tx->key;
	struct tipc_aead *aead = NULL;
	u32 user = msg_user(hdr);
	u32 type = msg_type(hdr);
	int rc = -ENOKEY;
	u8 tx_key = 0;

	if (!tx->working)
		return 0;

	if (unlikely(key.pending)) {
		tx_key = key.pending;
		if (!tx->key_master && !key.active)
			goto encrypt;
		if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
			pr_debug("%s: probing for key[%d]\n", tx->name,
				 key.pending);
			goto encrypt;
		}
		if (user == LINK_CONFIG || user == LINK_PROTOCOL)
			tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
					      SKB_PROBING);
	}

	if (tx->key_master) {
		tx_key = KEY_MASTER;
		if (!key.active)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
			pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
				 user, type);
			goto encrypt;
		}
		if (user == LINK_CONFIG ||
		    (user == LINK_PROTOCOL && type == RESET_MSG) ||
		    (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
		    time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
			if (__rx && __rx->key_master &&
			    !atomic_read(&__rx->peer_rx_active))
				goto encrypt;
			if (!__rx) {
				if (likely(!tx->legacy_user))
					goto encrypt;
				tipc_crypto_clone_msg(net, *skb, b, dst,
						      __dnode, SKB_GRACING);
			}
		}
	}

	if (likely(key.active)) {
		tx_key = key.active;
		goto encrypt;
	}

	goto exit;

encrypt:
	aead = tipc_aead_get(tx->aead[tx_key]);
	if (unlikely(!aead))
		goto exit;
	rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
	if (likely(rc > 0))
		rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);

exit:
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY)
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
		else if (rc == -EKEYREVOKED)
			this_cpu_inc(stats->stat[STAT_BADKEYS]);
		kfree_skb(*skb);
		*skb = NULL;
		break;
	}

	tipc_aead_put(aead);
	return rc;
}

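/**
 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
 * @net: struct net
 * @rx: RX crypto handle
 * @skb: input/output message skb pointer
 * @b: bearer where the message has been received
 *
 * If the decryption is successful, the decrypted skb is returned directly or
 * via the callback; the encryption header and auth tag will be trimmed out
 * before forwarding to tipc_rcv() via tipc_crypto_rcv_complete(). Otherwise,
 * the skb will be freed!
 * Note: RX key(s) can be re-aligned, or in case no key is suitable, TX
 * cluster key(s) can be taken for decryption (- recursive).
 *
 * Return:
 * * 0                   : the decryption has successfully completed
 * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
 * * -ENOKEY             : the decryption has failed due to no key
 * * -EBADMSG            : the decryption has failed due to bad message
 * * < 0                 : the decryption has failed due to other reasons
 */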
int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
		    struct sk_buff **skb, struct tipc_bearer *b)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats;
	struct tipc_aead *aead = NULL;
	struct tipc_key key;
	int rc = -ENOKEY;
	u8 tx_key, n;

	tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;

	if (unlikely(!rx || tx_key == KEY_MASTER))
		goto pick_tx;

	key = rx->key;
	if (tx_key == key.active || tx_key == key.pending ||
	    tx_key == key.passive)
		goto decrypt;

	if (tipc_crypto_key_try_align(rx, tx_key))
		goto decrypt;

pick_tx:
	aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
	if (aead)
		goto decrypt;
	goto exit;

decrypt:
	rcu_read_lock();
	if (!aead)
		aead = tipc_aead_get(rx->aead[tx_key]);
	rc = tipc_aead_decrypt(net, aead, *skb, b);
	rcu_read_unlock();

exit:
	stats = ((rx) ?: tx)->stats;
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY) {
			kfree_skb(*skb);
			*skb = NULL;
			if (rx) {
				n = key_next(tx_key);
				rx->nokey = !(rx->skey ||
					      rcu_access_pointer(rx->aead[n]));
				pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
						     rx->name, rx->nokey,
						     tx_key, rx->key.keys);
				tipc_node_put(rx->node);
			}
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
			return rc;
		} else if (rc == -EBADMSG) {
			this_cpu_inc(stats->stat[STAT_BADMSGS]);
		}
		break;
	}

	tipc_crypto_rcv_complete(net, aead, b, skb, rc);
	return rc;
}

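/* tipc_crypto_rcv_complete - Finalize a decryption: if the packet was decoded
 * with a TX cluster key, clone & attach that key to the peer's RX crypto;
 * then trim the encryption header & auth tag, validate the clear message and
 * synch the key states with the peer.
 */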
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
	struct tipc_crypto *rx = aead->crypto;
	struct tipc_aead *tmp = NULL;
	struct tipc_ehdr *ehdr;
	struct tipc_node *n;

	if (unlikely(is_tx(aead->crypto))) {
		rx = skb_cb->tx_clone_ctx.rx;
		pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
			 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
			 (*skb)->next, skb_cb->flags);
		pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
			 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
			 aead->crypto->aead[1], aead->crypto->aead[2],
			 aead->crypto->aead[3]);
		if (unlikely(err)) {
			if (err == -EBADMSG && (*skb)->next)
				tipc_rcv(net, (*skb)->next, b);
			goto free_skb;
		}

		if (likely((*skb)->next)) {
			kfree_skb((*skb)->next);
			(*skb)->next = NULL;
		}
		ehdr = (struct tipc_ehdr *)(*skb)->data;
		if (!rx) {
			WARN_ON(ehdr->user != LINK_CONFIG);
			n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
					     true);
			rx = tipc_node_crypto_rx(n);
			if (unlikely(!rx))
				goto free_skb;
		}

		if (ehdr->tx_key == KEY_MASTER)
			goto rcv;
		if (tipc_aead_clone(&tmp, aead) < 0)
			goto rcv;
		WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
		if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
			tipc_aead_free(&tmp->rcu);
			goto rcv;
		}
		tipc_aead_put(aead);
		aead = tmp;
	}

	if (unlikely(err)) {
		tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN);
		goto free_skb;
	}

	tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);

	rx->timer1 = jiffies;

rcv:
	ehdr = (struct tipc_ehdr *)(*skb)->data;

	if (rx->key.passive && ehdr->tx_key == rx->key.passive)
		rx->timer2 = jiffies;

	skb_reset_network_header(*skb);
	skb_pull(*skb, tipc_ehdr_size(ehdr));
	pskb_trim(*skb, (*skb)->len - aead->authsize);

	if (unlikely(!tipc_msg_validate(skb))) {
		pr_err_ratelimited("Packet dropped after decryption!\n");
		goto free_skb;
	}

	tipc_crypto_key_synch(rx, *skb);

	skb_cb->decrypted = 1;

	if (likely(!skb_cb->tx_clone_deferred))
		goto exit;
	skb_cb->tx_clone_deferred = 0;
	memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	goto exit;

free_skb:
	kfree_skb(*skb);
	*skb = NULL;

exit:
	tipc_aead_put(aead);
	if (rx)
		tipc_node_put(rx->node);
}

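/* tipc_crypto_do_cmd - Debug command handler: dump all crypto key states and
 * the per-cpu statistics counters to the kernel log.
 */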
static void tipc_crypto_do_cmd(struct net *net, int cmd)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_crypto *tx = tn->crypto_tx, *rx;
	struct list_head *p;
	unsigned int stat;
	int i, j, cpu;
	char buf[200];

	switch (cmd) {
	case 0xfff1:
		goto print_stats;
	default:
		return;
	}

print_stats:
	pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");

	pr_info("Key status:\n");
	pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
		tipc_crypto_key_dump(tx, buf));

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
			tipc_crypto_key_dump(rx, buf));
	}
	rcu_read_unlock();

	for (i = 0, j = 0; i < MAX_STATS; i++)
		j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
	pr_info("Counter %s", buf);

	memset(buf, '-', 115);
	buf[115] = '\0';
	pr_info("%s\n", buf);

	j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
	for_each_possible_cpu(cpu) {
		for (i = 0; i < MAX_STATS; i++) {
			stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
			j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
		}
		pr_info("%s", buf);
		j = scnprintf(buf, 200, "%12s", " ");
	}

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		j = scnprintf(buf, 200, "RX(%7.7s) ",
			      tipc_node_get_id_str(rx->node));
		for_each_possible_cpu(cpu) {
			for (i = 0; i < MAX_STATS; i++) {
				stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
				j += scnprintf(buf + j, 200 - j, "|%11d ",
					       stat);
			}
			pr_info("%s", buf);
			j = scnprintf(buf, 200, "%12s", " ");
		}
	}
	rcu_read_unlock();

	pr_info("\n======================== Done ========================\n");
}

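/* tipc_crypto_key_dump - Dump a crypto's key states & key users to buf */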
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
{
	struct tipc_key key = c->key;
	struct tipc_aead *aead;
	int k, i = 0;
	char *s;

	for (k = KEY_MIN; k <= KEY_MAX; k++) {
		if (k == KEY_MASTER) {
			if (is_rx(c))
				continue;
			if (time_before(jiffies,
					c->timer2 + TIPC_TX_GRACE_PERIOD))
				s = "ACT";
			else
				s = "PAS";
		} else {
			if (k == key.passive)
				s = "PAS";
			else if (k == key.active)
				s = "ACT";
			else if (k == key.pending)
				s = "PEN";
			else
				s = "-";
		}
		i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);

		rcu_read_lock();
		aead = rcu_dereference(c->aead[k]);
		if (aead)
			i += scnprintf(buf + i, 200 - i,
				       "{\"0x...%s\", \"%s\"}/%d:%d",
				       aead->hint,
				       (aead->mode == CLUSTER_KEY) ? "c" : "p",
				       atomic_read(&aead->users),
				       refcount_read(&aead->refcnt));
		rcu_read_unlock();
		i += scnprintf(buf + i, 200 - i, "\n");
	}

	if (is_rx(c))
		i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
			       atomic_read(&c->peer_rx_active));

	return buf;
}

static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf)
{
	struct tipc_key *key = &old;
	int k, i = 0;
	char *s;

again:
	i += scnprintf(buf + i, 32 - i, "[");
	for (k = KEY_1; k <= KEY_3; k++) {
		if (k == key->passive)
			s = "pas";
		else if (k == key->active)
			s = "act";
		else if (k == key->pending)
			s = "pen";
		else
			s = "-";
		i += scnprintf(buf + i, 32 - i,
			       (k != KEY_3) ? "%s " : "%s", s);
	}
	if (key != &new) {
		i += scnprintf(buf + i, 32 - i, "] -> ");
		key = &new;
		goto again;
	}
	i += scnprintf(buf + i, 32 - i, "]");
	return buf;
}

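/**
 * tipc_crypto_msg_rcv - Common rcv for all the MSG_CRYPTO messages
 * @net: struct net
 * @skb: the message skb
 *
 * This function parses the message type and dispatches it; currently only the
 * KEY_DISTR_MSG type is handled. The skb is freed afterwards.
 */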
void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb)
{
	struct tipc_crypto *rx;
	struct tipc_msg *hdr;

	if (unlikely(skb_linearize(skb)))
		goto exit;

	hdr = buf_msg(skb);
	rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
	if (unlikely(!rx))
		goto exit;

	switch (msg_type(hdr)) {
	case KEY_DISTR_MSG:
		if (tipc_crypto_key_rcv(rx, hdr))
			goto exit;
		break;
	default:
		break;
	}

	tipc_node_put(rx->node);

exit:
	kfree_skb(skb);
}

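/**
 * tipc_crypto_key_distr - Distribute a TX key
 * @tx: the TX crypto
 * @key: the key's index to be distributed
 * @dest: the destination tipc node, = NULL if distributing to all nodes
 *
 * Return: 0 in case of success, otherwise < 0
 */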
int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
			  struct tipc_node *dest)
{
	struct tipc_aead *aead;
	u32 dnode = tipc_node_get_addr(dest);
	int rc = -ENOKEY;

	if (!sysctl_tipc_key_exchange_enabled)
		return 0;

	if (key) {
		rcu_read_lock();
		aead = tipc_aead_get(tx->aead[key]);
		if (likely(aead)) {
			rc = tipc_crypto_key_xmit(tx->net, aead->key,
						  aead->gen, aead->mode,
						  dnode);
			tipc_aead_put(aead);
		}
		rcu_read_unlock();
	}

	return rc;
}

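/**
 * tipc_crypto_key_xmit - Send a session key
 * @net: struct net
 * @skey: the session key to be sent
 * @gen: the key's generation
 * @mode: the key's mode
 * @dnode: the destination node address, = 0 if broadcasting to all nodes
 *
 * The session key will be broadcast or unicast as a "MSG_CRYPTO" message with
 * the "KEY_DISTR_MSG" type. The message data layout is:
 * [key's alg name][key length (4 octets)][key data]
 *
 * Return: 0 in case of success, otherwise < 0
 */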
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode)
{
	struct sk_buff_head pkts;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u16 size, cong_link_cnt;
	u8 *data;
	int rc;

	size = tipc_aead_key_size(skey);
	skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hdr = buf_msg(skb);
	tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
		      INT_H_SIZE, dnode);
	msg_set_size(hdr, INT_H_SIZE + size);
	msg_set_key_gen(hdr, gen);
	msg_set_key_mode(hdr, mode);

	data = msg_data(hdr);
	*((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen);
	memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME);
	memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key,
	       skey->keylen);

	__skb_queue_head_init(&pkts);
	__skb_queue_tail(&pkts, skb);
	if (dnode)
		rc = tipc_node_xmit(net, &pkts, dnode, 0);
	else
		rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);

	return rc;
}

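/**
 * tipc_crypto_key_rcv - Receive a session key
 * @rx: the RX crypto
 * @hdr: the TIPC v2 message incl. the receiving session key in its data
 *
 * This function retrieves the session key in the message from peer, then
 * schedules a RX work to attach the key to the corresponding RX crypto.
 *
 * Return: "true" if the key has been scheduled for attaching, otherwise
 * "false".
 */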
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
{
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_aead_key *skey = NULL;
	u16 key_gen = msg_key_gen(hdr);
	u32 size = msg_data_sz(hdr);
	u8 *data = msg_data(hdr);
	unsigned int keylen;

	if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
		pr_debug("%s: message data size is too small\n", rx->name);
		goto exit;
	}

	keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));

	if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
		     keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
		pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
		goto exit;
	}

	spin_lock(&rx->lock);
	if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
		pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
		       rx->skey, key_gen, rx->key_gen);
		goto exit_unlock;
	}

	skey = kmalloc(size, GFP_ATOMIC);
	if (unlikely(!skey)) {
		pr_err("%s: unable to allocate memory for skey\n", rx->name);
		goto exit_unlock;
	}

	skey->keylen = keylen;
	memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
	memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
	       skey->keylen);

	rx->key_gen = key_gen;
	rx->skey_mode = msg_key_mode(hdr);
	rx->skey = skey;
	rx->nokey = 0;
	mb();

exit_unlock:
	spin_unlock(&rx->lock);

exit:
	if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
		return true;

	return false;
}

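/**
 * tipc_crypto_work_rx - Scheduled RX works handler
 * @work: the struct RX work
 *
 * The function processes the previously scheduled works: key distributing and
 * attaching of a received session key, rescheduling if needed.
 */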
static void tipc_crypto_work_rx(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work);
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	unsigned long delay = msecs_to_jiffies(5000);
	bool resched = false;
	u8 key;
	int rc;

	if (atomic_cmpxchg(&rx->key_distr,
			   KEY_DISTR_SCHED,
			   KEY_DISTR_COMPL) == KEY_DISTR_SCHED) {
		key = tx->key.pending ?: tx->key.active;
		rc = tipc_crypto_key_distr(tx, key, rx->node);
		if (unlikely(rc))
			pr_warn("%s: unable to distr key[%d] to %s, err %d\n",
				tx->name, key, tipc_node_get_id_str(rx->node),
				rc);

		resched = true;
	} else {
		atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0);
	}

	if (rx->skey) {
		rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false);
		if (unlikely(rc < 0))
			pr_warn("%s: unable to attach received skey, err %d\n",
				rx->name, rc);
		switch (rc) {
		case -EBUSY:
		case -ENOMEM:
			resched = true;
			break;
		default:
			synchronize_rcu();
			kfree(rx->skey);
			rx->skey = NULL;
			break;
		}
	}

	if (resched && queue_delayed_work(tx->wq, &rx->work, delay))
		return;

	tipc_node_put(rx->node);
}

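/**
 * tipc_crypto_rekeying_sched - (Re)schedule rekeying with a new interval
 * @tx: TX crypto
 * @changed: if the rekeying needs to be rescheduled with a new interval
 * @new_intv: new rekeying interval in minutes (when "changed" = true), or
 *            TIPC_REKEYING_NOW to rekey immediately
 */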
void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
				u32 new_intv)
{
	unsigned long delay;
	bool now = false;

	if (changed) {
		if (new_intv == TIPC_REKEYING_NOW)
			now = true;
		else
			tx->rekeying_intv = new_intv;
		cancel_delayed_work_sync(&tx->work);
	}

	if (tx->rekeying_intv || now) {
		delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;
		queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
	}
}

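/**
 * tipc_crypto_work_tx - Scheduled TX works handler
 * @work: the struct TX work
 *
 * The function processes the previously scheduled work, i.e. key rekeying: it
 * generates a new session key based on the current one, attaches it to the TX
 * crypto and distributes it to peers. It also reschedules the rekeying if
 * needed.
 */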
static void tipc_crypto_work_tx(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work);
	struct tipc_aead_key *skey = NULL;
	struct tipc_key key = tx->key;
	struct tipc_aead *aead;
	int rc = -ENOMEM;

	if (unlikely(key.pending))
		goto resched;

	rcu_read_lock();
	aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]);
	if (unlikely(!aead)) {
		rcu_read_unlock();
		return;
	}

	skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
	rcu_read_unlock();

	if (likely(skey)) {
		rc = tipc_aead_key_generate(skey) ?:
		     tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
		if (likely(rc > 0))
			rc = tipc_crypto_key_distr(tx, rc, NULL);
		kfree_sensitive(skey);
	}

	if (unlikely(rc))
		pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc);

resched:
	tipc_crypto_rekeying_sched(tx, false, 0);
}