#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "tls.h"
#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static struct workqueue_struct *destruct_wq __read_mostly;

static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

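/* Deferred TX teardown work: calls the driver's tls_dev_del() for the TX
 * direction and then frees the TLS context.
 */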
static void tls_device_tx_del_task(struct work_struct *work)
{
	struct tls_offload_context_tx *offload_ctx =
		container_of(work, struct tls_offload_context_tx, destruct_work);
	struct tls_context *ctx = offload_ctx->ctx;
	struct net_device *netdev;

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
	dev_put(netdev);
	ctx->netdev = NULL;
	tls_device_free_ctx(ctx);
}

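/* Drop the final reference on the TLS context.  TX offload teardown is
 * deferred to destruct_wq because the driver callback may sleep; everything
 * else is freed synchronously.
 */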
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	struct net_device *netdev;
	unsigned long flags;
	bool async_cleanup;

	spin_lock_irqsave(&tls_device_lock, flags);
	if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
		spin_unlock_irqrestore(&tls_device_lock, flags);
		return;
	}

	list_del(&ctx->list);

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
	if (async_cleanup) {
		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);

		/* queue_work inside the spinlock
		 * to make sure tls_device_down waits for that work.
		 */
		queue_work(destruct_wq, &offload_ctx->destruct_work);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	if (!async_cleanup)
		tls_device_free_ctx(ctx);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

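/* clean_acked_data callback: free TX records fully ACKed by the peer
 * (everything with end_seq not after acked_seq) and advance
 * unacked_record_sn accordingly.
 */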
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = rcu_dereference_protected(tls_ctx->netdev,
					   lockdep_is_held(&device_offload_lock));
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

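/* Append @size bytes taken from @pfrag to the open record, extending the
 * last fragment when the new bytes are contiguous with it and starting a
 * new fragment (with an extra page reference) otherwise.
 */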
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		__skb_frag_set_page(frag, pfrag->page);
		skb_frag_off_set(frag, pfrag->offset);
		skb_frag_size_set(frag, size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

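/* Close the open record: note its end TCP sequence number, add it to the
 * records_list used for retransmission lookups, advance the record sequence
 * number and hand the fragments to the TCP layer via tls_push_sg().
 */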
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

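/* Finalize the open record: reserve room for the authentication tag (the
 * device fills it in) and write the TLS header.  Returns 0 on success, the
 * tag size if no memory was available and that much payload must be taken
 * back from the data, or -ENOMEM if the record is too short even for that.
 */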
static int tls_device_record_close(struct sock *sk,
				   struct tls_context *ctx,
				   struct tls_record_info *record,
				   struct page_frag *pfrag,
				   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	int ret;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now, steal some back from data
	 */
	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
					sk->sk_allocation))) {
		ret = 0;
		tls_append_frag(record, pfrag, prot->tag_size);
	} else {
		ret = prot->tag_size;
		if (record->len <= prot->overhead_size)
			return -ENOMEM;
	}

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type);
	return ret;
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	skb_frag_off_set(frag, pfrag->offset);
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

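/* Make sure there is an open record and that the socket's page_frag has
 * room to copy more payload into, applying memory-pressure handling when
 * allocation fails.
 */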
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

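/* Copy payload from the iterator into the record buffer.  The bulk of the
 * copy is done with copy_from_iter_nocache(); only the cacheline-unaligned
 * head and the tail use a regular cached copy.
 */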
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

union tls_iter_offset {
	struct iov_iter *msg_iter;
	int offset;
};

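/* Core TX path for device offload: slice the payload into TLS records no
 * larger than TLS_MAX_PAYLOAD_SIZE, copy (or, for zerocopy sendfile,
 * reference) the data into the open record, close and push completed
 * records, and return the number of bytes consumed or a negative error.
 */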
static int tls_push_data(struct sock *sk,
			 union tls_iter_offset iter_offset,
			 size_t size, int flags,
			 unsigned char record_type,
			 struct page *zc_page)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -EOPNOTSUPP;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* record->len includes the TLS header (prepend_size), which does not
	 * count against the TLS_MAX_PAYLOAD_SIZE payload limit.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending a partial record with a
				 * type other than application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;

		copy = min_t(size_t, size, max_open_record_len - record->len);
		if (copy && zc_page) {
			struct page_frag zc_pfrag;

			zc_pfrag.page = zc_page;
			zc_pfrag.offset = iter_offset.offset;
			zc_pfrag.size = copy;
			tls_append_frag(record, &zc_pfrag, copy);
		} else if (copy) {
			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);

			rc = tls_device_copy_data(page_address(pfrag->page) +
						  pfrag->offset, copy,
						  iter_offset.msg_iter);
			if (rc)
				goto handle_error;
			tls_append_frag(record, pfrag, copy);
		}

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_device_record_close(sk, tls_ctx, record,
						     pfrag, record_type);
			if (rc) {
				if (rc > 0) {
					size += rc;
				} else {
					size = orig_size;
					destroy_record(record);
					ctx->open_record = NULL;
					break;
				}
			}

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	union tls_iter_offset iter;
	int rc;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_process_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	iter.msg_iter = &msg->msg_iter;
	rc = tls_push_data(sk, iter, size, msg->msg_flags, record_type, NULL);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	union tls_iter_offset iter_offset;
	struct iov_iter msg_iter;
	char *kaddr;
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (tls_ctx->zerocopy_sendfile) {
		iter_offset.offset = offset;
		rc = tls_push_data(sk, iter_offset, size,
				   flags, TLS_RECORD_TYPE_DATA, page);
		goto out;
	}

	kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	iter_offset.msg_iter = &msg_iter;
	rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA,
			   NULL);
	kunmap(page);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

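/* Find the TLS record containing TCP sequence number @seq, for drivers
 * handling retransmissions.  On success the matching record is returned,
 * *p_record_sn is set to its record sequence number and the lookup hint is
 * updated.
 */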
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 * And if it's not the start marker, check whether this seq
		 * is in the list at all, so out-of-window seqs are ignored.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq is in the list
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	union tls_iter_offset iter;
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	iter.msg_iter = &msg_iter;
	return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = rcu_dereference(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

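/* Handle driver-requested asynchronous RX resync.  During the asynchronous
 * stage, record-header TCP sequence numbers are logged and rcd_delta counts
 * the records seen; during the synchronous stage the log is searched for
 * the requested sequence so the caller knows how many record sequence
 * numbers to roll back.
 */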
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

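/* Called by the SW RX path for every new record header: decide whether a
 * device RX resync should be sent now, depending on the configured resync
 * type (driver request, core next-hint or asynchronous driver request).
 */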
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;
	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, so the device has gone past
		 * this record boundary; delay the resync to a later record
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

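/* Handle a record the device decrypted only partially: run the cipher over
 * the whole record into a bounce buffer and copy the result back over the
 * fragments the device had already decrypted, so the record is uniformly
 * ciphertext again and the SW path can decrypt it as usual.
 */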
static int
tls_device_reencrypt(struct sock *sk, struct tls_sw_context_rx *sw_ctx)
{
	int err, offset, copy, data_len, pos;
	struct sk_buff *skb, *skb_iter;
	struct scatterlist sg[1];
	struct strp_msg *rxm;
	char *orig_buf, *buf;

	rxm = strp_msg(tls_strp_msg(sw_ctx));
	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	err = tls_strp_msg_cow(sw_ctx);
	if (unlikely(err))
		goto free_buf;

	skb = tls_strp_msg(sw_ctx);
	rxm = strp_msg(skb);
	offset = rxm->offset;

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

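/* Called by the SW RX path for every record: determine whether the device
 * decrypted it fully, partially or not at all, update the resync state and
 * fall back to tls_device_reencrypt() for mixed records.
 */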
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb = tls_strp_msg(sw_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;
	int left;

	left = rxm->full_len - skb->len;
	/* Check if all the data is decrypted already */
	skb_iter = skb_shinfo(skb)->frag_list;
	while (skb_iter && left > 0) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;

		left -= skb_iter->len;
		skb_iter = skb_iter->next;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
		if (likely(is_encrypted || is_decrypted))
			return is_decrypted;

		/* After tls_device_down disables the offload, the next SKB
		 * will likely have initial fragments decrypted, and final
		 * ones not decrypted. We need to reencrypt that single SKB.
		 */
		return tls_device_reencrypt(sk, sw_ctx);
	}

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return is_decrypted;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, sw_ctx);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		RCU_INIT_POINTER(ctx->netdev, netdev);
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

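/* Enable TX device offload on a connected TLS socket: validate the crypto
 * parameters, allocate the TX offload context (records list, start marker,
 * SW fallback AEAD), register the state with the driver via tls_dev_add()
 * and switch the socket's destructor and xmit-validation hooks.
 */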
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto release_netdev;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto release_netdev;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto free_rec_seq;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_offload_ctx;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
	offload_ctx->ctx = ctx;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last SKB in the TCP write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after the NETDEV_DOWN event.
	 *
	 * device_offload_lock is taken for writing in tls_device_down() when
	 * handling NETDEV_DOWN, so holding it for reading here ensures the
	 * device cannot go down between this check and tls_device_attach()
	 * adding the context to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
release_netdev:
	dev_put(netdev);
	return rc;
}

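/* Enable RX device offload: allocate the RX offload context, set up the SW
 * RX path (which still parses records and handles resync and fallback) and
 * register the receive state with the driver via tls_dev_add().
 */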
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after the NETDEV_DOWN event.
	 *
	 * device_offload_lock is taken for writing in tls_device_down() when
	 * handling NETDEV_DOWN, so holding it for reading here ensures the
	 * device cannot go down between this check and tls_device_attach()
	 * adding the context to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = rcu_dereference_protected(tls_ctx->netdev,
					   lockdep_is_held(&device_offload_lock));
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		rcu_assign_pointer(tls_ctx->netdev, NULL);
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

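/* NETDEV_DOWN handler: detach every offloaded context using this device,
 * switch the affected sockets to the SW fallback for TX, mark RX as
 * degraded and release the driver-side state.  The sockets keep working;
 * only the hardware offload is torn down.
 */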
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		struct net_device *ctx_netdev =
			rcu_dereference_protected(ctx->netdev,
						  lockdep_is_held(&device_offload_lock));

		if (ctx_netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
		 * tls_is_sk_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);

		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
		rcu_assign_pointer(ctx->netdev, NULL);

		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);

		/* Sync with inflight packets. After this point:
		 * TX: no non-encrypted packets will be passed to the driver.
		 * RX: resync requests from the driver will be ignored.
		 */
		synchronize_net();

		/* Release the offload context on the driver side. */
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);

		dev_put(netdev);

		/* Move the context to a separate list for two reasons:
		 * 1. When the context is deallocated, list_del is called.
		 * 2. It's no longer an offloaded context, so we don't want to
		 *    run offload-specific code on this context.
		 */
		spin_lock_irqsave(&tls_device_lock, flags);
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);

		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 * Now release the reference taken above.
		 */
		if (refcount_dec_and_test(&ctx->refcount)) {
			/* sk_destruct ran after tls_device_down took a ref, and
			 * it returned early. Complete the destruction here.
			 */
			list_del(&ctx->list);
			tls_device_free_ctx(ctx);
		}
	}

	up_write(&device_offload_lock);

	flush_workqueue(destruct_wq);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if (netif_is_bond_master(dev))
			return NOTIFY_DONE;
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

int __init tls_device_init(void)
{
	int err;

	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
	if (!destruct_wq)
		return -ENOMEM;

	err = register_netdevice_notifier(&tls_dev_notifier);
	if (err)
		destroy_workqueue(destruct_wq);

	return err;
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	destroy_workqueue(destruct_wq);
	clean_acked_data_flush();
}