#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>

struct tls_rec;

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_TAG_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8
#define TLS_MAX_AAD_SIZE		TLS_AAD_SPACE_SIZE

/* For CCM mode, the full 16 bytes of IV are made of four fields of the
 * following sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The 'length' field width is encoded in 'b0' as (length width - 1),
 * hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
#define TLS_SM4_CCM_IV_B0_BYTE		2

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_strparser {
	struct sock *sk;

	u32 mark : 8;
	u32 stopped : 1;
	u32 copy_mode : 1;
	u32 msg_ready : 1;

	struct strp_msg stm;

	struct sk_buff *anchor;
	struct work_struct work;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	u8 reader_present;
	u8 async_capable:1;
	u8 zc_capable:1;
	u8 reader_contended:1;

	struct tls_strparser strp;

	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	struct sk_buff_head async_hold;
	struct wait_queue_head wq;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	struct work_struct destruct_work;
	struct tls_context *ctx;
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
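
/* Illustrative note (sketch, not verbatim kernel code): the TLS core
 * allocates the TX offload context together with the trailing driver
 * scratch area, roughly:
 *
 *	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
 *
 * so a driver's per-connection TX state must fit in the
 * TLS_DRIVER_STATE_SIZE_TX bytes exposed through driver_state (see
 * tls_driver_ctx() below).
 */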

enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX where resync is driven entirely by the core in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
		struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
		struct tls12_crypto_info_sm4_gcm sm4_gcm;
		struct tls12_crypto_info_sm4_ccm sm4_ccm;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;
	u8 zerocopy_sendfile:1;
	u8 rx_no_pad:1;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device __rcu *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially sent record among all TX
			       * configurations and in_tcp_sendpages
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
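
/* Hedged usage sketch (hypothetical driver code): a NIC driver advertises
 * kTLS offload by providing a tlsdev_ops table and setting the matching
 * netdev feature bits, e.g.:
 *
 *	static const struct tlsdev_ops mydrv_tls_ops = {
 *		.tls_dev_add	= mydrv_tls_add,
 *		.tls_dev_del	= mydrv_tls_del,
 *		.tls_dev_resync	= mydrv_tls_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &mydrv_tls_ops;
 *	netdev->hw_features |= NETIF_F_HW_TLS_TX;
 *	netdev->features |= NETIF_F_HW_TLS_TX;
 *
 * ->tls_dev_add() installs HW crypto state valid from TCP sequence
 * start_offload_tcp_sn; if it returns an error, the connection falls back
 * to the software kTLS path.
 */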

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}
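
/* Hedged usage sketch (hypothetical driver code): on retransmission a
 * driver looks up the record covering a TCP sequence number so it can
 * rebuild HW crypto state. records_list is protected by ->lock:
 *
 *	struct tls_record_info *record;
 *	unsigned long flags;
 *	u64 rcd_sn;
 *
 *	spin_lock_irqsave(&off_ctx->lock, flags);
 *	record = tls_get_record(off_ctx, tcp_seq, &rcd_sn);
 *	if (record && !tls_record_is_start_marker(record))
 *		mydrv_resync_hw(record, rcd_sn);	// hypothetical helper
 *	spin_unlock_irqrestore(&off_ctx->lock, flags);
 */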

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
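
/* Hedged usage sketch: a driver xmit path can gate its kTLS TX handling on
 * this check before touching any HW TLS state:
 *
 *	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
 *		state = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
 */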

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
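
/* Hedged usage sketch: drivers typically overlay their own per-connection
 * struct (hypothetical here) on the driver_state area:
 *
 *	struct mydrv_tls_tx_state {
 *		u32 hw_ctx_id;
 *		u32 expected_tcp_seq;
 *	};
 *
 *	struct mydrv_tls_tx_state *st =
 *		tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 *
 * The struct must fit in TLS_DRIVER_STATE_SIZE_TX (resp. _RX) bytes;
 * guarding the size with a BUILD_BUG_ON() is a sensible precaution.
 */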

#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
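
/* Hedged usage sketch: in TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ mode, a driver
 * that lost sync and believes a record header starts at TCP sequence
 * hdr_seq (network byte order) asks the core to confirm:
 *
 *	tls_offload_rx_resync_request(sk, hdr_seq);
 *
 * If the next record parsed by the core matches, the core hands the record
 * sequence number back to the driver via ->tls_dev_resync().
 */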

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}
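
/* Hedged usage sketch: the resync strategy is normally chosen once, from
 * the driver's ->tls_dev_add() callback for the RX direction:
 *
 *	if (direction == TLS_OFFLOAD_CTX_DIR_RX)
 *		tls_offload_rx_resync_set_type(sk,
 *				TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
 */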

/* Driver's seq tracking has to be disabled until resync succeeded */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

#ifdef CONFIG_TLS_DEVICE
void tls_device_sk_destruct(struct sock *sk);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
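
/* Hedged usage sketch (hypothetical helper): an RX driver can use this
 * check before attaching NIC-decrypted state to a socket's traffic:
 *
 *	if (tls_is_sk_rx_device_offloaded(sk))
 *		mydrv_attach_decrypted_state(sk, skb);
 */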
#endif
#endif /* _TLS_OFFLOAD_H */