#ifndef _TLS_INT_H
#define _TLS_INT_H

#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/skmsg.h>
#include <net/tls.h>

#define TLS_PAGE_ORDER	(min_t(unsigned int, PAGE_ALLOC_COSTLY_ORDER,	\
			       TLS_MAX_PAYLOAD_SIZE >> PAGE_SHIFT))

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)
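
/* Per-netns TLS MIB counters. The __TLS_INC_STATS() variant maps to
 * __SNMP_INC_STATS() and is meant for callers that already run with
 * preemption/BH disabled; TLS_INC_STATS() is safe elsewhere.
 *
 * Illustrative use (an existing counter from the TLS MIB):
 *
 *	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
 */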
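
/* One TLS record in flight on the transmit path (rough summary):
 * msg_plaintext holds the data still to be encrypted, msg_encrypted the
 * ciphertext with room for header, nonce and tag; sg_aead_in/sg_aead_out
 * chain the AAD in front of those buffers for the AEAD request, and
 * records stay queued until tls_tx_records() pushes them out.
 */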
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);
void tls_err_abort(struct sock *sk, int err);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int flags, int *addr_len);
bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
		     unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct scatterlist *sgout);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

int tls_strp_dev_init(void);
void tls_strp_dev_exit(void);

void tls_strp_done(struct tls_strparser *strp);
void tls_strp_stop(struct tls_strparser *strp);
int tls_strp_init(struct tls_strparser *strp, struct sock *sk);
void tls_strp_data_ready(struct tls_strparser *strp);

void tls_strp_check_rcv(struct tls_strparser *strp);
void tls_strp_msg_done(struct tls_strparser *strp);

int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb);
void tls_rx_msg_ready(struct tls_strparser *strp);

void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx);
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst);
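
/* Rough lifecycle of the TLS record parser above (summary, not a contract):
 * tls_strp_init() attaches a parser to the socket, tls_strp_data_ready()
 * kicks it when new data arrives, tls_rx_msg_size() reads the record header
 * to learn the full record length, tls_rx_msg_ready() fires once a complete
 * record has been gathered, tls_strp_msg_load() hands it to the reader and
 * tls_strp_msg_done() releases it; tls_strp_stop()/tls_strp_done() are the
 * teardown half.
 */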

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;

	return &scb->tls;
}

static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)
{
	DEBUG_NET_WARN_ON_ONCE(!ctx->strp.msg_ready || !ctx->strp.anchor->len);
	return ctx->strp.anchor;
}

static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
{
	return ctx->strp.msg_ready;
}

#ifdef CONFIG_TLS_DEVICE
int tls_device_init(void);
void tls_device_cleanup(void);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx);
#else
static inline int tls_device_init(void) { return 0; }
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
	return 0;
}
#endif
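
/* With CONFIG_TLS_DEVICE disabled the offload setup paths report
 * -EOPNOTSUPP, so callers can fall back to the pure software (tls_sw)
 * implementation, while the remaining hooks collapse to no-ops.
 */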

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}
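
/* Example of the carry behaviour above (illustrative values, 8-byte seq):
 *
 *	00 00 00 00 00 00 00 ff  ->  00 00 00 00 00 00 01 00, returns false
 *	ff ff ff ff ff ff ff ff  ->  00 00 00 00 00 00 00 00, returns true
 *
 * A "true" return therefore means the big-endian counter wrapped to zero.
 */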

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}

static inline void
tls_advance_record_sn(struct sock *sk, struct tls_prot_info *prot,
		      struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, -EBADMSG);

	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		tls_bigint_increment(ctx->iv + prot->salt_size,
				     prot->iv_size);
}
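
/* Aborting with -EBADMSG on a wrapped record sequence number enforces the
 * TLS rule that a connection must not send more than 2^64 - 1 records under
 * one key: the socket is put into an error state rather than reusing a nonce.
 */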

static inline void
tls_xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
{
	int i;

	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}
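
/* For TLS 1.3 (and ChaCha20-Poly1305) the per-record nonce is the static IV
 * XORed with the 64-bit record sequence number left-padded to the nonce
 * length: bytes 0-3 of @iv stay untouched, bytes 4-11 take the XOR above.
 */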

static inline void
tls_fill_prepend(struct tls_context *ctx, char *buf, size_t plaintext_len,
		 unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
		pkt_len += iv_size;

		/* ciphers with an explicit nonce carry it right after the
		 * 5-byte record header
		 */
		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + prot->salt_size, iv_size);
	}

	/* 5-byte TLS record header: content type, legacy version and length.
	 * TLS 1.3 hides the real content type inside the ciphertext and
	 * always advertises application_data with the TLS 1.2 version
	 * (both version bytes are 0x03).
	 */
	buf[0] = prot->version == TLS_1_3_VERSION ?
		 TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
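
/* Illustrative example (assumed sizes, not taken from this file): for a
 * TLS 1.2 AES-GCM-128 application_data record (type 23) with 100 bytes of
 * plaintext, an 8-byte explicit nonce and a 16-byte tag,
 * pkt_len = 100 + 16 + 8 = 124, so the prepended header is:
 *
 *	buf[0..4] = 17 03 03 00 7c
 */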

static inline
void tls_make_aad(char *buf, size_t size, char *record_sequence,
		  unsigned char record_type, struct tls_prot_info *prot)
{
	/* TLS 1.2 authenticates seq_num || type || version || length;
	 * TLS 1.3 authenticates the record header itself, whose length
	 * field also covers the tag.
	 */
	if (prot->version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, prot->rec_seq_size);
		buf += 8;
	} else {
		size += prot->tag_size;
	}

	buf[0] = prot->version == TLS_1_3_VERSION ?
		 TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}

#endif /* _TLS_INT_H */