// SPDX-License-Identifier: GPL-2.0-only
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/tcp.h>
#include <net/protocol.h>

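/* Walk the segment list and flag the segment that contains the byte
 * identified by ts_seq (the tskey of the original GSO skb) so that a
 * software TX timestamp is generated for that segment only.
 */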
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

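/* IPv4 entry point for TCP segmentation offload: validate the GSO type and
 * the TCP header, set up the pseudo-header checksum if the stack has not
 * already done so, then hand the skb to the protocol-independent
 * tcp_gso_segment().
 */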
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

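/* Split a TCP GSO skb into MSS-sized segments and fix up the sequence
 * numbers, FIN/PSH/CWR flags and checksums of every resulting segment.
 * Returns the segment list, NULL if the skb can be sent as-is, or an
 * ERR_PTR() on failure.
 */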
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

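	/* All but the last segment: clear FIN/PSH, apply the incrementally
	 * adjusted checksum, advance the sequence number by one MSS per
	 * segment, and clear CWR on every segment after the first so the
	 * ECN CWR flag is carried only once.
	 */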
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

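	/* The last segment may be shorter than one MSS: recompute its
	 * checksum delta from the length actually left in it.
	 */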
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

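/* Try to coalesce an incoming TCP segment with a packet already held on the
 * GRO list.  Ports, the ACK sequence number, the TCP options and all flags
 * other than CWR/FIN/PSH must match, CWR must not be set on the new segment,
 * and the payload must be in order and no larger than the flow's MSS.
 * Returns the held packet that should be flushed, if any.
 */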
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

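	/* The full TCP header, including options, is now reachable; advance
	 * the GRO offset past it so skb_gro_len() below covers only payload.
	 */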
	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

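	/* Look for a held packet of the same flow: the source and destination
	 * ports are compared as a single 32-bit word.  Packets already marked
	 * as belonging to a different flow by lower layers are skipped.
	 */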
	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on whether
	 * we continue this flow as an atomic flow with a fixed ID or if we
	 * use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

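	/* Only merge in-order, full-sized segments: flush if the new segment
	 * is larger than the flow's MSS, if it does not start exactly where
	 * the held packet ends, or (with TLS offload) if the decryption
	 * status differs.
	 */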
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

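/* Finalize a coalesced GRO packet: point the checksum fields at the TCP
 * header, carry the merged segment count into gso_segs, and record ECN
 * and encapsulation state so the packet can be re-segmented later.
 */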
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

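/* IPv4 GRO completion: seed th->check with the pseudo-header checksum and
 * record the GSO type before calling the generic tcp_gro_complete().
 */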
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

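/* Offload callbacks registered for IPPROTO_TCP at boot by
 * tcpv4_offload_init().
 */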
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}