#ifndef _NET_IPV6_GRO_H
#define _NET_IPV6_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* jiffies when first packet was created/queued */
	unsigned long age;

/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* portion of the cb set to zero at every gro iteration */
	struct_group(zeroed,

		/* Start offset for remote checksum offload */
		u16	gro_remcsum_start;

		/* This is non-zero if the packet may be of the same flow. */
		u8	same_flow:1;

		/* Used in tunnel GRO receive */
		u8	encap_mark:1;

		/* GRO checksum is valid */
		u8	csum_valid:1;

		/* Number of checksums via CHECKSUM_UNNECESSARY */
		u8	csum_cnt:3;

		/* Free the skb? */
		u8	free:2;

		/* Used in foo-over-udp, set in udp[46]_gro_receive */
		u8	is_ipv6:1;

		/* Used in GRE, set in fou/gue_gro_receive */
		u8	is_fou:1;

		/* Used to determine if flush_id can be ignored */
		u8	is_atomic:1;

		/* Number of gro_receive callbacks this packet already went through */
		u8	recursion_counter:4;

		/* GRO is done by frag_list pointer chaining. */
		u8	is_flist:1;
	);

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}
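
/*
 * Illustrative usage sketch, not part of the original header: an
 * encapsulation protocol's ->gro_receive() hands the inner headers to the
 * next-layer handler through call_gro_receive() so that the per-skb
 * recursion counter above is honoured; "ptype" below is a placeholder for
 * the looked-up struct packet_offload.
 *
 *	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 */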

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
						wsum_negate(NAPI_GRO_CB(skb)->csum)));
}

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		 skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fallback to normal path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,	\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)
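
/*
 * Illustrative usage sketch, not part of the original header: a transport
 * GRO receive handler (UDP over IPv4 is the model here) typically validates
 * the checksum once before aggregating, roughly as follows; "uh" is a
 * placeholder for the parsed UDP header and error handling is omitted.
 *
 *	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
 *						 inet_gro_compute_pseudo))
 *		goto flush;
 *	else if (uh->check)
 *		skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
 *					     inet_gro_compute_pseudo);
 */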

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif
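
/*
 * Illustrative usage sketch, not part of the original header: a tunnel GRO
 * receive handler that supports remote checksum offload pairs the remcsum
 * helpers roughly as follows ("hdr", "off", "start", "offset" and "nopartial"
 * are placeholders, error handling omitted):
 *
 *	struct gro_remcsum grc;
 *
 *	skb_gro_remcsum_init(&grc);
 *	hdr = skb_gro_remcsum_process(skb, hdr, off, sizeof(*hdr),
 *				      start, offset, &grc, nopartial);
 *	...
 *	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 */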

INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb_gro_len(skb), proto, 0));
}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);


/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}


/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass up GRO_NORMAL SKBs to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
		gro_normal_list(napi);
}

#endif /* _NET_IPV6_GRO_H */