0010 #ifndef _LINUX_SKBUFF_H
0011 #define _LINUX_SKBUFF_H
0012
0013 #include <linux/kernel.h>
0014 #include <linux/compiler.h>
0015 #include <linux/time.h>
0016 #include <linux/bug.h>
0017 #include <linux/bvec.h>
0018 #include <linux/cache.h>
0019 #include <linux/rbtree.h>
0020 #include <linux/socket.h>
0021 #include <linux/refcount.h>
0022
0023 #include <linux/atomic.h>
0024 #include <asm/types.h>
0025 #include <linux/spinlock.h>
0026 #include <linux/net.h>
0027 #include <linux/textsearch.h>
0028 #include <net/checksum.h>
0029 #include <linux/rcupdate.h>
0030 #include <linux/hrtimer.h>
0031 #include <linux/dma-mapping.h>
0032 #include <linux/netdev_features.h>
0033 #include <linux/sched.h>
0034 #include <linux/sched/clock.h>
0035 #include <net/flow_dissector.h>
0036 #include <linux/splice.h>
0037 #include <linux/in6.h>
0038 #include <linux/if_packet.h>
0039 #include <linux/llist.h>
0040 #include <net/flow.h>
0041 #include <net/page_pool.h>
0042 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
0043 #include <linux/netfilter/nf_conntrack_common.h>
0044 #endif
0045 #include <net/net_debug.h>
0046 #include <net/dropreason.h>
0253 #define CHECKSUM_NONE 0
0254 #define CHECKSUM_UNNECESSARY 1
0255 #define CHECKSUM_COMPLETE 2
0256 #define CHECKSUM_PARTIAL 3
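/*
 * A minimal sketch of how skb->ip_summed is typically set with the values
 * above (hw_csum is a hypothetical value read from hardware; struct udphdr
 * needs <linux/udp.h>, skb_transport_header() is defined further down):
 *
 *	// RX, device verified the L4 checksum:
 *	skb->ip_summed = CHECKSUM_UNNECESSARY;
 *
 *	// RX, device supplied the ones'-complement sum over the packet:
 *	skb->ip_summed = CHECKSUM_COMPLETE;
 *	skb->csum = hw_csum;
 *
 *	// TX, ask the device to insert the checksum at csum_start + csum_offset:
 *	skb->ip_summed = CHECKSUM_PARTIAL;
 *	skb->csum_start = skb_transport_header(skb) - skb->head;
 *	skb->csum_offset = offsetof(struct udphdr, check);
 */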
0257
0258
0259 #define SKB_MAX_CSUM_LEVEL 3
0260
0261 #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
0262 #define SKB_WITH_OVERHEAD(X) \
0263 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
0264 #define SKB_MAX_ORDER(X, ORDER) \
0265 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
0266 #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
0267 #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
0268
0269
0270 #define SKB_TRUESIZE(X) ((X) + \
0271 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
0272 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
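/*
 * A minimal sketch of how these sizing macros are commonly combined
 * (len is a hypothetical payload length):
 *
 *	// Largest linear data area that, together with skb_shared_info,
 *	// still fits in a single page:
 *	unsigned int max_head = SKB_WITH_OVERHEAD(PAGE_SIZE);
 *
 *	// Rough memory charge for an skb carrying len bytes of data:
 *	unsigned int charge = SKB_TRUESIZE(len);
 */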
0273
0274 struct ahash_request;
0275 struct net_device;
0276 struct scatterlist;
0277 struct pipe_inode_info;
0278 struct iov_iter;
0279 struct napi_struct;
0280 struct bpf_prog;
0281 union bpf_attr;
0282 struct skb_ext;
0283
0284 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
0285 struct nf_bridge_info {
0286 enum {
0287 BRNF_PROTO_UNCHANGED,
0288 BRNF_PROTO_8021Q,
0289 BRNF_PROTO_PPPOE
0290 } orig_proto:8;
0291 u8 pkt_otherhost:1;
0292 u8 in_prerouting:1;
0293 u8 bridged_dnat:1;
0294 __u16 frag_max_size;
0295 struct net_device *physindev;
0296
0297
0298 struct net_device *physoutdev;
0299 union {
0300
0301 __be32 ipv4_daddr;
0302 struct in6_addr ipv6_daddr;
0303
0304
0305
0306
0307
0308 char neigh_header[8];
0309 };
0310 };
0311 #endif
0312
0313 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
0314
0315
0316
0317
0318 struct tc_skb_ext {
0319 __u32 chain;
0320 __u16 mru;
0321 __u16 zone;
0322 u8 post_ct:1;
0323 u8 post_ct_snat:1;
0324 u8 post_ct_dnat:1;
0325 };
0326 #endif
0327
0328 struct sk_buff_head {
0329
0330 struct_group_tagged(sk_buff_list, list,
0331 struct sk_buff *next;
0332 struct sk_buff *prev;
0333 );
0334
0335 __u32 qlen;
0336 spinlock_t lock;
0337 };
0338
0339 struct sk_buff;
0340
0341
0342
0343
0344
0345
0346
0347
0348 #if (65536/PAGE_SIZE + 1) < 16
0349 #define MAX_SKB_FRAGS 16UL
0350 #else
0351 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
0352 #endif
0353 extern int sysctl_max_skb_frags;
0354
0355
0356
0357
0358 #define GSO_BY_FRAGS 0xFFFF
0359
0360 typedef struct bio_vec skb_frag_t;
0361
0362
0363
0364
0365
0366 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
0367 {
0368 return frag->bv_len;
0369 }
0370
0371
0372
0373
0374
0375
0376 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
0377 {
0378 frag->bv_len = size;
0379 }
0380
0381
0382
0383
0384
0385
0386 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
0387 {
0388 frag->bv_len += delta;
0389 }
0390
0391
0392
0393
0394
0395
0396 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
0397 {
0398 frag->bv_len -= delta;
0399 }
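/*
 * A minimal sketch summing the sizes of all page fragments of an skb with
 * the accessors above (skb_shinfo() is defined further down in this file):
 *
 *	unsigned int i, total = 0;
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 *		total += skb_frag_size(&skb_shinfo(skb)->frags[i]);
 */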
0400
0401
0402
0403
0404
0405 static inline bool skb_frag_must_loop(struct page *p)
0406 {
0407 #if defined(CONFIG_HIGHMEM)
0408 if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
0409 return true;
0410 #endif
0411 return false;
0412 }
0413
0431 #define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
0432 for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
0433 p_off = (f_off) & (PAGE_SIZE - 1), \
0434 p_len = skb_frag_must_loop(p) ? \
0435 min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
0436 copied = 0; \
0437 copied < f_len; \
0438 copied += p_len, p++, p_off = 0, \
0439 p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
0440
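/*
 * A minimal sketch of walking one fragment with skb_frag_foreach_page()
 * (frag is a hypothetical skb_frag_t *; skb_frag_page()/skb_frag_off() are
 * defined further down, kmap_local_page() comes from <linux/highmem.h>):
 *
 *	struct page *p;
 *	u32 p_off, p_len, copied;
 *
 *	skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		void *vaddr = kmap_local_page(p);
 *
 *		// use p_len bytes starting at vaddr + p_off
 *		kunmap_local(vaddr);
 *	}
 */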
0441 #define HAVE_HW_TIME_STAMP
0442
0459 struct skb_shared_hwtstamps {
0460 union {
0461 ktime_t hwtstamp;
0462 void *netdev_data;
0463 };
0464 };
0465
0466
0467 enum {
0468
0469 SKBTX_HW_TSTAMP = 1 << 0,
0470
0471
0472 SKBTX_SW_TSTAMP = 1 << 1,
0473
0474
0475 SKBTX_IN_PROGRESS = 1 << 2,
0476
0477
0478 SKBTX_HW_TSTAMP_USE_CYCLES = 1 << 3,
0479
0480
0481 SKBTX_WIFI_STATUS = 1 << 4,
0482
0483
0484 SKBTX_HW_TSTAMP_NETDEV = 1 << 5,
0485
0486
0487 SKBTX_SCHED_TSTAMP = 1 << 6,
0488 };
0489
0490 #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
0491 SKBTX_SCHED_TSTAMP)
0492 #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | \
0493 SKBTX_HW_TSTAMP_USE_CYCLES | \
0494 SKBTX_ANY_SW_TSTAMP)
0495
0496
0497 enum {
0498
0499 SKBFL_ZEROCOPY_ENABLE = BIT(0),
0500
0501
0502
0503
0504
0505
0506 SKBFL_SHARED_FRAG = BIT(1),
0507
0508
0509
0510
0511 SKBFL_PURE_ZEROCOPY = BIT(2),
0512
0513 SKBFL_DONT_ORPHAN = BIT(3),
0514
0515
0516
0517
0518 SKBFL_MANAGED_FRAG_REFS = BIT(4),
0519 };
0520
0521 #define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
0522 #define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
0523 SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)
0524
0525
0526
0527
0528
0529
0530
0531
0532
0533 struct ubuf_info {
0534 void (*callback)(struct sk_buff *, struct ubuf_info *,
0535 bool zerocopy_success);
0536 union {
0537 struct {
0538 unsigned long desc;
0539 void *ctx;
0540 };
0541 struct {
0542 u32 id;
0543 u16 len;
0544 u16 zerocopy:1;
0545 u32 bytelen;
0546 };
0547 };
0548 refcount_t refcnt;
0549 u8 flags;
0550
0551 struct mmpin {
0552 struct user_struct *user;
0553 unsigned int num_pg;
0554 } mmp;
0555 };
0556
0557 #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
0558
0559 int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
0560 void mm_unaccount_pinned_pages(struct mmpin *mmp);
0561
0562
0563
0564
0565 struct skb_shared_info {
0566 __u8 flags;
0567 __u8 meta_len;
0568 __u8 nr_frags;
0569 __u8 tx_flags;
0570 unsigned short gso_size;
0571
0572 unsigned short gso_segs;
0573 struct sk_buff *frag_list;
0574 struct skb_shared_hwtstamps hwtstamps;
0575 unsigned int gso_type;
0576 u32 tskey;
0577
0578
0579
0580
0581 atomic_t dataref;
0582 unsigned int xdp_frags_size;
0583
0584
0585
0586 void * destructor_arg;
0587
0588
0589 skb_frag_t frags[MAX_SKB_FRAGS];
0590 };
0591
0619 #define SKB_DATAREF_SHIFT 16
0620 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
0621
0622
0623 enum {
0624 SKB_FCLONE_UNAVAILABLE,
0625 SKB_FCLONE_ORIG,
0626 SKB_FCLONE_CLONE,
0627 };
0628
0629 enum {
0630 SKB_GSO_TCPV4 = 1 << 0,
0631
0632
0633 SKB_GSO_DODGY = 1 << 1,
0634
0635
0636 SKB_GSO_TCP_ECN = 1 << 2,
0637
0638 SKB_GSO_TCP_FIXEDID = 1 << 3,
0639
0640 SKB_GSO_TCPV6 = 1 << 4,
0641
0642 SKB_GSO_FCOE = 1 << 5,
0643
0644 SKB_GSO_GRE = 1 << 6,
0645
0646 SKB_GSO_GRE_CSUM = 1 << 7,
0647
0648 SKB_GSO_IPXIP4 = 1 << 8,
0649
0650 SKB_GSO_IPXIP6 = 1 << 9,
0651
0652 SKB_GSO_UDP_TUNNEL = 1 << 10,
0653
0654 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
0655
0656 SKB_GSO_PARTIAL = 1 << 12,
0657
0658 SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
0659
0660 SKB_GSO_SCTP = 1 << 14,
0661
0662 SKB_GSO_ESP = 1 << 15,
0663
0664 SKB_GSO_UDP = 1 << 16,
0665
0666 SKB_GSO_UDP_L4 = 1 << 17,
0667
0668 SKB_GSO_FRAGLIST = 1 << 18,
0669 };
0670
0671 #if BITS_PER_LONG > 32
0672 #define NET_SKBUFF_DATA_USES_OFFSET 1
0673 #endif
0674
0675 #ifdef NET_SKBUFF_DATA_USES_OFFSET
0676 typedef unsigned int sk_buff_data_t;
0677 #else
0678 typedef unsigned char *sk_buff_data_t;
0679 #endif
0680
0835 struct sk_buff {
0836 union {
0837 struct {
0838
0839 struct sk_buff *next;
0840 struct sk_buff *prev;
0841
0842 union {
0843 struct net_device *dev;
0844
0845
0846
0847
0848 unsigned long dev_scratch;
0849 };
0850 };
0851 struct rb_node rbnode;
0852 struct list_head list;
0853 struct llist_node ll_node;
0854 };
0855
0856 union {
0857 struct sock *sk;
0858 int ip_defrag_offset;
0859 };
0860
0861 union {
0862 ktime_t tstamp;
0863 u64 skb_mstamp_ns;
0864 };
0865
0866
0867
0868
0869
0870
0871 char cb[48] __aligned(8);
0872
0873 union {
0874 struct {
0875 unsigned long _skb_refdst;
0876 void (*destructor)(struct sk_buff *skb);
0877 };
0878 struct list_head tcp_tsorted_anchor;
0879 #ifdef CONFIG_NET_SOCK_MSG
0880 unsigned long _sk_redir;
0881 #endif
0882 };
0883
0884 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
0885 unsigned long _nfct;
0886 #endif
0887 unsigned int len,
0888 data_len;
0889 __u16 mac_len,
0890 hdr_len;
0891
0892
0893
0894
0895 __u16 queue_mapping;
0896
0897
0898 #ifdef __BIG_ENDIAN_BITFIELD
0899 #define CLONED_MASK (1 << 7)
0900 #else
0901 #define CLONED_MASK 1
0902 #endif
0903 #define CLONED_OFFSET offsetof(struct sk_buff, __cloned_offset)
0904
0905
0906 __u8 __cloned_offset[0];
0907
0908 __u8 cloned:1,
0909 nohdr:1,
0910 fclone:2,
0911 peeked:1,
0912 head_frag:1,
0913 pfmemalloc:1,
0914 pp_recycle:1;
0915 #ifdef CONFIG_SKB_EXTENSIONS
0916 __u8 active_extensions;
0917 #endif
0918
0919
0920
0921
0922 struct_group(headers,
0923
0924
0925 __u8 __pkt_type_offset[0];
0926
0927 __u8 pkt_type:3;
0928 __u8 ignore_df:1;
0929 __u8 nf_trace:1;
0930 __u8 ip_summed:2;
0931 __u8 ooo_okay:1;
0932
0933 __u8 l4_hash:1;
0934 __u8 sw_hash:1;
0935 __u8 wifi_acked_valid:1;
0936 __u8 wifi_acked:1;
0937 __u8 no_fcs:1;
0938
0939 __u8 encapsulation:1;
0940 __u8 encap_hdr_csum:1;
0941 __u8 csum_valid:1;
0942
0943
0944 __u8 __pkt_vlan_present_offset[0];
0945
0946 __u8 vlan_present:1;
0947 __u8 csum_complete_sw:1;
0948 __u8 csum_level:2;
0949 __u8 dst_pending_confirm:1;
0950 __u8 mono_delivery_time:1;
0951 #ifdef CONFIG_NET_CLS_ACT
0952 __u8 tc_skip_classify:1;
0953 __u8 tc_at_ingress:1;
0954 #endif
0955 #ifdef CONFIG_IPV6_NDISC_NODETYPE
0956 __u8 ndisc_nodetype:2;
0957 #endif
0958
0959 __u8 ipvs_property:1;
0960 __u8 inner_protocol_type:1;
0961 __u8 remcsum_offload:1;
0962 #ifdef CONFIG_NET_SWITCHDEV
0963 __u8 offload_fwd_mark:1;
0964 __u8 offload_l3_fwd_mark:1;
0965 #endif
0966 __u8 redirected:1;
0967 #ifdef CONFIG_NET_REDIRECT
0968 __u8 from_ingress:1;
0969 #endif
0970 #ifdef CONFIG_NETFILTER_SKIP_EGRESS
0971 __u8 nf_skip_egress:1;
0972 #endif
0973 #ifdef CONFIG_TLS_DEVICE
0974 __u8 decrypted:1;
0975 #endif
0976 __u8 slow_gro:1;
0977 __u8 csum_not_inet:1;
0978
0979 #ifdef CONFIG_NET_SCHED
0980 __u16 tc_index;
0981 #endif
0982
0983 union {
0984 __wsum csum;
0985 struct {
0986 __u16 csum_start;
0987 __u16 csum_offset;
0988 };
0989 };
0990 __u32 priority;
0991 int skb_iif;
0992 __u32 hash;
0993 __be16 vlan_proto;
0994 __u16 vlan_tci;
0995 #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
0996 union {
0997 unsigned int napi_id;
0998 unsigned int sender_cpu;
0999 };
1000 #endif
1001 u16 alloc_cpu;
1002 #ifdef CONFIG_NETWORK_SECMARK
1003 __u32 secmark;
1004 #endif
1005
1006 union {
1007 __u32 mark;
1008 __u32 reserved_tailroom;
1009 };
1010
1011 union {
1012 __be16 inner_protocol;
1013 __u8 inner_ipproto;
1014 };
1015
1016 __u16 inner_transport_header;
1017 __u16 inner_network_header;
1018 __u16 inner_mac_header;
1019
1020 __be16 protocol;
1021 __u16 transport_header;
1022 __u16 network_header;
1023 __u16 mac_header;
1024
1025 #ifdef CONFIG_KCOV
1026 u64 kcov_handle;
1027 #endif
1028
1029 );
1030
1031
1032 sk_buff_data_t tail;
1033 sk_buff_data_t end;
1034 unsigned char *head,
1035 *data;
1036 unsigned int truesize;
1037 refcount_t users;
1038
1039 #ifdef CONFIG_SKB_EXTENSIONS
1040
1041 struct skb_ext *extensions;
1042 #endif
1043 };
1044
1045
1046 #ifdef __BIG_ENDIAN_BITFIELD
1047 #define PKT_TYPE_MAX (7 << 5)
1048 #else
1049 #define PKT_TYPE_MAX 7
1050 #endif
1051 #define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset)
1052
1053
1054
1055
1056 #ifdef __BIG_ENDIAN_BITFIELD
1057 #define PKT_VLAN_PRESENT_BIT 7
1058 #define TC_AT_INGRESS_MASK (1 << 0)
1059 #define SKB_MONO_DELIVERY_TIME_MASK (1 << 2)
1060 #else
1061 #define PKT_VLAN_PRESENT_BIT 0
1062 #define TC_AT_INGRESS_MASK (1 << 7)
1063 #define SKB_MONO_DELIVERY_TIME_MASK (1 << 5)
1064 #endif
1065 #define PKT_VLAN_PRESENT_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset)
1066
1067 #ifdef __KERNEL__
1068
1069
1070
1071
1072 #define SKB_ALLOC_FCLONE 0x01
1073 #define SKB_ALLOC_RX 0x02
1074 #define SKB_ALLOC_NAPI 0x04
1075
1076
1077
1078
1079
1080 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
1081 {
1082 return unlikely(skb->pfmemalloc);
1083 }
1084
1085
1086
1087
1088
1089 #define SKB_DST_NOREF 1UL
1090 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
1091
1092
1093
1094
1095
1096
1097
1098 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
1099 {
1100
1101
1102
1103 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
1104 !rcu_read_lock_held() &&
1105 !rcu_read_lock_bh_held());
1106 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
1107 }
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
1118 {
1119 skb->slow_gro |= !!dst;
1120 skb->_skb_refdst = (unsigned long)dst;
1121 }
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
1134 {
1135 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
1136 skb->slow_gro |= !!dst;
1137 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
1138 }
1139
1140
1141
1142
1143
1144 static inline bool skb_dst_is_noref(const struct sk_buff *skb)
1145 {
1146 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
1147 }
1148
1149
1150
1151
1152
1153 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
1154 {
1155 return (struct rtable *)skb_dst(skb);
1156 }
1157
1158
1159
1160
1161
1162 static inline bool skb_pkt_type_ok(u32 ptype)
1163 {
1164 return ptype <= PACKET_OTHERHOST;
1165 }
1166
1167
1168
1169
1170
1171 static inline unsigned int skb_napi_id(const struct sk_buff *skb)
1172 {
1173 #ifdef CONFIG_NET_RX_BUSY_POLL
1174 return skb->napi_id;
1175 #else
1176 return 0;
1177 #endif
1178 }
1179
1180
1181
1182
1183
1184
1185
1186 static inline bool skb_unref(struct sk_buff *skb)
1187 {
1188 if (unlikely(!skb))
1189 return false;
1190 if (likely(refcount_read(&skb->users) == 1))
1191 smp_rmb();
1192 else if (likely(!refcount_dec_and_test(&skb->users)))
1193 return false;
1194
1195 return true;
1196 }
1197
1198 void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
1199
1200
1201
1202
1203
1204 static inline void kfree_skb(struct sk_buff *skb)
1205 {
1206 kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
1207 }
1208
1209 void skb_release_head_state(struct sk_buff *skb);
1210 void kfree_skb_list_reason(struct sk_buff *segs,
1211 enum skb_drop_reason reason);
1212 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
1213 void skb_tx_error(struct sk_buff *skb);
1214
1215 static inline void kfree_skb_list(struct sk_buff *segs)
1216 {
1217 kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
1218 }
1219
1220 #ifdef CONFIG_TRACEPOINTS
1221 void consume_skb(struct sk_buff *skb);
1222 #else
1223 static inline void consume_skb(struct sk_buff *skb)
1224 {
1225 return kfree_skb(skb);
1226 }
1227 #endif
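/*
 * A minimal sketch of choosing between the two free paths above
 * (some_error is a hypothetical condition):
 *
 *	if (some_error)
 *		kfree_skb(skb);		// counted as a drop; use
 *					// kfree_skb_reason() for a specific reason
 *	else
 *		consume_skb(skb);	// normal end of life, not a drop
 */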
1228
1229 void __consume_stateless_skb(struct sk_buff *skb);
1230 void __kfree_skb(struct sk_buff *skb);
1231 extern struct kmem_cache *skbuff_head_cache;
1232
1233 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1234 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
1235 bool *fragstolen, int *delta_truesize);
1236
1237 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
1238 int node);
1239 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
1240 struct sk_buff *build_skb(void *data, unsigned int frag_size);
1241 struct sk_buff *build_skb_around(struct sk_buff *skb,
1242 void *data, unsigned int frag_size);
1243 void skb_attempt_defer_free(struct sk_buff *skb);
1244
1245 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
1246
1247
1248
1249
1250
1251
1252
1253
1254 static inline struct sk_buff *alloc_skb(unsigned int size,
1255 gfp_t priority)
1256 {
1257 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
1258 }
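/*
 * A minimal allocation sketch (hlen, dlen and payload are hypothetical;
 * skb_reserve() and skb_put_data() are defined further down):
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			// leave headroom for headers
 *	skb_put_data(skb, payload, dlen);	// append dlen bytes of payload
 */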
1259
1260 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
1261 unsigned long data_len,
1262 int max_page_order,
1263 int *errcode,
1264 gfp_t gfp_mask);
1265 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
1266
1267
1268 struct sk_buff_fclones {
1269 struct sk_buff skb1;
1270
1271 struct sk_buff skb2;
1272
1273 refcount_t fclone_ref;
1274 };
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285 static inline bool skb_fclone_busy(const struct sock *sk,
1286 const struct sk_buff *skb)
1287 {
1288 const struct sk_buff_fclones *fclones;
1289
1290 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1291
1292 return skb->fclone == SKB_FCLONE_ORIG &&
1293 refcount_read(&fclones->fclone_ref) > 1 &&
1294 READ_ONCE(fclones->skb2.sk) == sk;
1295 }
1296
1297
1298
1299
1300
1301
1302
1303
1304 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1305 gfp_t priority)
1306 {
1307 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
1308 }
1309
1310 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1311 void skb_headers_offset_update(struct sk_buff *skb, int off);
1312 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1313 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1314 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
1315 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1316 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1317 gfp_t gfp_mask, bool fclone);
1318 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
1319 gfp_t gfp_mask)
1320 {
1321 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
1322 }
1323
1324 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1325 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1326 unsigned int headroom);
1327 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
1328 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1329 int newtailroom, gfp_t priority);
1330 int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1331 int offset, int len);
1332 int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1333 int offset, int len);
1334 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1335 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348 static inline int skb_pad(struct sk_buff *skb, int pad)
1349 {
1350 return __skb_pad(skb, pad, true);
1351 }
1352 #define dev_kfree_skb(a) consume_skb(a)
1353
1354 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1355 int offset, size_t size);
1356
1357 struct skb_seq_state {
1358 __u32 lower_offset;
1359 __u32 upper_offset;
1360 __u32 frag_idx;
1361 __u32 stepped_offset;
1362 struct sk_buff *root_skb;
1363 struct sk_buff *cur_skb;
1364 __u8 *frag_data;
1365 __u32 frag_off;
1366 };
1367
1368 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1369 unsigned int to, struct skb_seq_state *st);
1370 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1371 struct skb_seq_state *st);
1372 void skb_abort_seq_read(struct skb_seq_state *st);
1373
1374 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1375 unsigned int to, struct ts_config *config);
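/*
 * A minimal sketch of the sequential read API declared above:
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		// process len contiguous bytes at data
 *		consumed += len;
 *	}
 *	// skb_abort_seq_read(&st) is only needed when stopping early
 */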
1376
1403 enum pkt_hash_types {
1404 PKT_HASH_TYPE_NONE,
1405 PKT_HASH_TYPE_L2,
1406 PKT_HASH_TYPE_L3,
1407 PKT_HASH_TYPE_L4,
1408 };
1409
1410 static inline void skb_clear_hash(struct sk_buff *skb)
1411 {
1412 skb->hash = 0;
1413 skb->sw_hash = 0;
1414 skb->l4_hash = 0;
1415 }
1416
1417 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1418 {
1419 if (!skb->l4_hash)
1420 skb_clear_hash(skb);
1421 }
1422
1423 static inline void
1424 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1425 {
1426 skb->l4_hash = is_l4;
1427 skb->sw_hash = is_sw;
1428 skb->hash = hash;
1429 }
1430
1431 static inline void
1432 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1433 {
1434
1435 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1436 }
1437
1438 static inline void
1439 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1440 {
1441 __skb_set_hash(skb, hash, true, is_l4);
1442 }
1443
1444 void __skb_get_hash(struct sk_buff *skb);
1445 u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1446 u32 skb_get_poff(const struct sk_buff *skb);
1447 u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
1448 const struct flow_keys_basic *keys, int hlen);
1449 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1450 const void *data, int hlen_proto);
1451
1452 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1453 int thoff, u8 ip_proto)
1454 {
1455 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1456 }
1457
1458 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1459 const struct flow_dissector_key *key,
1460 unsigned int key_count);
1461
1462 struct bpf_flow_dissector;
1463 bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
1464 __be16 proto, int nhoff, int hlen, unsigned int flags);
1465
1466 bool __skb_flow_dissect(const struct net *net,
1467 const struct sk_buff *skb,
1468 struct flow_dissector *flow_dissector,
1469 void *target_container, const void *data,
1470 __be16 proto, int nhoff, int hlen, unsigned int flags);
1471
1472 static inline bool skb_flow_dissect(const struct sk_buff *skb,
1473 struct flow_dissector *flow_dissector,
1474 void *target_container, unsigned int flags)
1475 {
1476 return __skb_flow_dissect(NULL, skb, flow_dissector,
1477 target_container, NULL, 0, 0, 0, flags);
1478 }
1479
1480 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1481 struct flow_keys *flow,
1482 unsigned int flags)
1483 {
1484 memset(flow, 0, sizeof(*flow));
1485 return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
1486 flow, NULL, 0, 0, 0, flags);
1487 }
1488
1489 static inline bool
1490 skb_flow_dissect_flow_keys_basic(const struct net *net,
1491 const struct sk_buff *skb,
1492 struct flow_keys_basic *flow,
1493 const void *data, __be16 proto,
1494 int nhoff, int hlen, unsigned int flags)
1495 {
1496 memset(flow, 0, sizeof(*flow));
1497 return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
1498 data, proto, nhoff, hlen, flags);
1499 }
1500
1501 void skb_flow_dissect_meta(const struct sk_buff *skb,
1502 struct flow_dissector *flow_dissector,
1503 void *target_container);
1504
1505
1506
1507
1508
1509 void
1510 skb_flow_dissect_ct(const struct sk_buff *skb,
1511 struct flow_dissector *flow_dissector,
1512 void *target_container,
1513 u16 *ctinfo_map, size_t mapsize,
1514 bool post_ct, u16 zone);
1515 void
1516 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1517 struct flow_dissector *flow_dissector,
1518 void *target_container);
1519
1520 void skb_flow_dissect_hash(const struct sk_buff *skb,
1521 struct flow_dissector *flow_dissector,
1522 void *target_container);
1523
1524 static inline __u32 skb_get_hash(struct sk_buff *skb)
1525 {
1526 if (!skb->l4_hash && !skb->sw_hash)
1527 __skb_get_hash(skb);
1528
1529 return skb->hash;
1530 }
1531
1532 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1533 {
1534 if (!skb->l4_hash && !skb->sw_hash) {
1535 struct flow_keys keys;
1536 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1537
1538 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1539 }
1540
1541 return skb->hash;
1542 }
1543
1544 __u32 skb_get_hash_perturb(const struct sk_buff *skb,
1545 const siphash_key_t *perturb);
1546
1547 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1548 {
1549 return skb->hash;
1550 }
1551
1552 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1553 {
1554 to->hash = from->hash;
1555 to->sw_hash = from->sw_hash;
1556 to->l4_hash = from->l4_hash;
1557 }
1558
1559 static inline void skb_copy_decrypted(struct sk_buff *to,
1560 const struct sk_buff *from)
1561 {
1562 #ifdef CONFIG_TLS_DEVICE
1563 to->decrypted = from->decrypted;
1564 #endif
1565 }
1566
1567 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1568 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1569 {
1570 return skb->head + skb->end;
1571 }
1572
1573 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1574 {
1575 return skb->end;
1576 }
1577
1578 static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
1579 {
1580 skb->end = offset;
1581 }
1582 #else
1583 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1584 {
1585 return skb->end;
1586 }
1587
1588 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1589 {
1590 return skb->end - skb->head;
1591 }
1592
1593 static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
1594 {
1595 skb->end = skb->head + offset;
1596 }
1597 #endif
1598
1599 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
1600 struct ubuf_info *uarg);
1601
1602 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
1603
1604 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
1605 bool success);
1606
1607 int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
1608 struct sk_buff *skb, struct iov_iter *from,
1609 size_t length);
1610
1611 static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
1612 struct msghdr *msg, int len)
1613 {
1614 return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len);
1615 }
1616
1617 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1618 struct msghdr *msg, int len,
1619 struct ubuf_info *uarg);
1620
1621
1622 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1623
1624 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1625 {
1626 return &skb_shinfo(skb)->hwtstamps;
1627 }
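/*
 * A minimal sketch of an RX driver filling in a hardware timestamp
 * (rx_ts_ns is a hypothetical nanosecond value read from the NIC):
 *
 *	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(rx_ts_ns);
 */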
1628
1629 static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
1630 {
1631 bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
1632
1633 return is_zcopy ? skb_uarg(skb) : NULL;
1634 }
1635
1636 static inline bool skb_zcopy_pure(const struct sk_buff *skb)
1637 {
1638 return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
1639 }
1640
1641 static inline bool skb_zcopy_managed(const struct sk_buff *skb)
1642 {
1643 return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS;
1644 }
1645
1646 static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
1647 const struct sk_buff *skb2)
1648 {
1649 return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
1650 }
1651
1652 static inline void net_zcopy_get(struct ubuf_info *uarg)
1653 {
1654 refcount_inc(&uarg->refcnt);
1655 }
1656
1657 static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
1658 {
1659 skb_shinfo(skb)->destructor_arg = uarg;
1660 skb_shinfo(skb)->flags |= uarg->flags;
1661 }
1662
1663 static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
1664 bool *have_ref)
1665 {
1666 if (skb && uarg && !skb_zcopy(skb)) {
1667 if (unlikely(have_ref && *have_ref))
1668 *have_ref = false;
1669 else
1670 net_zcopy_get(uarg);
1671 skb_zcopy_init(skb, uarg);
1672 }
1673 }
1674
1675 static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
1676 {
1677 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
1678 skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
1679 }
1680
1681 static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
1682 {
1683 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
1684 }
1685
1686 static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
1687 {
1688 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
1689 }
1690
1691 static inline void net_zcopy_put(struct ubuf_info *uarg)
1692 {
1693 if (uarg)
1694 uarg->callback(NULL, uarg, true);
1695 }
1696
1697 static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1698 {
1699 if (uarg) {
1700 if (uarg->callback == msg_zerocopy_callback)
1701 msg_zerocopy_put_abort(uarg, have_uref);
1702 else if (have_uref)
1703 net_zcopy_put(uarg);
1704 }
1705 }
1706
1707
1708 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
1709 {
1710 struct ubuf_info *uarg = skb_zcopy(skb);
1711
1712 if (uarg) {
1713 if (!skb_zcopy_is_nouarg(skb))
1714 uarg->callback(skb, uarg, zerocopy_success);
1715
1716 skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
1717 }
1718 }
1719
1720 void __skb_zcopy_downgrade_managed(struct sk_buff *skb);
1721
1722 static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb)
1723 {
1724 if (unlikely(skb_zcopy_managed(skb)))
1725 __skb_zcopy_downgrade_managed(skb);
1726 }
1727
1728 static inline void skb_mark_not_on_list(struct sk_buff *skb)
1729 {
1730 skb->next = NULL;
1731 }
1732
1733
1734 #define skb_list_walk_safe(first, skb, next_skb) \
1735 for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
1736 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
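/*
 * A minimal sketch of consuming a segment list (for instance the output of
 * GSO segmentation) with skb_list_walk_safe():
 *
 *	struct sk_buff *seg, *next;
 *
 *	skb_list_walk_safe(segs, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		// hand seg to the driver, or kfree_skb(seg) on error
 *	}
 */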
1737
1738 static inline void skb_list_del_init(struct sk_buff *skb)
1739 {
1740 __list_del_entry(&skb->list);
1741 skb_mark_not_on_list(skb);
1742 }
1743
1744
1745
1746
1747
1748
1749
1750 static inline int skb_queue_empty(const struct sk_buff_head *list)
1751 {
1752 return list->next == (const struct sk_buff *) list;
1753 }
1754
1755
1756
1757
1758
1759
1760
1761
1762 static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
1763 {
1764 return READ_ONCE(list->next) == (const struct sk_buff *) list;
1765 }
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1776 const struct sk_buff *skb)
1777 {
1778 return skb->next == (const struct sk_buff *) list;
1779 }
1780
1781
1782
1783
1784
1785
1786
1787
1788 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1789 const struct sk_buff *skb)
1790 {
1791 return skb->prev == (const struct sk_buff *) list;
1792 }
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1803 const struct sk_buff *skb)
1804 {
1805
1806
1807
1808 BUG_ON(skb_queue_is_last(list, skb));
1809 return skb->next;
1810 }
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1821 const struct sk_buff *skb)
1822 {
1823
1824
1825
1826 BUG_ON(skb_queue_is_first(list, skb));
1827 return skb->prev;
1828 }
1829
1830
1831
1832
1833
1834
1835
1836
1837 static inline struct sk_buff *skb_get(struct sk_buff *skb)
1838 {
1839 refcount_inc(&skb->users);
1840 return skb;
1841 }
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855 static inline int skb_cloned(const struct sk_buff *skb)
1856 {
1857 return skb->cloned &&
1858 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1859 }
1860
1861 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1862 {
1863 might_sleep_if(gfpflags_allow_blocking(pri));
1864
1865 if (skb_cloned(skb))
1866 return pskb_expand_head(skb, 0, 0, pri);
1867
1868 return 0;
1869 }
1870
1871
1872
1873
1874
1875
1876
1877 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
1878 static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
1879 {
1880 might_sleep_if(gfpflags_allow_blocking(pri));
1881
1882 if (skb_cloned(skb))
1883 return __skb_unclone_keeptruesize(skb, pri);
1884 return 0;
1885 }
1886
1887
1888
1889
1890
1891
1892
1893
1894 static inline int skb_header_cloned(const struct sk_buff *skb)
1895 {
1896 int dataref;
1897
1898 if (!skb->cloned)
1899 return 0;
1900
1901 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1902 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1903 return dataref != 1;
1904 }
1905
1906 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1907 {
1908 might_sleep_if(gfpflags_allow_blocking(pri));
1909
1910 if (skb_header_cloned(skb))
1911 return pskb_expand_head(skb, 0, 0, pri);
1912
1913 return 0;
1914 }
1915
1916
1917
1918
1919
1920
1921
1922 static inline void __skb_header_release(struct sk_buff *skb)
1923 {
1924 skb->nohdr = 1;
1925 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1926 }
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936 static inline int skb_shared(const struct sk_buff *skb)
1937 {
1938 return refcount_read(&skb->users) != 1;
1939 }
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1955 {
1956 might_sleep_if(gfpflags_allow_blocking(pri));
1957 if (skb_shared(skb)) {
1958 struct sk_buff *nskb = skb_clone(skb, pri);
1959
1960 if (likely(nskb))
1961 consume_skb(skb);
1962 else
1963 kfree_skb(skb);
1964 skb = nskb;
1965 }
1966 return skb;
1967 }
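/*
 * A minimal RX-handler sketch using skb_share_check(); NET_RX_DROP comes
 * from <linux/netdevice.h>:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;	// clone failed, original already freed
 *	// skb is now exclusively ours and may be modified
 */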
1968
1989 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1990 gfp_t pri)
1991 {
1992 might_sleep_if(gfpflags_allow_blocking(pri));
1993 if (skb_cloned(skb)) {
1994 struct sk_buff *nskb = skb_copy(skb, pri);
1995
1996
1997 if (likely(nskb))
1998 consume_skb(skb);
1999 else
2000 kfree_skb(skb);
2001 skb = nskb;
2002 }
2003 return skb;
2004 }
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
2020 {
2021 struct sk_buff *skb = list_->next;
2022
2023 if (skb == (struct sk_buff *)list_)
2024 skb = NULL;
2025 return skb;
2026 }
2027
2028
2029
2030
2031
2032
2033
2034 static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
2035 {
2036 return list_->next;
2037 }
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
2049 const struct sk_buff_head *list_)
2050 {
2051 struct sk_buff *next = skb->next;
2052
2053 if (next == (struct sk_buff *)list_)
2054 next = NULL;
2055 return next;
2056 }
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
2072 {
2073 struct sk_buff *skb = READ_ONCE(list_->prev);
2074
2075 if (skb == (struct sk_buff *)list_)
2076 skb = NULL;
2077 return skb;
2078
2079 }
2080
2081
2082
2083
2084
2085
2086
2087 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
2088 {
2089 return list_->qlen;
2090 }
2091
2092
2093
2094
2095
2096
2097
2098
2099 static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
2100 {
2101 return READ_ONCE(list_->qlen);
2102 }
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114 static inline void __skb_queue_head_init(struct sk_buff_head *list)
2115 {
2116 list->prev = list->next = (struct sk_buff *)list;
2117 list->qlen = 0;
2118 }
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128 static inline void skb_queue_head_init(struct sk_buff_head *list)
2129 {
2130 spin_lock_init(&list->lock);
2131 __skb_queue_head_init(list);
2132 }
2133
2134 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
2135 struct lock_class_key *class)
2136 {
2137 skb_queue_head_init(list);
2138 lockdep_set_class(&list->lock, class);
2139 }
2140
2141
2142
2143
2144
2145
2146
2147 static inline void __skb_insert(struct sk_buff *newsk,
2148 struct sk_buff *prev, struct sk_buff *next,
2149 struct sk_buff_head *list)
2150 {
2151
2152
2153
2154 WRITE_ONCE(newsk->next, next);
2155 WRITE_ONCE(newsk->prev, prev);
2156 WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk);
2157 WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk);
2158 WRITE_ONCE(list->qlen, list->qlen + 1);
2159 }
2160
2161 static inline void __skb_queue_splice(const struct sk_buff_head *list,
2162 struct sk_buff *prev,
2163 struct sk_buff *next)
2164 {
2165 struct sk_buff *first = list->next;
2166 struct sk_buff *last = list->prev;
2167
2168 WRITE_ONCE(first->prev, prev);
2169 WRITE_ONCE(prev->next, first);
2170
2171 WRITE_ONCE(last->next, next);
2172 WRITE_ONCE(next->prev, last);
2173 }
2174
2175
2176
2177
2178
2179
2180 static inline void skb_queue_splice(const struct sk_buff_head *list,
2181 struct sk_buff_head *head)
2182 {
2183 if (!skb_queue_empty(list)) {
2184 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
2185 head->qlen += list->qlen;
2186 }
2187 }
2188
2189
2190
2191
2192
2193
2194
2195
2196 static inline void skb_queue_splice_init(struct sk_buff_head *list,
2197 struct sk_buff_head *head)
2198 {
2199 if (!skb_queue_empty(list)) {
2200 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
2201 head->qlen += list->qlen;
2202 __skb_queue_head_init(list);
2203 }
2204 }
2205
2206
2207
2208
2209
2210
2211 static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
2212 struct sk_buff_head *head)
2213 {
2214 if (!skb_queue_empty(list)) {
2215 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
2216 head->qlen += list->qlen;
2217 }
2218 }
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
2229 struct sk_buff_head *head)
2230 {
2231 if (!skb_queue_empty(list)) {
2232 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
2233 head->qlen += list->qlen;
2234 __skb_queue_head_init(list);
2235 }
2236 }
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249 static inline void __skb_queue_after(struct sk_buff_head *list,
2250 struct sk_buff *prev,
2251 struct sk_buff *newsk)
2252 {
2253 __skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list);
2254 }
2255
2256 void skb_append(struct sk_buff *old, struct sk_buff *newsk,
2257 struct sk_buff_head *list);
2258
2259 static inline void __skb_queue_before(struct sk_buff_head *list,
2260 struct sk_buff *next,
2261 struct sk_buff *newsk)
2262 {
2263 __skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list);
2264 }
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276 static inline void __skb_queue_head(struct sk_buff_head *list,
2277 struct sk_buff *newsk)
2278 {
2279 __skb_queue_after(list, (struct sk_buff *)list, newsk);
2280 }
2281 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293 static inline void __skb_queue_tail(struct sk_buff_head *list,
2294 struct sk_buff *newsk)
2295 {
2296 __skb_queue_before(list, (struct sk_buff *)list, newsk);
2297 }
2298 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
2299
2300
2301
2302
2303
2304 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
2305 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2306 {
2307 struct sk_buff *next, *prev;
2308
2309 WRITE_ONCE(list->qlen, list->qlen - 1);
2310 next = skb->next;
2311 prev = skb->prev;
2312 skb->next = skb->prev = NULL;
2313 WRITE_ONCE(next->prev, prev);
2314 WRITE_ONCE(prev->next, next);
2315 }
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
2326 {
2327 struct sk_buff *skb = skb_peek(list);
2328 if (skb)
2329 __skb_unlink(skb, list);
2330 return skb;
2331 }
2332 struct sk_buff *skb_dequeue(struct sk_buff_head *list);
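/*
 * A minimal producer/consumer sketch for the queue primitives above
 * (skb_in and process() are hypothetical; the non-__ variants take
 * list->lock, skb_queue_purge() is declared further down):
 *
 *	struct sk_buff_head q;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb_in);		// producer
 *
 *	while ((skb = skb_dequeue(&q)) != NULL)	// consumer
 *		process(skb);
 *
 *	skb_queue_purge(&q);			// free whatever is left
 */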
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
2343 {
2344 struct sk_buff *skb = skb_peek_tail(list);
2345 if (skb)
2346 __skb_unlink(skb, list);
2347 return skb;
2348 }
2349 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
2350
2351
2352 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
2353 {
2354 return skb->data_len;
2355 }
2356
2357 static inline unsigned int skb_headlen(const struct sk_buff *skb)
2358 {
2359 return skb->len - skb->data_len;
2360 }
2361
2362 static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
2363 {
2364 unsigned int i, len = 0;
2365
2366 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
2367 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2368 return len;
2369 }
2370
2371 static inline unsigned int skb_pagelen(const struct sk_buff *skb)
2372 {
2373 return skb_headlen(skb) + __skb_pagelen(skb);
2374 }
2375
2376 static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
2377 int i, struct page *page,
2378 int off, int size)
2379 {
2380 skb_frag_t *frag = &shinfo->frags[i];
2381
2382
2383
2384
2385
2386
2387 frag->bv_page = page;
2388 frag->bv_offset = off;
2389 skb_frag_size_set(frag, size);
2390 }
2391
2392
2393
2394
2395
2396
2397 static inline void skb_len_add(struct sk_buff *skb, int delta)
2398 {
2399 skb->len += delta;
2400 skb->data_len += delta;
2401 skb->truesize += delta;
2402 }
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
2418 struct page *page, int off, int size)
2419 {
2420 __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
2421 page = compound_head(page);
2422 if (page_is_pfmemalloc(page))
2423 skb->pfmemalloc = true;
2424 }
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
2441 struct page *page, int off, int size)
2442 {
2443 __skb_fill_page_desc(skb, i, page, off, size);
2444 skb_shinfo(skb)->nr_frags = i + 1;
2445 }
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458 static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
2459 struct page *page, int off,
2460 int size)
2461 {
2462 struct skb_shared_info *shinfo = skb_shinfo(skb);
2463
2464 __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
2465 shinfo->nr_frags = i + 1;
2466 }
2467
2468 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
2469 int size, unsigned int truesize);
2470
2471 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
2472 unsigned int truesize);
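/*
 * A minimal RX sketch appending a page fragment with skb_add_rx_frag()
 * (page, offset and len are hypothetical values from a receive descriptor;
 * the truesize argument is the buffer space actually consumed):
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			offset, len, PAGE_SIZE);
 */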
2473
2474 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
2475
2476 #ifdef NET_SKBUFF_DATA_USES_OFFSET
2477 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2478 {
2479 return skb->head + skb->tail;
2480 }
2481
2482 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2483 {
2484 skb->tail = skb->data - skb->head;
2485 }
2486
2487 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2488 {
2489 skb_reset_tail_pointer(skb);
2490 skb->tail += offset;
2491 }
2492
2493 #else
2494 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2495 {
2496 return skb->tail;
2497 }
2498
2499 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2500 {
2501 skb->tail = skb->data;
2502 }
2503
2504 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2505 {
2506 skb->tail = skb->data + offset;
2507 }
2508
2509 #endif
2510
2511 static inline void skb_assert_len(struct sk_buff *skb)
2512 {
2513 #ifdef CONFIG_DEBUG_NET
2514 if (WARN_ONCE(!skb->len, "%s\n", __func__))
2515 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
2516 #endif
2517 }
2518
2519
2520
2521
2522 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2523 void *skb_put(struct sk_buff *skb, unsigned int len);
2524 static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
2525 {
2526 void *tmp = skb_tail_pointer(skb);
2527 SKB_LINEAR_ASSERT(skb);
2528 skb->tail += len;
2529 skb->len += len;
2530 return tmp;
2531 }
2532
2533 static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
2534 {
2535 void *tmp = __skb_put(skb, len);
2536
2537 memset(tmp, 0, len);
2538 return tmp;
2539 }
2540
2541 static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
2542 unsigned int len)
2543 {
2544 void *tmp = __skb_put(skb, len);
2545
2546 memcpy(tmp, data, len);
2547 return tmp;
2548 }
2549
2550 static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
2551 {
2552 *(u8 *)__skb_put(skb, 1) = val;
2553 }
2554
2555 static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
2556 {
2557 void *tmp = skb_put(skb, len);
2558
2559 memset(tmp, 0, len);
2560
2561 return tmp;
2562 }
2563
2564 static inline void *skb_put_data(struct sk_buff *skb, const void *data,
2565 unsigned int len)
2566 {
2567 void *tmp = skb_put(skb, len);
2568
2569 memcpy(tmp, data, len);
2570
2571 return tmp;
2572 }
2573
2574 static inline void skb_put_u8(struct sk_buff *skb, u8 val)
2575 {
2576 *(u8 *)skb_put(skb, 1) = val;
2577 }
2578
2579 void *skb_push(struct sk_buff *skb, unsigned int len);
2580 static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
2581 {
2582 skb->data -= len;
2583 skb->len += len;
2584 return skb->data;
2585 }
2586
2587 void *skb_pull(struct sk_buff *skb, unsigned int len);
2588 static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
2589 {
2590 skb->len -= len;
2591 if (unlikely(skb->len < skb->data_len)) {
2592 #if defined(CONFIG_DEBUG_NET)
2593 skb->len += len;
2594 pr_err("__skb_pull(len=%u)\n", len);
2595 skb_dump(KERN_ERR, skb, false);
2596 #endif
2597 BUG();
2598 }
2599 return skb->data += len;
2600 }
2601
2602 static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
2603 {
2604 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
2605 }
2606
2607 void *skb_pull_data(struct sk_buff *skb, size_t len);
2608
2609 void *__pskb_pull_tail(struct sk_buff *skb, int delta);
2610
2611 static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
2612 {
2613 if (len > skb_headlen(skb) &&
2614 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
2615 return NULL;
2616 skb->len -= len;
2617 return skb->data += len;
2618 }
2619
2620 static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
2621 {
2622 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
2623 }
2624
2625 static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
2626 {
2627 if (likely(len <= skb_headlen(skb)))
2628 return true;
2629 if (unlikely(len > skb->len))
2630 return false;
2631 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
2632 }
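/*
 * A minimal parsing sketch with pskb_may_pull(); struct iphdr comes from
 * <linux/ip.h>, and skb->data is assumed to sit at the start of the IP header:
 *
 *	const struct iphdr *iph;
 *
 *	if (!pskb_may_pull(skb, sizeof(*iph)))
 *		goto drop;			// header not linearly available
 *	iph = (const struct iphdr *)skb->data;
 */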
2633
2634 void skb_condense(struct sk_buff *skb);
2635
2636
2637
2638
2639
2640
2641
2642 static inline unsigned int skb_headroom(const struct sk_buff *skb)
2643 {
2644 return skb->data - skb->head;
2645 }
2646
2647
2648
2649
2650
2651
2652
2653 static inline int skb_tailroom(const struct sk_buff *skb)
2654 {
2655 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
2656 }
2657
2658
2659
2660
2661
2662
2663
2664
2665 static inline int skb_availroom(const struct sk_buff *skb)
2666 {
2667 if (skb_is_nonlinear(skb))
2668 return 0;
2669
2670 return skb->end - skb->tail - skb->reserved_tailroom;
2671 }
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681 static inline void skb_reserve(struct sk_buff *skb, int len)
2682 {
2683 skb->data += len;
2684 skb->tail += len;
2685 }
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2700 unsigned int needed_tailroom)
2701 {
2702 SKB_LINEAR_ASSERT(skb);
2703 if (mtu < skb_tailroom(skb) - needed_tailroom)
2704
2705 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2706 else
2707
2708 skb->reserved_tailroom = needed_tailroom;
2709 }
2710
2711 #define ENCAP_TYPE_ETHER 0
2712 #define ENCAP_TYPE_IPPROTO 1
2713
2714 static inline void skb_set_inner_protocol(struct sk_buff *skb,
2715 __be16 protocol)
2716 {
2717 skb->inner_protocol = protocol;
2718 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2719 }
2720
2721 static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2722 __u8 ipproto)
2723 {
2724 skb->inner_ipproto = ipproto;
2725 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2726 }
2727
2728 static inline void skb_reset_inner_headers(struct sk_buff *skb)
2729 {
2730 skb->inner_mac_header = skb->mac_header;
2731 skb->inner_network_header = skb->network_header;
2732 skb->inner_transport_header = skb->transport_header;
2733 }
2734
2735 static inline void skb_reset_mac_len(struct sk_buff *skb)
2736 {
2737 skb->mac_len = skb->network_header - skb->mac_header;
2738 }
2739
2740 static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2741 *skb)
2742 {
2743 return skb->head + skb->inner_transport_header;
2744 }
2745
2746 static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2747 {
2748 return skb_inner_transport_header(skb) - skb->data;
2749 }
2750
2751 static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2752 {
2753 skb->inner_transport_header = skb->data - skb->head;
2754 }
2755
2756 static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2757 const int offset)
2758 {
2759 skb_reset_inner_transport_header(skb);
2760 skb->inner_transport_header += offset;
2761 }
2762
2763 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2764 {
2765 return skb->head + skb->inner_network_header;
2766 }
2767
2768 static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2769 {
2770 skb->inner_network_header = skb->data - skb->head;
2771 }
2772
2773 static inline void skb_set_inner_network_header(struct sk_buff *skb,
2774 const int offset)
2775 {
2776 skb_reset_inner_network_header(skb);
2777 skb->inner_network_header += offset;
2778 }
2779
2780 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2781 {
2782 return skb->head + skb->inner_mac_header;
2783 }
2784
2785 static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2786 {
2787 skb->inner_mac_header = skb->data - skb->head;
2788 }
2789
2790 static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2791 const int offset)
2792 {
2793 skb_reset_inner_mac_header(skb);
2794 skb->inner_mac_header += offset;
2795 }
2796 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2797 {
2798 return skb->transport_header != (typeof(skb->transport_header))~0U;
2799 }
2800
2801 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2802 {
2803 DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
2804 return skb->head + skb->transport_header;
2805 }
2806
2807 static inline void skb_reset_transport_header(struct sk_buff *skb)
2808 {
2809 skb->transport_header = skb->data - skb->head;
2810 }
2811
2812 static inline void skb_set_transport_header(struct sk_buff *skb,
2813 const int offset)
2814 {
2815 skb_reset_transport_header(skb);
2816 skb->transport_header += offset;
2817 }
2818
2819 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2820 {
2821 return skb->head + skb->network_header;
2822 }
2823
2824 static inline void skb_reset_network_header(struct sk_buff *skb)
2825 {
2826 skb->network_header = skb->data - skb->head;
2827 }
2828
2829 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2830 {
2831 skb_reset_network_header(skb);
2832 skb->network_header += offset;
2833 }
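/*
 * A minimal sketch of recording header offsets while parsing a received
 * Ethernet/IPv4 frame (ETH_HLEN is from <linux/if_ether.h>; a fixed 20-byte
 * IPv4 header without options is assumed, and skb->data is assumed to point
 * at the MAC header):
 *
 *	skb_reset_mac_header(skb);
 *	skb_set_network_header(skb, ETH_HLEN);
 *	skb_set_transport_header(skb, ETH_HLEN + sizeof(struct iphdr));
 */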
2834
2835 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2836 {
2837 return skb->mac_header != (typeof(skb->mac_header))~0U;
2838 }
2839
2840 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2841 {
2842 DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
2843 return skb->head + skb->mac_header;
2844 }
2845
2846 static inline int skb_mac_offset(const struct sk_buff *skb)
2847 {
2848 return skb_mac_header(skb) - skb->data;
2849 }
2850
2851 static inline u32 skb_mac_header_len(const struct sk_buff *skb)
2852 {
2853 DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
2854 return skb->network_header - skb->mac_header;
2855 }
2856
2857 static inline void skb_unset_mac_header(struct sk_buff *skb)
2858 {
2859 skb->mac_header = (typeof(skb->mac_header))~0U;
2860 }
2861
2862 static inline void skb_reset_mac_header(struct sk_buff *skb)
2863 {
2864 skb->mac_header = skb->data - skb->head;
2865 }
2866
2867 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2868 {
2869 skb_reset_mac_header(skb);
2870 skb->mac_header += offset;
2871 }
2872
2873 static inline void skb_pop_mac_header(struct sk_buff *skb)
2874 {
2875 skb->mac_header = skb->network_header;
2876 }
2877
2878 static inline void skb_probe_transport_header(struct sk_buff *skb)
2879 {
2880 struct flow_keys_basic keys;
2881
2882 if (skb_transport_header_was_set(skb))
2883 return;
2884
2885 if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
2886 NULL, 0, 0, 0, 0))
2887 skb_set_transport_header(skb, keys.control.thoff);
2888 }
2889
2890 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2891 {
2892 if (skb_mac_header_was_set(skb)) {
2893 const unsigned char *old_mac = skb_mac_header(skb);
2894
2895 skb_set_mac_header(skb, -skb->mac_len);
2896 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2897 }
2898 }
2899
2900 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2901 {
2902 return skb->csum_start - skb_headroom(skb);
2903 }
2904
2905 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2906 {
2907 return skb->head + skb->csum_start;
2908 }
2909
2910 static inline int skb_transport_offset(const struct sk_buff *skb)
2911 {
2912 return skb_transport_header(skb) - skb->data;
2913 }
2914
2915 static inline u32 skb_network_header_len(const struct sk_buff *skb)
2916 {
2917 return skb->transport_header - skb->network_header;
2918 }
2919
2920 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2921 {
2922 return skb->inner_transport_header - skb->inner_network_header;
2923 }
2924
2925 static inline int skb_network_offset(const struct sk_buff *skb)
2926 {
2927 return skb_network_header(skb) - skb->data;
2928 }
2929
2930 static inline int skb_inner_network_offset(const struct sk_buff *skb)
2931 {
2932 return skb_inner_network_header(skb) - skb->data;
2933 }
2934
2935 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2936 {
2937 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2938 }
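/* Illustrative only: before dereferencing ip_hdr(skb) in a hypothetical
 * receive hook, make sure the full IPv4 header sits in the linear data area.
 * pskb_network_may_pull() may reallocate the head, so reload any cached
 * header pointers afterwards:
 *
 *	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);
 *	if (!pskb_network_may_pull(skb, iph->ihl * 4))
 *		goto drop;
 *	iph = ip_hdr(skb);	// reload, data may have moved
 */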
2939
2940
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. Since an Ethernet header is 14 bytes, network drivers
 * would otherwise end up with the IP header at an unaligned offset.
 * The IP header can be aligned by shifting the start of the packet by
 * 2 bytes, which drivers do with:
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside of this alignment is that the DMA into the receive
 * buffer is then itself unaligned, which is expensive on some
 * architectures. Because the trade-off varies, NET_IP_ALIGN may be
 * overridden per architecture; the generic default is 2.
 */
2960 #ifndef NET_IP_ALIGN
2961 #define NET_IP_ALIGN 2
2962 #endif
2963
2964
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data
 * when the header has to grow: in the default case, growing the header
 * by up to NET_SKB_PAD bytes needs no reallocation.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, so the value should not be reduced below that; rounding up
 * to L1_CACHE_BYTES also keeps DMA into the buffer cache-line aligned.
 * Architectures and drivers may override the default below.
 */
2984 #ifndef NET_SKB_PAD
2985 #define NET_SKB_PAD max(32, L1_CACHE_BYTES)
2986 #endif
2987
2988 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2989
2990 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2991 {
2992 if (WARN_ON(skb_is_nonlinear(skb)))
2993 return;
2994 skb->len = len;
2995 skb_set_tail_pointer(skb, len);
2996 }
2997
2998 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2999 {
3000 __skb_set_length(skb, len);
3001 }
3002
3003 void skb_trim(struct sk_buff *skb, unsigned int len);
3004
3005 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
3006 {
3007 if (skb->data_len)
3008 return ___pskb_trim(skb, len);
3009 __skb_trim(skb, len);
3010 return 0;
3011 }
3012
3013 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
3014 {
3015 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
3016 }
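/* Illustrative only: dropping trailing link-layer padding once the real
 * datagram length is known from the network header (a sketch; receive paths
 * that care about CHECKSUM_COMPLETE use pskb_trim_rcsum() instead):
 *
 *	unsigned int tot_len = ntohs(ip_hdr(skb)->tot_len);
 *
 *	if (skb->len > tot_len && pskb_trim(skb, tot_len))
 *		goto drop;
 */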
3017
/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to
 *	out-of-memory.
 */
3027 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
3028 {
3029 int err = pskb_trim(skb, len);
3030 BUG_ON(err);
3031 }
3032
3033 static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
3034 {
3035 unsigned int diff = len - skb->len;
3036
3037 if (skb_tailroom(skb) < diff) {
3038 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
3039 GFP_ATOMIC);
3040 if (ret)
3041 return ret;
3042 }
3043 __skb_set_length(skb, len);
3044 return 0;
3045 }
3046
/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
3055 static inline void skb_orphan(struct sk_buff *skb)
3056 {
3057 if (skb->destructor) {
3058 skb->destructor(skb);
3059 skb->destructor = NULL;
3060 skb->sk = NULL;
3061 } else {
3062 BUG_ON(skb->sk);
3063 }
3064 }
3065
/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
3075 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
3076 {
3077 if (likely(!skb_zcopy(skb)))
3078 return 0;
3079 if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN)
3080 return 0;
3081 return skb_copy_ubufs(skb, gfp_mask);
3082 }
3083
3084
3085 static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
3086 {
3087 if (likely(!skb_zcopy(skb)))
3088 return 0;
3089 return skb_copy_ubufs(skb, gfp_mask);
3090 }
3091
/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
3100 static inline void __skb_queue_purge(struct sk_buff_head *list)
3101 {
3102 struct sk_buff *skb;
3103 while ((skb = __skb_dequeue(list)) != NULL)
3104 kfree_skb(skb);
3105 }
3106 void skb_queue_purge(struct sk_buff_head *list);
3107
3108 unsigned int skb_rbtree_purge(struct rb_root *root);
3109
3110 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
3111
/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
3119 static inline void *netdev_alloc_frag(unsigned int fragsz)
3120 {
3121 return __netdev_alloc_frag_align(fragsz, ~0u);
3122 }
3123
3124 static inline void *netdev_alloc_frag_align(unsigned int fragsz,
3125 unsigned int align)
3126 {
3127 WARN_ON_ONCE(!is_power_of_2(align));
3128 return __netdev_alloc_frag_align(fragsz, -align);
3129 }
3130
3131 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
3132 gfp_t gfp_mask);
3133
/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
3147 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
3148 unsigned int length)
3149 {
3150 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
3151 }
3152
3153
3154 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
3155 gfp_t gfp_mask)
3156 {
3157 return __netdev_alloc_skb(NULL, length, gfp_mask);
3158 }
3159
3160
3161 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
3162 {
3163 return netdev_alloc_skb(NULL, length);
3164 }
3165
3166
3167 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
3168 unsigned int length, gfp_t gfp)
3169 {
3170 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
3171
3172 if (NET_IP_ALIGN && skb)
3173 skb_reserve(skb, NET_IP_ALIGN);
3174 return skb;
3175 }
3176
3177 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
3178 unsigned int length)
3179 {
3180 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
3181 }
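/* Illustrative only: a hypothetical driver RX refill path. NET_IP_ALIGN
 * bytes of headroom are reserved by the helper so the IP header of the
 * received frame ends up naturally aligned (rx_buf_len is a placeholder
 * for the driver's buffer size):
 *
 *	skb = netdev_alloc_skb_ip_align(netdev, rx_buf_len);
 *	if (!skb)
 *		return -ENOMEM;		// retry on the next refill
 *	// map skb->data for DMA and post the buffer to the NIC ...
 */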
3182
3183 static inline void skb_free_frag(void *addr)
3184 {
3185 page_frag_free(addr);
3186 }
3187
3188 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
3189
3190 static inline void *napi_alloc_frag(unsigned int fragsz)
3191 {
3192 return __napi_alloc_frag_align(fragsz, ~0u);
3193 }
3194
3195 static inline void *napi_alloc_frag_align(unsigned int fragsz,
3196 unsigned int align)
3197 {
3198 WARN_ON_ONCE(!is_power_of_2(align));
3199 return __napi_alloc_frag_align(fragsz, -align);
3200 }
3201
3202 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
3203 unsigned int length, gfp_t gfp_mask);
3204 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
3205 unsigned int length)
3206 {
3207 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
3208 }
3209 void napi_consume_skb(struct sk_buff *skb, int budget);
3210
3211 void napi_skb_free_stolen_head(struct sk_buff *skb);
3212 void __kfree_skb_defer(struct sk_buff *skb);
3213
/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
3223 static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
3224 unsigned int order)
3225 {
	/* This piece of code contains several assumptions:
	 * 1. This is for device Rx, therefore a cold page is preferred.
	 * 2. The expectation is the user wants a compound page.
	 * 3. If requesting an order 0 page it will not be compound
	 *    due to the check to see if order has a value in prep_new_page.
	 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *    checks in gfp_to_alloc_flags that should be enforcing this.
	 */
3234 gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
3235
3236 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
3237 }
3238
3239 static inline struct page *dev_alloc_pages(unsigned int order)
3240 {
3241 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
3242 }
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252 static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
3253 {
3254 return __dev_alloc_pages(gfp_mask, 0);
3255 }
3256
3257 static inline struct page *dev_alloc_page(void)
3258 {
3259 return dev_alloc_pages(0);
3260 }
3261
/**
 * dev_page_is_reusable - check whether a page can be reused for network Rx
 * @page: the page to test
 *
 * A page shouldn't be considered for reusing/recycling if it was allocated
 * under memory pressure or at a distant memory node.
 *
 * Returns false if this page should be returned to the page allocator, true
 * otherwise.
 */
3272 static inline bool dev_page_is_reusable(const struct page *page)
3273 {
3274 return likely(page_to_nid(page) == numa_mem_id() &&
3275 !page_is_pfmemalloc(page));
3276 }
3277
3278
3279
3280
3281
3282
3283 static inline void skb_propagate_pfmemalloc(const struct page *page,
3284 struct sk_buff *skb)
3285 {
3286 if (page_is_pfmemalloc(page))
3287 skb->pfmemalloc = true;
3288 }
3289
3290
3291
3292
3293
3294 static inline unsigned int skb_frag_off(const skb_frag_t *frag)
3295 {
3296 return frag->bv_offset;
3297 }
3298
3299
3300
3301
3302
3303
3304 static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
3305 {
3306 frag->bv_offset += delta;
3307 }
3308
3309
3310
3311
3312
3313
3314 static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
3315 {
3316 frag->bv_offset = offset;
3317 }
3318
3319
3320
3321
3322
3323
3324 static inline void skb_frag_off_copy(skb_frag_t *fragto,
3325 const skb_frag_t *fragfrom)
3326 {
3327 fragto->bv_offset = fragfrom->bv_offset;
3328 }
3329
3330
3331
3332
3333
3334
3335
3336 static inline struct page *skb_frag_page(const skb_frag_t *frag)
3337 {
3338 return frag->bv_page;
3339 }
3340
3341
3342
3343
3344
3345
3346
3347 static inline void __skb_frag_ref(skb_frag_t *frag)
3348 {
3349 get_page(skb_frag_page(frag));
3350 }
3351
3352
3353
3354
3355
3356
3357
3358
3359 static inline void skb_frag_ref(struct sk_buff *skb, int f)
3360 {
3361 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
3362 }
3363
/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 * @recycle: recycle the page if allocated via page_pool
 *
 * Releases a reference on the paged fragment @frag
 * or recycles the page via the page_pool API.
 */
3372 static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
3373 {
3374 struct page *page = skb_frag_page(frag);
3375
3376 #ifdef CONFIG_PAGE_POOL
3377 if (recycle && page_pool_return_skb_page(page))
3378 return;
3379 #endif
3380 put_page(page);
3381 }
3382
3383
3384
3385
3386
3387
3388
3389
3390 static inline void skb_frag_unref(struct sk_buff *skb, int f)
3391 {
3392 struct skb_shared_info *shinfo = skb_shinfo(skb);
3393
3394 if (!skb_zcopy_managed(skb))
3395 __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle);
3396 }
3397
3398
3399
3400
3401
3402
3403
3404
3405 static inline void *skb_frag_address(const skb_frag_t *frag)
3406 {
3407 return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
3408 }
3409
3410
3411
3412
3413
3414
3415
3416
3417 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
3418 {
3419 void *ptr = page_address(skb_frag_page(frag));
3420 if (unlikely(!ptr))
3421 return NULL;
3422
3423 return ptr + skb_frag_off(frag);
3424 }
3425
3426
3427
3428
3429
3430
3431 static inline void skb_frag_page_copy(skb_frag_t *fragto,
3432 const skb_frag_t *fragfrom)
3433 {
3434 fragto->bv_page = fragfrom->bv_page;
3435 }
3436
3437
3438
3439
3440
3441
3442
3443
3444 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
3445 {
3446 frag->bv_page = page;
3447 }
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
3458 struct page *page)
3459 {
3460 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
3461 }
3462
3463 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
3477 const skb_frag_t *frag,
3478 size_t offset, size_t size,
3479 enum dma_data_direction dir)
3480 {
3481 return dma_map_page(dev, skb_frag_page(frag),
3482 skb_frag_off(frag) + offset, size, dir);
3483 }
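/* Illustrative only: a hypothetical transmit path mapping the linear head
 * and every paged fragment of an skb for device DMA (variable declarations,
 * unmapping and descriptor setup elided):
 *
 *	dma = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		goto drop;
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *
 *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 *				       DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unmap;
 *	}
 */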
3484
3485 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
3486 gfp_t gfp_mask)
3487 {
3488 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
3489 }
3490
3491
3492 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
3493 gfp_t gfp_mask)
3494 {
3495 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
3496 }
3497
3498
/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
3507 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
3508 {
3509 return !skb_header_cloned(skb) &&
3510 skb_headroom(skb) + len <= skb->hdr_len;
3511 }
3512
3513 static inline int skb_try_make_writable(struct sk_buff *skb,
3514 unsigned int write_len)
3515 {
3516 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
3517 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3518 }
3519
3520 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
3521 int cloned)
3522 {
3523 int delta = 0;
3524
3525 if (headroom > skb_headroom(skb))
3526 delta = headroom - skb_headroom(skb);
3527
3528 if (delta || cloned)
3529 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
3530 GFP_ATOMIC);
3531 return 0;
3532 }
3533
/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
3546 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
3547 {
3548 return __skb_cow(skb, headroom, skb_cloned(skb));
3549 }
3550
/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned. It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
3561 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
3562 {
3563 return __skb_cow(skb, headroom, skb_header_cloned(skb));
3564 }
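/* Illustrative only: before pushing an extra header (here a hypothetical
 * TUNNEL_HLEN-byte encapsulation header), make sure the headroom exists and
 * the header area is private to this path:
 *
 *	if (skb_cow_head(skb, TUNNEL_HLEN))
 *		goto drop;
 *	hdr = skb_push(skb, TUNNEL_HLEN);
 *	// fill in hdr ...
 *
 * TUNNEL_HLEN and hdr are placeholders for whatever the caller pushes.
 */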
3565
/**
 *	skb_padto - pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
3576 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
3577 {
3578 unsigned int size = skb->len;
3579 if (likely(size >= len))
3580 return 0;
3581 return skb_pad(skb, len - size);
3582 }
3583
/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
3595 static inline int __must_check __skb_put_padto(struct sk_buff *skb,
3596 unsigned int len,
3597 bool free_on_error)
3598 {
3599 unsigned int size = skb->len;
3600
3601 if (unlikely(size < len)) {
3602 len -= size;
3603 if (__skb_pad(skb, len, free_on_error))
3604 return -ENOMEM;
3605 __skb_put(skb, len);
3606 }
3607 return 0;
3608 }
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620 static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
3621 {
3622 return __skb_put_padto(skb, len, true);
3623 }
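/* Illustrative only: hardware that cannot pad short frames itself can use
 * skb_put_padto() in its xmit routine to extend the frame to the Ethernet
 * minimum. The skb has already been consumed on failure, so the driver only
 * accounts the drop:
 *
 *	if (skb_put_padto(skb, ETH_ZLEN)) {
 *		dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */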
3624
3625 static inline int skb_add_data(struct sk_buff *skb,
3626 struct iov_iter *from, int copy)
3627 {
3628 const int off = skb->len;
3629
3630 if (skb->ip_summed == CHECKSUM_NONE) {
3631 __wsum csum = 0;
3632 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
3633 &csum, from)) {
3634 skb->csum = csum_block_add(skb->csum, csum, off);
3635 return 0;
3636 }
3637 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
3638 return 0;
3639
3640 __skb_trim(skb, off);
3641 return -EFAULT;
3642 }
3643
3644 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3645 const struct page *page, int off)
3646 {
3647 if (skb_zcopy(skb))
3648 return false;
3649 if (i) {
3650 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
3651
3652 return page == skb_frag_page(frag) &&
3653 off == skb_frag_off(frag) + skb_frag_size(frag);
3654 }
3655 return false;
3656 }
3657
3658 static inline int __skb_linearize(struct sk_buff *skb)
3659 {
3660 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3661 }
3662
/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
3670 static inline int skb_linearize(struct sk_buff *skb)
3671 {
3672 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3673 }
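/* Illustrative only: code that wants to parse a packet with plain pointer
 * arithmetic over skb->data (rather than skb_header_pointer()) can force
 * all paged data into the linear area first:
 *
 *	if (skb_linearize(skb))
 *		goto drop;		// -ENOMEM
 *	// skb->data now covers all skb->len bytes
 */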
3674
3675
3676
3677
3678
3679
3680
3681
3682 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3683 {
3684 return skb_is_nonlinear(skb) &&
3685 skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
3686 }
3687
3688
3689
3690
3691
3692
3693
3694
3695 static inline int skb_linearize_cow(struct sk_buff *skb)
3696 {
3697 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3698 __skb_linearize(skb) : 0;
3699 }
3700
3701 static __always_inline void
3702 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3703 unsigned int off)
3704 {
3705 if (skb->ip_summed == CHECKSUM_COMPLETE)
3706 skb->csum = csum_block_sub(skb->csum,
3707 csum_partial(start, len, 0), off);
3708 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3709 skb_checksum_start_offset(skb) < 0)
3710 skb->ip_summed = CHECKSUM_NONE;
3711 }
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723 static inline void skb_postpull_rcsum(struct sk_buff *skb,
3724 const void *start, unsigned int len)
3725 {
3726 if (skb->ip_summed == CHECKSUM_COMPLETE)
3727 skb->csum = wsum_negate(csum_partial(start, len,
3728 wsum_negate(skb->csum)));
3729 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3730 skb_checksum_start_offset(skb) < 0)
3731 skb->ip_summed = CHECKSUM_NONE;
3732 }
3733
3734 static __always_inline void
3735 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3736 unsigned int off)
3737 {
3738 if (skb->ip_summed == CHECKSUM_COMPLETE)
3739 skb->csum = csum_block_add(skb->csum,
3740 csum_partial(start, len, 0), off);
3741 }
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752 static inline void skb_postpush_rcsum(struct sk_buff *skb,
3753 const void *start, unsigned int len)
3754 {
3755 __skb_postpush_rcsum(skb, start, len, 0);
3756 }
3757
3758 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771 static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3772 {
3773 skb_push(skb, len);
3774 skb_postpush_rcsum(skb, skb->data, len);
3775 return skb->data;
3776 }
3777
3778 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3779
/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 *	It can change skb pointers.
 */
3789 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3790 {
3791 if (likely(len >= skb->len))
3792 return 0;
3793 return pskb_trim_rcsum_slow(skb, len);
3794 }
3795
3796 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3797 {
3798 if (skb->ip_summed == CHECKSUM_COMPLETE)
3799 skb->ip_summed = CHECKSUM_NONE;
3800 __skb_trim(skb, len);
3801 return 0;
3802 }
3803
3804 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3805 {
3806 if (skb->ip_summed == CHECKSUM_COMPLETE)
3807 skb->ip_summed = CHECKSUM_NONE;
3808 return __skb_grow(skb, len);
3809 }
3810
3811 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3812 #define skb_rb_first(root) rb_to_skb(rb_first(root))
3813 #define skb_rb_last(root) rb_to_skb(rb_last(root))
3814 #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3815 #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3816
3817 #define skb_queue_walk(queue, skb) \
3818 for (skb = (queue)->next; \
3819 skb != (struct sk_buff *)(queue); \
3820 skb = skb->next)
3821
3822 #define skb_queue_walk_safe(queue, skb, tmp) \
3823 for (skb = (queue)->next, tmp = skb->next; \
3824 skb != (struct sk_buff *)(queue); \
3825 skb = tmp, tmp = skb->next)
3826
3827 #define skb_queue_walk_from(queue, skb) \
3828 for (; skb != (struct sk_buff *)(queue); \
3829 skb = skb->next)
3830
3831 #define skb_rbtree_walk(skb, root) \
3832 for (skb = skb_rb_first(root); skb != NULL; \
3833 skb = skb_rb_next(skb))
3834
3835 #define skb_rbtree_walk_from(skb) \
3836 for (; skb != NULL; \
3837 skb = skb_rb_next(skb))
3838
3839 #define skb_rbtree_walk_from_safe(skb, tmp) \
3840 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3841 skb = tmp)
3842
3843 #define skb_queue_walk_from_safe(queue, skb, tmp) \
3844 for (tmp = skb->next; \
3845 skb != (struct sk_buff *)(queue); \
3846 skb = tmp, tmp = skb->next)
3847
3848 #define skb_queue_reverse_walk(queue, skb) \
3849 for (skb = (queue)->prev; \
3850 skb != (struct sk_buff *)(queue); \
3851 skb = skb->prev)
3852
3853 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3854 for (skb = (queue)->prev, tmp = skb->prev; \
3855 skb != (struct sk_buff *)(queue); \
3856 skb = tmp, tmp = skb->prev)
3857
3858 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3859 for (tmp = skb->prev; \
3860 skb != (struct sk_buff *)(queue); \
3861 skb = tmp, tmp = skb->prev)
3862
3863 static inline bool skb_has_frag_list(const struct sk_buff *skb)
3864 {
3865 return skb_shinfo(skb)->frag_list != NULL;
3866 }
3867
3868 static inline void skb_frag_list_init(struct sk_buff *skb)
3869 {
3870 skb_shinfo(skb)->frag_list = NULL;
3871 }
3872
3873 #define skb_walk_frags(skb, iter) \
3874 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
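/* Illustrative only: summing the length of every fragment skb hanging off
 * the frag_list of a (possibly segmented) buffer:
 *
 *	struct sk_buff *iter;
 *	unsigned int flen = 0;
 *
 *	skb_walk_frags(skb, iter)
 *		flen += iter->len;
 */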
3875
3876
3877 int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
3878 int *err, long *timeo_p,
3879 const struct sk_buff *skb);
3880 struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3881 struct sk_buff_head *queue,
3882 unsigned int flags,
3883 int *off, int *err,
3884 struct sk_buff **last);
3885 struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
3886 struct sk_buff_head *queue,
3887 unsigned int flags, int *off, int *err,
3888 struct sk_buff **last);
3889 struct sk_buff *__skb_recv_datagram(struct sock *sk,
3890 struct sk_buff_head *sk_queue,
3891 unsigned int flags, int *off, int *err);
3892 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
3893 __poll_t datagram_poll(struct file *file, struct socket *sock,
3894 struct poll_table_struct *wait);
3895 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3896 struct iov_iter *to, int size);
3897 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3898 struct msghdr *msg, int size)
3899 {
3900 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3901 }
3902 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3903 struct msghdr *msg);
3904 int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
3905 struct iov_iter *to, int len,
3906 struct ahash_request *hash);
3907 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3908 struct iov_iter *from, int len);
3909 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3910 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3911 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3912 static inline void skb_free_datagram_locked(struct sock *sk,
3913 struct sk_buff *skb)
3914 {
3915 __skb_free_datagram_locked(sk, skb, 0);
3916 }
3917 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3918 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3919 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3920 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3921 int len);
3922 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3923 struct pipe_inode_info *pipe, unsigned int len,
3924 unsigned int flags);
3925 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3926 int len);
3927 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
3928 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3929 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3930 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3931 int len, int hlen);
3932 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3933 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3934 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3935 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
3936 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
3937 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3938 struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
3939 unsigned int offset);
3940 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3941 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
3942 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3943 int skb_vlan_pop(struct sk_buff *skb);
3944 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3945 int skb_eth_pop(struct sk_buff *skb);
3946 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
3947 const unsigned char *src);
3948 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
3949 int mac_len, bool ethernet);
3950 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
3951 bool ethernet);
3952 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
3953 int skb_mpls_dec_ttl(struct sk_buff *skb);
3954 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3955 gfp_t gfp);
3956
3957 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3958 {
3959 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3960 }
3961
3962 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3963 {
3964 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3965 }
3966
3967 struct skb_checksum_ops {
3968 __wsum (*update)(const void *mem, int len, __wsum wsum);
3969 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3970 };
3971
3972 extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3973
3974 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3975 __wsum csum, const struct skb_checksum_ops *ops);
3976 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3977 __wsum csum);
3978
3979 static inline void * __must_check
3980 __skb_header_pointer(const struct sk_buff *skb, int offset, int len,
3981 const void *data, int hlen, void *buffer)
3982 {
3983 if (likely(hlen - offset >= len))
3984 return (void *)data + offset;
3985
3986 if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
3987 return NULL;
3988
3989 return buffer;
3990 }
3991
3992 static inline void * __must_check
3993 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3994 {
3995 return __skb_header_pointer(skb, offset, len, skb->data,
3996 skb_headlen(skb), buffer);
3997 }
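/* Illustrative only: safely reading a header that might not be contiguous
 * in the linear area, as e.g. netfilter match code does. The on-stack copy
 * buffer is only used when the requested bytes are not already linear:
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;	// truncated packet
 */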
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009 static inline bool skb_needs_linearize(struct sk_buff *skb,
4010 netdev_features_t features)
4011 {
4012 return skb_is_nonlinear(skb) &&
4013 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
4014 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
4015 }
4016
4017 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
4018 void *to,
4019 const unsigned int len)
4020 {
4021 memcpy(to, skb->data, len);
4022 }
4023
4024 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
4025 const int offset, void *to,
4026 const unsigned int len)
4027 {
4028 memcpy(to, skb->data + offset, len);
4029 }
4030
4031 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
4032 const void *from,
4033 const unsigned int len)
4034 {
4035 memcpy(skb->data, from, len);
4036 }
4037
4038 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
4039 const int offset,
4040 const void *from,
4041 const unsigned int len)
4042 {
4043 memcpy(skb->data + offset, from, len);
4044 }
4045
4046 void skb_init(void);
4047
4048 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
4049 {
4050 return skb->tstamp;
4051 }
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062 static inline void skb_get_timestamp(const struct sk_buff *skb,
4063 struct __kernel_old_timeval *stamp)
4064 {
4065 *stamp = ns_to_kernel_old_timeval(skb->tstamp);
4066 }
4067
4068 static inline void skb_get_new_timestamp(const struct sk_buff *skb,
4069 struct __kernel_sock_timeval *stamp)
4070 {
4071 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
4072
4073 stamp->tv_sec = ts.tv_sec;
4074 stamp->tv_usec = ts.tv_nsec / 1000;
4075 }
4076
4077 static inline void skb_get_timestampns(const struct sk_buff *skb,
4078 struct __kernel_old_timespec *stamp)
4079 {
4080 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
4081
4082 stamp->tv_sec = ts.tv_sec;
4083 stamp->tv_nsec = ts.tv_nsec;
4084 }
4085
4086 static inline void skb_get_new_timestampns(const struct sk_buff *skb,
4087 struct __kernel_timespec *stamp)
4088 {
4089 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
4090
4091 stamp->tv_sec = ts.tv_sec;
4092 stamp->tv_nsec = ts.tv_nsec;
4093 }
4094
4095 static inline void __net_timestamp(struct sk_buff *skb)
4096 {
4097 skb->tstamp = ktime_get_real();
4098 skb->mono_delivery_time = 0;
4099 }
4100
4101 static inline ktime_t net_timedelta(ktime_t t)
4102 {
4103 return ktime_sub(ktime_get_real(), t);
4104 }
4105
4106 static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
4107 bool mono)
4108 {
4109 skb->tstamp = kt;
4110 skb->mono_delivery_time = kt && mono;
4111 }
4112
4113 DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
4114
4115
4116
4117
4118 static inline void skb_clear_delivery_time(struct sk_buff *skb)
4119 {
4120 if (skb->mono_delivery_time) {
4121 skb->mono_delivery_time = 0;
4122 if (static_branch_unlikely(&netstamp_needed_key))
4123 skb->tstamp = ktime_get_real();
4124 else
4125 skb->tstamp = 0;
4126 }
4127 }
4128
4129 static inline void skb_clear_tstamp(struct sk_buff *skb)
4130 {
4131 if (skb->mono_delivery_time)
4132 return;
4133
4134 skb->tstamp = 0;
4135 }
4136
4137 static inline ktime_t skb_tstamp(const struct sk_buff *skb)
4138 {
4139 if (skb->mono_delivery_time)
4140 return 0;
4141
4142 return skb->tstamp;
4143 }
4144
4145 static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
4146 {
4147 if (!skb->mono_delivery_time && skb->tstamp)
4148 return skb->tstamp;
4149
4150 if (static_branch_unlikely(&netstamp_needed_key) || cond)
4151 return ktime_get_real();
4152
4153 return 0;
4154 }
4155
4156 static inline u8 skb_metadata_len(const struct sk_buff *skb)
4157 {
4158 return skb_shinfo(skb)->meta_len;
4159 }
4160
4161 static inline void *skb_metadata_end(const struct sk_buff *skb)
4162 {
4163 return skb_mac_header(skb);
4164 }
4165
4166 static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
4167 const struct sk_buff *skb_b,
4168 u8 meta_len)
4169 {
4170 const void *a = skb_metadata_end(skb_a);
4171 const void *b = skb_metadata_end(skb_b);
4172
4173 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
4174 u64 diffs = 0;
4175
4176 switch (meta_len) {
4177 #define __it(x, op) (x -= sizeof(u##op))
4178 #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
4179 case 32: diffs |= __it_diff(a, b, 64);
4180 fallthrough;
4181 case 24: diffs |= __it_diff(a, b, 64);
4182 fallthrough;
4183 case 16: diffs |= __it_diff(a, b, 64);
4184 fallthrough;
4185 case 8: diffs |= __it_diff(a, b, 64);
4186 break;
4187 case 28: diffs |= __it_diff(a, b, 64);
4188 fallthrough;
4189 case 20: diffs |= __it_diff(a, b, 64);
4190 fallthrough;
4191 case 12: diffs |= __it_diff(a, b, 64);
4192 fallthrough;
4193 case 4: diffs |= __it_diff(a, b, 32);
4194 break;
4195 }
4196 return diffs;
4197 #else
4198 return memcmp(a - meta_len, b - meta_len, meta_len);
4199 #endif
4200 }
4201
4202 static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
4203 const struct sk_buff *skb_b)
4204 {
4205 u8 len_a = skb_metadata_len(skb_a);
4206 u8 len_b = skb_metadata_len(skb_b);
4207
4208 if (!(len_a | len_b))
4209 return false;
4210
4211 return len_a != len_b ?
4212 true : __skb_metadata_differs(skb_a, skb_b, len_a);
4213 }
4214
4215 static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
4216 {
4217 skb_shinfo(skb)->meta_len = meta_len;
4218 }
4219
4220 static inline void skb_metadata_clear(struct sk_buff *skb)
4221 {
4222 skb_metadata_set(skb, 0);
4223 }
4224
4225 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
4226
4227 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
4228
4229 void skb_clone_tx_timestamp(struct sk_buff *skb);
4230 bool skb_defer_rx_timestamp(struct sk_buff *skb);
4231
4232 #else
4233
4234 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
4235 {
4236 }
4237
4238 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
4239 {
4240 return false;
4241 }
4242
4243 #endif
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257 void skb_complete_tx_timestamp(struct sk_buff *skb,
4258 struct skb_shared_hwtstamps *hwtstamps);
4259
4260 void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
4261 struct skb_shared_hwtstamps *hwtstamps,
4262 struct sock *sk, int tstype);
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275 void skb_tstamp_tx(struct sk_buff *orig_skb,
4276 struct skb_shared_hwtstamps *hwtstamps);
4277
/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 * @skb: A socket buffer.
 *
 * Ethernet MAC drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger. Otherwise
 * the packet could potentially already be freed.
 */
4290 static inline void skb_tx_timestamp(struct sk_buff *skb)
4291 {
4292 skb_clone_tx_timestamp(skb);
4293 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
4294 skb_tstamp_tx(skb, NULL);
4295 }
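/* Illustrative only: placement of the timestamp hook in a hypothetical
 * driver's ndo_start_xmit(), after the descriptor is set up but before the
 * doorbell write that makes TX completion possible (ring and its doorbell
 * register are placeholders):
 *
 *	skb_tx_timestamp(skb);
 *	writel(tail, ring->doorbell);	// hardware may now complete the skb
 *	return NETDEV_TX_OK;
 */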
4296
4297
4298
4299
4300
4301
4302
4303
4304 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
4305
4306 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
4307 __sum16 __skb_checksum_complete(struct sk_buff *skb);
4308
4309 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
4310 {
4311 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
4312 skb->csum_valid ||
4313 (skb->ip_summed == CHECKSUM_PARTIAL &&
4314 skb_checksum_start_offset(skb) >= 0));
4315 }
4316
/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum. The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP. It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets. In that case the function should return zero if the
 *	checksum is correct. In particular, this function will return zero
 *	if skb_csum_unnecessary() succeeded.
 */
4333 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
4334 {
4335 return skb_csum_unnecessary(skb) ?
4336 0 : __skb_checksum_complete(skb);
4337 }
4338
4339 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
4340 {
4341 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4342 if (skb->csum_level == 0)
4343 skb->ip_summed = CHECKSUM_NONE;
4344 else
4345 skb->csum_level--;
4346 }
4347 }
4348
4349 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
4350 {
4351 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4352 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
4353 skb->csum_level++;
4354 } else if (skb->ip_summed == CHECKSUM_NONE) {
4355 skb->ip_summed = CHECKSUM_UNNECESSARY;
4356 skb->csum_level = 0;
4357 }
4358 }
4359
4360 static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
4361 {
4362 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4363 skb->ip_summed = CHECKSUM_NONE;
4364 skb->csum_level = 0;
4365 }
4366 }
4367
4368
4369
4370
4371
4372
4373 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
4374 bool zero_okay,
4375 __sum16 check)
4376 {
4377 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
4378 skb->csum_valid = 1;
4379 __skb_decr_checksum_unnecessary(skb);
4380 return false;
4381 }
4382
4383 return true;
4384 }
4385
4386
4387
4388
4389 #define CHECKSUM_BREAK 76
4390
4391
4392
4393
4394
4395
4396
4397 static inline void skb_checksum_complete_unset(struct sk_buff *skb)
4398 {
4399 if (skb->ip_summed == CHECKSUM_COMPLETE)
4400 skb->ip_summed = CHECKSUM_NONE;
4401 }
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
4413 bool complete,
4414 __wsum psum)
4415 {
4416 if (skb->ip_summed == CHECKSUM_COMPLETE) {
4417 if (!csum_fold(csum_add(psum, skb->csum))) {
4418 skb->csum_valid = 1;
4419 return 0;
4420 }
4421 }
4422
4423 skb->csum = psum;
4424
4425 if (complete || skb->len <= CHECKSUM_BREAK) {
4426 __sum16 csum;
4427
4428 csum = __skb_checksum_complete(skb);
4429 skb->csum_valid = !csum;
4430 return csum;
4431 }
4432
4433 return 0;
4434 }
4435
4436 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
4437 {
4438 return 0;
4439 }
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451 #define __skb_checksum_validate(skb, proto, complete, \
4452 zero_okay, check, compute_pseudo) \
4453 ({ \
4454 __sum16 __ret = 0; \
4455 skb->csum_valid = 0; \
4456 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
4457 __ret = __skb_checksum_validate_complete(skb, \
4458 complete, compute_pseudo(skb, proto)); \
4459 __ret; \
4460 })
4461
4462 #define skb_checksum_init(skb, proto, compute_pseudo) \
4463 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
4464
4465 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
4466 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
4467
4468 #define skb_checksum_validate(skb, proto, compute_pseudo) \
4469 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
4470
4471 #define skb_checksum_validate_zero_check(skb, proto, check, \
4472 compute_pseudo) \
4473 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
4474
4475 #define skb_checksum_simple_validate(skb) \
4476 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
4477
4478 static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
4479 {
4480 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
4481 }
4482
4483 static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
4484 {
4485 skb->csum = ~pseudo;
4486 skb->ip_summed = CHECKSUM_COMPLETE;
4487 }
4488
4489 #define skb_checksum_try_convert(skb, proto, compute_pseudo) \
4490 do { \
4491 if (__skb_checksum_convert_check(skb)) \
4492 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
4493 } while (0)
4494
4495 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
4496 u16 start, u16 offset)
4497 {
4498 skb->ip_summed = CHECKSUM_PARTIAL;
4499 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
4500 skb->csum_offset = offset - start;
4501 }
4502
4503
4504
4505
4506
4507
4508 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
4509 int start, int offset, bool nopartial)
4510 {
4511 __wsum delta;
4512
4513 if (!nopartial) {
4514 skb_remcsum_adjust_partial(skb, ptr, start, offset);
4515 return;
4516 }
4517
4518 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
4519 __skb_checksum_complete(skb);
4520 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
4521 }
4522
4523 delta = remcsum_adjust(ptr, skb->csum, start, offset);
4524
4525
4526 skb->csum = csum_add(skb->csum, delta);
4527 }
4528
4529 static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
4530 {
4531 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4532 return (void *)(skb->_nfct & NFCT_PTRMASK);
4533 #else
4534 return NULL;
4535 #endif
4536 }
4537
4538 static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
4539 {
4540 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4541 return skb->_nfct;
4542 #else
4543 return 0UL;
4544 #endif
4545 }
4546
4547 static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
4548 {
4549 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4550 skb->slow_gro |= !!nfct;
4551 skb->_nfct = nfct;
4552 #endif
4553 }
4554
4555 #ifdef CONFIG_SKB_EXTENSIONS
4556 enum skb_ext_id {
4557 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4558 SKB_EXT_BRIDGE_NF,
4559 #endif
4560 #ifdef CONFIG_XFRM
4561 SKB_EXT_SEC_PATH,
4562 #endif
4563 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4564 TC_SKB_EXT,
4565 #endif
4566 #if IS_ENABLED(CONFIG_MPTCP)
4567 SKB_EXT_MPTCP,
4568 #endif
4569 #if IS_ENABLED(CONFIG_MCTP_FLOWS)
4570 SKB_EXT_MCTP,
4571 #endif
4572 SKB_EXT_NUM,
4573 };
4574
/**
 *	struct skb_ext - sk_buff extensions
 *	@refcnt: 1 on allocation, deallocated on 0
 *	@offset: offset to add to @data to obtain extension address
 *	@chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
 *	@data: start of extension data, variable sized
 *
 *	Note: offsets/lengths are stored in chunks of 8 bytes, this allows
 *	to use 'u8' types while allowing up to 2kb worth of extension data.
 */
4585 struct skb_ext {
4586 refcount_t refcnt;
4587 u8 offset[SKB_EXT_NUM];
4588 u8 chunks;
4589 char data[] __aligned(8);
4590 };
4591
4592 struct skb_ext *__skb_ext_alloc(gfp_t flags);
4593 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
4594 struct skb_ext *ext);
4595 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4596 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4597 void __skb_ext_put(struct skb_ext *ext);
4598
4599 static inline void skb_ext_put(struct sk_buff *skb)
4600 {
4601 if (skb->active_extensions)
4602 __skb_ext_put(skb->extensions);
4603 }
4604
4605 static inline void __skb_ext_copy(struct sk_buff *dst,
4606 const struct sk_buff *src)
4607 {
4608 dst->active_extensions = src->active_extensions;
4609
4610 if (src->active_extensions) {
4611 struct skb_ext *ext = src->extensions;
4612
4613 refcount_inc(&ext->refcnt);
4614 dst->extensions = ext;
4615 }
4616 }
4617
4618 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
4619 {
4620 skb_ext_put(dst);
4621 __skb_ext_copy(dst, src);
4622 }
4623
4624 static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
4625 {
4626 return !!ext->offset[i];
4627 }
4628
4629 static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
4630 {
4631 return skb->active_extensions & (1 << id);
4632 }
4633
4634 static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
4635 {
4636 if (skb_ext_exist(skb, id))
4637 __skb_ext_del(skb, id);
4638 }
4639
4640 static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
4641 {
4642 if (skb_ext_exist(skb, id)) {
4643 struct skb_ext *ext = skb->extensions;
4644
4645 return (void *)ext + (ext->offset[id] << 3);
4646 }
4647
4648 return NULL;
4649 }
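/* Illustrative only: attaching and later looking up an extension, using
 * SKB_EXT_MPTCP as the example id (only available with CONFIG_MPTCP=y):
 *
 *	struct mptcp_ext *ext = skb_ext_add(skb, SKB_EXT_MPTCP);
 *	if (!ext)
 *		return -ENOMEM;
 *	// ... fill in *ext ...
 *
 *	ext = skb_ext_find(skb, SKB_EXT_MPTCP);	// NULL if never added
 */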
4650
4651 static inline void skb_ext_reset(struct sk_buff *skb)
4652 {
4653 if (unlikely(skb->active_extensions)) {
4654 __skb_ext_put(skb->extensions);
4655 skb->active_extensions = 0;
4656 }
4657 }
4658
4659 static inline bool skb_has_extensions(struct sk_buff *skb)
4660 {
4661 return unlikely(skb->active_extensions);
4662 }
4663 #else
4664 static inline void skb_ext_put(struct sk_buff *skb) {}
4665 static inline void skb_ext_reset(struct sk_buff *skb) {}
4666 static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
4667 static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
4668 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
4669 static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
4670 #endif
4671
4672 static inline void nf_reset_ct(struct sk_buff *skb)
4673 {
4674 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4675 nf_conntrack_put(skb_nfct(skb));
4676 skb->_nfct = 0;
4677 #endif
4678 }
4679
4680 static inline void nf_reset_trace(struct sk_buff *skb)
4681 {
4682 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4683 skb->nf_trace = 0;
4684 #endif
4685 }
4686
4687 static inline void ipvs_reset(struct sk_buff *skb)
4688 {
4689 #if IS_ENABLED(CONFIG_IP_VS)
4690 skb->ipvs_property = 0;
4691 #endif
4692 }
4693
4694
4695 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
4696 bool copy)
4697 {
4698 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4699 dst->_nfct = src->_nfct;
4700 nf_conntrack_get(skb_nfct(src));
4701 #endif
4702 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4703 if (copy)
4704 dst->nf_trace = src->nf_trace;
4705 #endif
4706 }
4707
4708 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
4709 {
4710 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4711 nf_conntrack_put(skb_nfct(dst));
4712 #endif
4713 dst->slow_gro = src->slow_gro;
4714 __nf_copy(dst, src, true);
4715 }
4716
4717 #ifdef CONFIG_NETWORK_SECMARK
4718 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4719 {
4720 to->secmark = from->secmark;
4721 }
4722
4723 static inline void skb_init_secmark(struct sk_buff *skb)
4724 {
4725 skb->secmark = 0;
4726 }
4727 #else
4728 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4729 { }
4730
4731 static inline void skb_init_secmark(struct sk_buff *skb)
4732 { }
4733 #endif
4734
4735 static inline int secpath_exists(const struct sk_buff *skb)
4736 {
4737 #ifdef CONFIG_XFRM
4738 return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
4739 #else
4740 return 0;
4741 #endif
4742 }
4743
4744 static inline bool skb_irq_freeable(const struct sk_buff *skb)
4745 {
4746 return !skb->destructor &&
4747 !secpath_exists(skb) &&
4748 !skb_nfct(skb) &&
4749 !skb->_skb_refdst &&
4750 !skb_has_frag_list(skb);
4751 }
4752
4753 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
4754 {
4755 skb->queue_mapping = queue_mapping;
4756 }
4757
4758 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
4759 {
4760 return skb->queue_mapping;
4761 }
4762
4763 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
4764 {
4765 to->queue_mapping = from->queue_mapping;
4766 }
4767
4768 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
4769 {
4770 skb->queue_mapping = rx_queue + 1;
4771 }
4772
4773 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
4774 {
4775 return skb->queue_mapping - 1;
4776 }
4777
4778 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
4779 {
4780 return skb->queue_mapping != 0;
4781 }
4782
4783 static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
4784 {
4785 skb->dst_pending_confirm = val;
4786 }
4787
4788 static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
4789 {
4790 return skb->dst_pending_confirm != 0;
4791 }
4792
4793 static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
4794 {
4795 #ifdef CONFIG_XFRM
4796 return skb_ext_find(skb, SKB_EXT_SEC_PATH);
4797 #else
4798 return NULL;
4799 #endif
4800 }
4801
4802
4803
4804
4805
4806
4807
4808 struct skb_gso_cb {
4809 union {
4810 int mac_offset;
4811 int data_offset;
4812 };
4813 int encap_level;
4814 __wsum csum;
4815 __u16 csum_start;
4816 };
4817 #define SKB_GSO_CB_OFFSET 32
4818 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
4819
4820 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
4821 {
4822 return (skb_mac_header(inner_skb) - inner_skb->head) -
4823 SKB_GSO_CB(inner_skb)->mac_offset;
4824 }
4825
4826 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
4827 {
4828 int new_headroom, headroom;
4829 int ret;
4830
4831 headroom = skb_headroom(skb);
4832 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
4833 if (ret)
4834 return ret;
4835
4836 new_headroom = skb_headroom(skb);
4837 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4838 return 0;
4839 }
4840
4841 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4842 {
4843
4844 if (skb->remcsum_offload)
4845 return;
4846
4847 SKB_GSO_CB(skb)->csum = res;
4848 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4849 }
4850
/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of the transport header to SKB_GSO_CB(skb)->csum_start,
 * and then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resulting packet starting from the transport header -- the resulting
 * checksum is in the res argument (i.e. normally zero or the ~ of the
 * checksum of a pseudo header).
 */
4859 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4860 {
4861 unsigned char *csum_start = skb_transport_header(skb);
4862 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4863 __wsum partial = SKB_GSO_CB(skb)->csum;
4864
4865 SKB_GSO_CB(skb)->csum = res;
4866 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4867
4868 return csum_fold(csum_partial(csum_start, plen, partial));
4869 }
4870
4871 static inline bool skb_is_gso(const struct sk_buff *skb)
4872 {
4873 return skb_shinfo(skb)->gso_size;
4874 }
4875
4876
4877 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4878 {
4879 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4880 }
4881
4882
4883 static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4884 {
4885 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4886 }
4887
4888
4889 static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4890 {
4891 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4892 }
4893
4894 static inline void skb_gso_reset(struct sk_buff *skb)
4895 {
4896 skb_shinfo(skb)->gso_size = 0;
4897 skb_shinfo(skb)->gso_segs = 0;
4898 skb_shinfo(skb)->gso_type = 0;
4899 }
4900
4901 static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
4902 u16 increment)
4903 {
4904 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4905 return;
4906 shinfo->gso_size += increment;
4907 }
4908
4909 static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
4910 u16 decrement)
4911 {
4912 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4913 return;
4914 shinfo->gso_size -= decrement;
4915 }
4916
4917 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
4918
4919 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4920 {
4921
4922
4923 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4924
4925 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4926 unlikely(shinfo->gso_type == 0)) {
4927 __skb_warn_lro_forwarding(skb);
4928 return true;
4929 }
4930 return false;
4931 }
4932
4933 static inline void skb_forward_csum(struct sk_buff *skb)
4934 {
4935
4936 if (skb->ip_summed == CHECKSUM_COMPLETE)
4937 skb->ip_summed = CHECKSUM_NONE;
4938 }
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4949 {
4950 DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE);
4951 }
4952
4953 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4954
4955 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4956 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4957 unsigned int transport_len,
4958 __sum16(*skb_chkf)(struct sk_buff *skb));
4959
/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
4969 static inline bool skb_head_is_locked(const struct sk_buff *skb)
4970 {
4971 return !skb->head_frag || skb_cloned(skb);
4972 }
4973
/* Local Checksum Offload.
 * Compute the outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * an explanation of how this works.
 * Fill in the outer checksum adjustment (e.g. with the sum of the outer
 * pseudo-header) before calling.
 * Also ensure that the inner checksum is in CHECKSUM_PARTIAL state.
 */
4983 static inline __wsum lco_csum(struct sk_buff *skb)
4984 {
4985 unsigned char *csum_start = skb_checksum_start(skb);
4986 unsigned char *l4_hdr = skb_transport_header(skb);
4987 __wsum partial;
4988
4989
4990 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4991 skb->csum_offset));
4992
4993
4994
4995
4996 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4997 }
4998
4999 static inline bool skb_is_redirected(const struct sk_buff *skb)
5000 {
5001 return skb->redirected;
5002 }
5003
5004 static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
5005 {
5006 skb->redirected = 1;
5007 #ifdef CONFIG_NET_REDIRECT
5008 skb->from_ingress = from_ingress;
5009 if (skb->from_ingress)
5010 skb_clear_tstamp(skb);
5011 #endif
5012 }
5013
5014 static inline void skb_reset_redirect(struct sk_buff *skb)
5015 {
5016 skb->redirected = 0;
5017 }
5018
5019 static inline bool skb_csum_is_sctp(struct sk_buff *skb)
5020 {
5021 return skb->csum_not_inet;
5022 }
5023
5024 static inline void skb_set_kcov_handle(struct sk_buff *skb,
5025 const u64 kcov_handle)
5026 {
5027 #ifdef CONFIG_KCOV
5028 skb->kcov_handle = kcov_handle;
5029 #endif
5030 }
5031
5032 static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
5033 {
5034 #ifdef CONFIG_KCOV
5035 return skb->kcov_handle;
5036 #else
5037 return 0;
5038 #endif
5039 }
5040
5041 #ifdef CONFIG_PAGE_POOL
5042 static inline void skb_mark_for_recycle(struct sk_buff *skb)
5043 {
5044 skb->pp_recycle = 1;
5045 }
5046 #endif
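/* Illustrative only: a page_pool based driver marks the skb it builds
 * around pool-backed buffers so that kfree_skb()/napi_consume_skb() can
 * return the pages to the pool instead of the page allocator (va and
 * truesize are placeholders for the buffer address and its true size):
 *
 *	skb = napi_build_skb(va, truesize);
 *	if (!skb)
 *		goto recycle_buffer;
 *	skb_mark_for_recycle(skb);
 */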
5047
5048 static inline bool skb_pp_recycle(struct sk_buff *skb, void *data)
5049 {
5050 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
5051 return false;
5052 return page_pool_return_skb_page(virt_to_page(data));
5053 }
5054
5055 #endif
5056 #endif