#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

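/* GRO receive handler for ESP: parse the SPI and sequence number, attach
 * the matching xfrm state to the skb's sec_path when the packet has not
 * already been decrypted in hardware, and feed the packet to xfrm_input().
 */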
static struct sk_buff *esp4_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;

        if (!pskb_pull(skb, offset))
                return NULL;

        if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}

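/* GSO encap callback: write the outer ESP header (SPI and low sequence
 * number) in front of the payload and record the inner protocol in the
 * offload context for later use by the segmentation and xmit paths.
 */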
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

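/* Tunnel mode: the payload behind the ESP header is a complete IP packet,
 * so generic ethernet/IP GSO can segment it as ETH_P_IP.
 */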
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
}

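/* Transport mode: step over the room reserved for the ESP header and let
 * the inner transport protocol's offload handler do the segmentation.
 */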
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

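/* BEET mode: account for the optional BEET pseudo header (or, for IPv6
 * selectors, any extension headers) before calling the inner protocol's
 * segmentation callback.
 */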
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
                                              struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        u8 proto = xo->proto;

        skb->transport_header += x->props.header_len;

        if (x->sel.family != AF_INET6) {
                if (proto == IPPROTO_BEETPH) {
                        struct ip_beet_phdr *ph =
                                (struct ip_beet_phdr *)skb->data;

                        skb->transport_header += ph->hdrlen * 8;
                        proto = ph->nexthdr;
                } else {
                        skb->transport_header -= IPV4_BEET_PHMAXLEN;
                }
        } else {
                __be16 frag;

                skb->transport_header +=
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }

        if (proto == IPPROTO_IPV6)
                skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

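/* Pick the segmentation helper that matches the outer encapsulation mode
 * of the xfrm state.
 */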
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm4_transport_gso_segment(x, skb, features);
        case XFRM_MODE_BEET:
                return xfrm4_beet_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}

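/* gso_segment callback for IPPROTO_ESP: validate the offload state, strip
 * the ESP header and IV, mask out device features that cannot be used for
 * this packet, and segment the inner payload according to the mode.
 */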
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        /* Without full hardware ESP support on this device the segments
         * will be encrypted in software, so do not advertise SG or
         * checksum offload to the inner segmentation code.
         */
        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~(NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

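/* Tail of ESP receive processing for offloaded packets: trailer checks and
 * header removal are finished in esp_input_done2() once the crypto result
 * is available.
 */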
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}

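/* xmit callback for ESP offload: choose between real hardware offload and
 * software fallback, build the ESP header and sequence numbers, and either
 * leave encryption to the driver or do it here via esp_output_tail().
 */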
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if ((!(features & NETIF_F_HW_ESP) &&
             !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
            x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || !skb_is_gso(skb)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload) {
                if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
                        return -ENOMEM;

                xo = xfrm_offload(skb);
                if (!xo)
                        return -EINVAL;

                xo->flags |= XFRM_XMIT;
                return 0;
        }

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}

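/* GRO/GSO hooks registered with the inet offload layer for IPPROTO_ESP. */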
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

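/* Offload callbacks wired into the xfrm layer for ESP states. */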
static const struct xfrm_type_offload esp_type_offload = {
        .owner = THIS_MODULE,
        .proto = IPPROTO_ESP,
        .input_tail = esp_input_tail,
        .xmit = esp_xmit,
        .encap = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");