0001
0002
0003
0004
0005
0006
0007 #include <linux/netdevice.h>
0008 #include <linux/ip.h>
0009 #include <linux/ipv6.h>
0010 #include <net/ip6_checksum.h>
0011 #include <linux/bitfield.h>
0012 #include "rmnet_config.h"
0013 #include "rmnet_map.h"
0014 #include "rmnet_private.h"
0015
0016 #define RMNET_MAP_DEAGGR_SPACING 64
0017 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
0018
0019 static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
0020 const void *txporthdr)
0021 {
0022 if (protocol == IPPROTO_TCP)
0023 return &((struct tcphdr *)txporthdr)->check;
0024
0025 if (protocol == IPPROTO_UDP)
0026 return &((struct udphdr *)txporthdr)->check;
0027
0028 return NULL;
0029 }
0030
/* Validate the MAP checksum trailer for an IPv4 packet.
 *
 * @skb:          packet with skb->data pointing at the IPv4 header
 * @csum_trailer: hardware-generated checksum trailer for this packet
 * @priv:         rmnet device private data (statistics are updated)
 *
 * Returns 0 when the transport checksum is valid (or legitimately
 * absent for UDP), or a negative errno on failure/unsupported input.
 */
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct iphdr *ip4h = (struct iphdr *)skb->data;
	void *txporthdr = skb->data + ip4h->ihl * 4;
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip_payload_csum;

	/* Computing the checksum over the IP header (which contains its
	 * own checksum field) must yield 0 if the header is intact.
	 */
	if (ip_fast_csum(ip4h, ip4h->ihl)) {
		priv->stats.csum_ip4_header_bad++;
		return -EINVAL;
	}

	/* Checksum validation is not done on fragments */
	if (ip_is_fragment(ip4h)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	/* Only TCP and UDP carry a checksum field we can verify */
	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768: the UDP checksum is optional for IPv4; 0 means unused */
	if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

	/* The trailer value is the checksum computed over the IP payload,
	 * i.e. the transport header and data including the transport
	 * checksum field itself.  A correct transport checksum is defined
	 * such that summing the pseudo header together with the transport
	 * header and payload yields all-ones; therefore, for an intact
	 * packet, the trailer checksum must equal the one's complement of
	 * the pseudo header checksum computed below.
	 */
	ip_payload_csum = csum_trailer->csum_value;

	pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					ip4h->protocol, 0);

	/* Mismatch means the payload or addressed fields were corrupted */
	if (ip_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}
0105
0106 #if IS_ENABLED(CONFIG_IPV6)
/* Validate the MAP checksum trailer for an IPv6 packet.
 *
 * @skb:          packet with skb->data pointing at the IPv6 header
 * @csum_trailer: hardware-generated checksum trailer for this packet
 * @priv:         rmnet device private data (statistics are updated)
 *
 * Returns 0 when the checksum validates, or a negative errno otherwise.
 */
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
	void *txporthdr = skb->data + sizeof(*ip6h);
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip6_payload_csum;
	__be16 ip_header_csum;

	/* Only TCP and UDP carry a checksum field we can verify.
	 * NOTE(review): nexthdr is used directly as the transport
	 * protocol, so packets carrying IPv6 extension headers fall into
	 * the unsupported-transport path here — confirm that is intended.
	 */
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* The trailer checksum covers the IPv6 header as well as the
	 * payload, so subtract the header's own contribution to be left
	 * with just the payload checksum.
	 */
	ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
	ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);

	pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ntohs(ip6h->payload_len),
				      ip6h->nexthdr, 0);

	/* A correct transport checksum makes the sum of the pseudo
	 * header, transport header, and payload all-ones, so for an
	 * intact packet the payload checksum derived from the trailer
	 * must equal the one's complement of the pseudo header checksum.
	 */
	if (ip6_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}
0156 #else
/* Stub for kernels built without CONFIG_IPV6.  The caller only takes
 * the IPv6 path when IS_ENABLED(CONFIG_IPV6), so this is never reached;
 * it exists to keep the build working and reports success.
 */
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	return 0;
}
0164 #endif
0165
0166 static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
0167 {
0168 void *txphdr;
0169 u16 *csum;
0170
0171 txphdr = (void *)ip4h + ip4h->ihl * 4;
0172
0173 if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
0174 csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
0175 *csum = ~(*csum);
0176 }
0177 }
0178
0179 static void
0180 rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
0181 struct rmnet_map_ul_csum_header *ul_header,
0182 struct sk_buff *skb)
0183 {
0184 u16 val;
0185
0186 val = MAP_CSUM_UL_ENABLED_FLAG;
0187 if (iphdr->protocol == IPPROTO_UDP)
0188 val |= MAP_CSUM_UL_UDP_FLAG;
0189 val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
0190
0191 ul_header->csum_start_offset = htons(skb_network_header_len(skb));
0192 ul_header->csum_info = htons(val);
0193
0194 skb->ip_summed = CHECKSUM_NONE;
0195
0196 rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
0197 }
0198
0199 #if IS_ENABLED(CONFIG_IPV6)
0200 static void
0201 rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
0202 {
0203 void *txphdr;
0204 u16 *csum;
0205
0206 txphdr = ip6h + 1;
0207
0208 if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
0209 csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
0210 *csum = ~(*csum);
0211 }
0212 }
0213
0214 static void
0215 rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
0216 struct rmnet_map_ul_csum_header *ul_header,
0217 struct sk_buff *skb)
0218 {
0219 u16 val;
0220
0221 val = MAP_CSUM_UL_ENABLED_FLAG;
0222 if (ipv6hdr->nexthdr == IPPROTO_UDP)
0223 val |= MAP_CSUM_UL_UDP_FLAG;
0224 val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
0225
0226 ul_header->csum_start_offset = htons(skb_network_header_len(skb));
0227 ul_header->csum_info = htons(val);
0228
0229 skb->ip_summed = CHECKSUM_NONE;
0230
0231 rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
0232 }
0233 #else
/* Stub for kernels built without CONFIG_IPV6.  The caller only takes
 * the IPv6 path when IS_ENABLED(CONFIG_IPV6), so this is never reached;
 * it exists to keep the build working and intentionally does nothing.
 */
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
}
0240 #endif
0241
/* Push a MAPv5 checksum-offload header onto an egress packet.
 *
 * The header is always pushed (zeroed, with its type set).  When the
 * stack requests checksum offload (CHECKSUM_PARTIAL) and the packet has
 * a TCP/UDP checksum field we can locate, the header's VALID flag is
 * set so the hardware computes the checksum; otherwise only the
 * software-checksum counter is bumped and the header stays cleared.
 */
static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
						struct rmnet_port *port,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_csum_header *ul_header;

	ul_header = skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
						MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		void *iph = ip_hdr(skb);
		__sum16 *check;
		void *trans;
		u8 proto;

		/* Determine the transport protocol and where its header
		 * starts for either IP version.
		 */
		if (skb->protocol == htons(ETH_P_IP)) {
			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

			proto = ((struct iphdr *)iph)->protocol;
			trans = iph + ip_len;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			u16 ip_len = sizeof(struct ipv6hdr);

			proto = ((struct ipv6hdr *)iph)->nexthdr;
			trans = iph + ip_len;
		} else {
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
		}

		check = rmnet_map_get_csum_field(proto, trans);
		if (check) {
			skb->ip_summed = CHECKSUM_NONE;

			/* Ask the hardware to compute the checksum */
			ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
			priv->stats.csum_hw++;
			return;
		}
	}

sw_csum:
	priv->stats.csum_sw++;
}
0289
0290
0291
0292
0293
0294 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
0295 int hdrlen,
0296 struct rmnet_port *port,
0297 int pad)
0298 {
0299 struct rmnet_map_header *map_header;
0300 u32 padding, map_datalen;
0301
0302 map_datalen = skb->len - hdrlen;
0303 map_header = (struct rmnet_map_header *)
0304 skb_push(skb, sizeof(struct rmnet_map_header));
0305 memset(map_header, 0, sizeof(struct rmnet_map_header));
0306
0307
0308 if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
0309 map_header->flags |= MAP_NEXT_HEADER_FLAG;
0310
0311 if (pad == RMNET_MAP_NO_PAD_BYTES) {
0312 map_header->pkt_len = htons(map_datalen);
0313 return map_header;
0314 }
0315
0316 BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
0317 padding = ALIGN(map_datalen, 4) - map_datalen;
0318
0319 if (padding == 0)
0320 goto done;
0321
0322 if (skb_tailroom(skb) < padding)
0323 return NULL;
0324
0325 skb_put_zero(skb, padding);
0326
0327 done:
0328 map_header->pkt_len = htons(map_datalen + padding);
0329
0330 map_header->flags = padding & MAP_PAD_LEN_MASK;
0331
0332 return map_header;
0333 }
0334
0335
0336
0337
0338
0339
0340
/* Deaggregate a single packet from an aggregated frame.
 *
 * A new buffer is allocated for each portion of the aggregated frame;
 * the copied portion (MAP header, payload, and any checksum metadata)
 * is then pulled off the source skb.  The caller keeps calling this
 * until NULL is returned (no more complete packets) and remains
 * responsible for freeing the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_v5_csum_header *next_hdr = NULL;
	struct rmnet_map_header *maph;
	void *data = skb->data;
	struct sk_buff *skbn;
	u8 nexthdr_type;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(*maph);

	/* Account for per-packet checksum metadata: a trailer for MAPv4,
	 * or a next header for MAPv5 data (non-command) packets.
	 */
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
		if (!(maph->flags & MAP_CMD_FLAG)) {
			packet_len += sizeof(*next_hdr);
			if (maph->flags & MAP_NEXT_HEADER_FLAG)
				next_hdr = data + sizeof(*maph);
			else
				/* Data packets must carry the next header
				 * in this format; bail out otherwise.
				 */
				return NULL;
		}
	}

	/* Not enough data left for a whole packet */
	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;

	/* Some hardware can send us empty frames.  Catch them. */
	if (!maph->pkt_len)
		return NULL;

	/* The only MAPv5 next header supported is checksum offload */
	if (next_hdr) {
		nexthdr_type = u8_get_bits(next_hdr->header_info,
					   MAPV5_HDRINFO_HDR_TYPE_FMASK);
		if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
			return NULL;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	return skbn;
}
0395
0396
0397
0398
0399
0400
0401
0402 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
0403 {
0404 struct rmnet_priv *priv = netdev_priv(skb->dev);
0405 struct rmnet_map_dl_csum_trailer *csum_trailer;
0406
0407 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
0408 priv->stats.csum_sw++;
0409 return -EOPNOTSUPP;
0410 }
0411
0412 csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
0413
0414 if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
0415 priv->stats.csum_valid_unset++;
0416 return -EINVAL;
0417 }
0418
0419 if (skb->protocol == htons(ETH_P_IP))
0420 return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
0421
0422 if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
0423 return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
0424
0425 priv->stats.csum_err_invalid_ip_version++;
0426
0427 return -EPROTONOSUPPORT;
0428 }
0429
/* Push a MAPv4 uplink checksum-offload header onto an egress packet.
 *
 * The header is populated for IPv4/IPv6 TCP and UDP packets that
 * request offload (CHECKSUM_PARTIAL); in every other case it is zeroed
 * so no offload is requested and the checksum stays in software.
 */
static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	/* Push the header unconditionally first; the sw_csum fallback
	 * below then only needs to zero it.
	 */
	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto sw_csum;

	/* The IP header immediately follows the pushed MAP header */
	iphdr = (char *)ul_header +
		sizeof(struct rmnet_map_ul_csum_header);

	if (skb->protocol == htons(ETH_P_IP)) {
		rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
		rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	priv->stats.csum_err_invalid_ip_version++;

sw_csum:
	/* Cleared header: no offload flags set, checksum done in software */
	memset(ul_header, 0, sizeof(*ul_header));

	priv->stats.csum_sw++;
}
0469
0470
0471
0472
0473 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
0474 struct rmnet_port *port,
0475 struct net_device *orig_dev,
0476 int csum_type)
0477 {
0478 switch (csum_type) {
0479 case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
0480 rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
0481 break;
0482 case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
0483 rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
0484 break;
0485 default:
0486 break;
0487 }
0488 }
0489
0490
/* Process the MAPv5 next header of an ingress packet.
 *
 * Verifies the next header is of the checksum-offload type, translates
 * its VALID flag into skb->ip_summed state (with statistics), and pulls
 * the next header off the skb.
 *
 * @len is accepted for interface symmetry but is not used here.
 * Returns 0 on success, -EINVAL for an unexpected next-header type.
 */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_v5_csum_header *next_hdr;
	u8 nexthdr_type;

	next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
			sizeof(struct rmnet_map_header));

	nexthdr_type = u8_get_bits(next_hdr->header_info,
				   MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
		return -EINVAL;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		/* No RX offload on the device: checksum left to software */
		priv->stats.csum_sw++;
	} else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
		/* Hardware already verified the checksum */
		priv->stats.csum_ok++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		priv->stats.csum_valid_unset++;
	}

	/* Strip the checksum-offload header */
	skb_pull(skb, sizeof(*next_hdr));

	return 0;
}