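/* RMNET data ingress/egress handlers: MAP encapsulation/decapsulation,
 * de-aggregation, checksum offload and delivery to rmnet virtual devices.
 */
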
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

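/* Set skb->protocol from the IP version nibble of the payload; anything
 * other than IPv4 or IPv6 is left as raw MAP traffic (ETH_P_MAP).
 */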
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}

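/* Deliver a decapsulated packet to the network stack through the virtual
 * device's GRO cell.
 */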
static void
rmnet_deliver_skb(struct sk_buff *skb)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb, skb->dev);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);
	gro_cells_receive(&priv->gro_cells, skb);
}

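/* MAP ingress: validate the MAP header, resolve the logical endpoint from
 * the mux ID and hand the inner packet to the matching virtual device.
 */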
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_map_header *map_header = (void *)skb->data;
	struct rmnet_endpoint *ep;
	u16 len, pad;
	u8 mux_id;

	if (map_header->flags & MAP_CMD_FLAG) {
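		/* Packet carries a MAP control command rather than data */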
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = map_header->mux_id;
	pad = map_header->flags & MAP_PAD_LEN_MASK;
	len = ntohs(map_header->pkt_len) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	if ((port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) &&
	    (map_header->flags & MAP_NEXT_HEADER_FLAG)) {
		if (rmnet_map_process_next_hdr_packet(skb, len))
			goto free_skb;
		skb_pull(skb, sizeof(*map_header));
		rmnet_set_skb_proto(skb);
	} else {
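		/* Plain MAP data: strip the MAP header, then try MAPv4
		 * downlink checksum offload.
		 */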
		skb_pull(skb, sizeof(*map_header));
		rmnet_set_skb_proto(skb);
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4 &&
		    !rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}

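/* Top-level MAP ingress: optionally de-aggregate a frame that carries
 * several MAP packets and process each one individually.
 */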
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;

	if (skb->dev->type == ARPHRD_ETHER) {
		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			return;
		}

		skb_push(skb, ETH_HLEN);
	}

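	/* A de-aggregation port receives frames that bundle multiple MAP
	 * packets; split them and handle each packet on its own.
	 */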
	if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		consume_skb(skb);
	} else {
		__rmnet_map_ingress_handler(skb, port);
	}
}

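/* MAP egress: prepend the MAP header (plus an uplink checksum offload header
 * when enabled) and tag the packet with the endpoint's mux ID.
 */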
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_len, csum_type = 0;
	struct rmnet_map_header *map_header;

	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);

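	/* Pick the uplink checksum offload header type configured for the
	 * port and account for the extra header space it needs.
	 */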
	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
	} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
		additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
	}

	required_headroom += additional_header_len;

	if (skb_cow_head(skb, required_headroom) < 0)
		return -ENOMEM;

	if (csum_type)
		rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
						 csum_type);

	map_header = rmnet_map_add_map_header(skb, additional_header_len,
					      port, 0);
	if (!map_header)
		return -ENOMEM;

	map_header->mux_id = mux_id;

	skb->protocol = htons(ETH_P_MAP);

	return 0;
}

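/* Bridge mode: restore the MAC header and forward the frame unmodified to
 * the device at the other end of the bridge.
 */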
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
	if (skb_mac_header_was_set(skb))
		skb_push(skb, skb->mac_len);

	if (bridge_dev) {
		skb->dev = bridge_dev;
		dev_queue_xmit(skb);
	}
}

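/* Ingress / egress entry points */

/* rx_handler attached to the real device: the packet is processed according
 * to the port's ingress data format, the logical endpoint is determined from
 * the MAP header, and the packet is delivered to that endpoint's virtual
 * device (or bridged, depending on the port mode).
 */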
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct rmnet_port *port;
	struct net_device *dev;

	if (!skb)
		goto done;

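	/* MAP processing below expects linear skb data */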
	if (skb_linearize(skb)) {
		kfree_skb(skb);
		goto done;
	}

	if (skb->pkt_type == PACKET_LOOPBACK)
		return RX_HANDLER_PASS;

	dev = skb->dev;
	port = rmnet_get_port_rcu(dev);
	if (unlikely(!port)) {
		dev_core_stats_rx_nohandler_inc(skb->dev);
		kfree_skb(skb);
		goto done;
	}

	switch (port->rmnet_mode) {
	case RMNET_EPMODE_VND:
		rmnet_map_ingress_handler(skb, port);
		break;
	case RMNET_EPMODE_BRIDGE:
		rmnet_bridge_handler(skb, port->bridge_ep);
		break;
	}

done:
	return RX_HANDLER_CONSUMED;
}

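/* Egress entry point called from the virtual device's transmit path: adds
 * the MAP header (and checksum offload header) and sends the packet out on
 * the real device.
 */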
void rmnet_egress_handler(struct sk_buff *skb)
{
	struct net_device *orig_dev;
	struct rmnet_port *port;
	struct rmnet_priv *priv;
	u8 mux_id;

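	/* Lower the socket pacing shift so TCP small queues allow more
	 * outstanding data to be queued towards the real device.
	 */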
	sk_pacing_shift_update(skb->sk, 8);

	orig_dev = skb->dev;
	priv = netdev_priv(orig_dev);
	skb->dev = priv->real_dev;
	mux_id = priv->mux_id;

	port = rmnet_get_port_rcu(skb->dev);
	if (!port)
		goto drop;

	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
		goto drop;

	rmnet_vnd_tx_fixup(skb, orig_dev);

	dev_queue_xmit(skb);
	return;

drop:
	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
	kfree_skb(skb);
}