0001
0002
0003
0004
0005
0006
0007 #include <linux/etherdevice.h>
0008 #include <linux/ethtool.h>
0009 #include <linux/if_arp.h>
0010 #include <net/pkt_sched.h>
0011 #include "rmnet_config.h"
0012 #include "rmnet_handlers.h"
0013 #include "rmnet_private.h"
0014 #include "rmnet_map.h"
0015 #include "rmnet_vnd.h"
0016
0017
0018
0019 void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
0020 {
0021 struct rmnet_priv *priv = netdev_priv(dev);
0022 struct rmnet_pcpu_stats *pcpu_ptr;
0023
0024 pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
0025
0026 u64_stats_update_begin(&pcpu_ptr->syncp);
0027 pcpu_ptr->stats.rx_pkts++;
0028 pcpu_ptr->stats.rx_bytes += skb->len;
0029 u64_stats_update_end(&pcpu_ptr->syncp);
0030 }
0031
0032 void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
0033 {
0034 struct rmnet_priv *priv = netdev_priv(dev);
0035 struct rmnet_pcpu_stats *pcpu_ptr;
0036
0037 pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
0038
0039 u64_stats_update_begin(&pcpu_ptr->syncp);
0040 pcpu_ptr->stats.tx_pkts++;
0041 pcpu_ptr->stats.tx_bytes += skb->len;
0042 u64_stats_update_end(&pcpu_ptr->syncp);
0043 }
0044
0045
0046
0047 static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
0048 struct net_device *dev)
0049 {
0050 struct rmnet_priv *priv;
0051
0052 priv = netdev_priv(dev);
0053 if (priv->real_dev) {
0054 rmnet_egress_handler(skb);
0055 } else {
0056 this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
0057 kfree_skb(skb);
0058 }
0059 return NETDEV_TX_OK;
0060 }
0061
0062 static int rmnet_vnd_headroom(struct rmnet_port *port)
0063 {
0064 u32 headroom;
0065
0066 headroom = sizeof(struct rmnet_map_header);
0067
0068 if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
0069 headroom += sizeof(struct rmnet_map_ul_csum_header);
0070
0071 return headroom;
0072 }
0073
/* ndo_change_mtu: accept the new MTU only if it is non-negative, within
 * the rmnet packet-size cap, and leaves room for the MAP headroom inside
 * the real device's MTU.
 *
 * NOTE(review): real_dev->mtu and headroom are both u32, so if headroom
 * ever exceeded real_dev->mtu the subtraction would wrap to a huge value
 * and the third check would pass for any new_mtu up to
 * RMNET_MAX_PACKET_SIZE — presumed unreachable since headroom is only a
 * few bytes; confirm against minimum device MTUs.
 */
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct rmnet_port *port;
	u32 headroom;

	/* _rtnl variant: caller is expected to hold RTNL (ndo/newlink path) */
	port = rmnet_get_port_rtnl(priv->real_dev);

	headroom = rmnet_vnd_headroom(port);

	/* new_mtu < 0 is checked first, so the later promotion of new_mtu
	 * to unsigned for the u32 comparisons is safe.
	 */
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE ||
	    new_mtu > (priv->real_dev->mtu - headroom))
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}
0091
0092 static int rmnet_vnd_get_iflink(const struct net_device *dev)
0093 {
0094 struct rmnet_priv *priv = netdev_priv(dev);
0095
0096 return priv->real_dev->ifindex;
0097 }
0098
0099 static int rmnet_vnd_init(struct net_device *dev)
0100 {
0101 struct rmnet_priv *priv = netdev_priv(dev);
0102 int err;
0103
0104 priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
0105 if (!priv->pcpu_stats)
0106 return -ENOMEM;
0107
0108 err = gro_cells_init(&priv->gro_cells, dev);
0109 if (err) {
0110 free_percpu(priv->pcpu_stats);
0111 return err;
0112 }
0113
0114 return 0;
0115 }
0116
0117 static void rmnet_vnd_uninit(struct net_device *dev)
0118 {
0119 struct rmnet_priv *priv = netdev_priv(dev);
0120
0121 gro_cells_destroy(&priv->gro_cells);
0122 free_percpu(priv->pcpu_stats);
0123 }
0124
/* ndo_get_stats64: sum the per-CPU vnd counters into @s.
 *
 * Each CPU's snapshot is taken under the u64_stats seqcount retry loop
 * so 64-bit counters read consistently on 32-bit architectures; the
 * copy must stay inside the begin/retry window.
 */
static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats = { };
	struct rmnet_pcpu_stats *pcpu_ptr;
	struct rmnet_vnd_stats snapshot;
	unsigned int cpu, start;

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		/* Retry until a writer did not race with the copy */
		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			snapshot = pcpu_ptr->stats;
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		/* Accumulate outside the seq window using the stable copy */
		total_stats.rx_pkts += snapshot.rx_pkts;
		total_stats.rx_bytes += snapshot.rx_bytes;
		total_stats.tx_pkts += snapshot.tx_pkts;
		total_stats.tx_bytes += snapshot.tx_bytes;
		total_stats.tx_drops += snapshot.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}
0155
/* netdev callbacks for rmnet virtual devices */
static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave = rmnet_add_bridge,
	.ndo_del_slave = rmnet_del_bridge,
	.ndo_init = rmnet_vnd_init,
	.ndo_uninit = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};
0166
/* Labels for the ethtool private stats.
 *
 * NOTE(review): rmnet_get_ethtool_stats() memcpy()s priv->stats directly
 * against ARRAY_SIZE of this table, so the entry order here is presumed
 * to mirror the u64 field order of struct rmnet_priv_stats — keep the
 * two in sync when adding counters.
 */
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Bad IPv4 header checksum",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
	"Checksum computed in hardware",
};
0180
0181 static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
0182 {
0183 switch (stringset) {
0184 case ETH_SS_STATS:
0185 memcpy(buf, &rmnet_gstrings_stats,
0186 sizeof(rmnet_gstrings_stats));
0187 break;
0188 }
0189 }
0190
0191 static int rmnet_get_sset_count(struct net_device *dev, int sset)
0192 {
0193 switch (sset) {
0194 case ETH_SS_STATS:
0195 return ARRAY_SIZE(rmnet_gstrings_stats);
0196 default:
0197 return -EOPNOTSUPP;
0198 }
0199 }
0200
/* ethtool get_ethtool_stats: export the private checksum counters.
 *
 * Copies priv->stats verbatim into @data, ARRAY_SIZE(rmnet_gstrings_stats)
 * u64s worth — this relies on the counter layout matching the string
 * table order (see rmnet_gstrings_stats).
 */
static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;

	if (!data)
		return;

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}
0212
/* ethtool callbacks: only private stats are exposed */
static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
};
0218
0219
0220
0221
0222 void rmnet_vnd_setup(struct net_device *rmnet_dev)
0223 {
0224 rmnet_dev->netdev_ops = &rmnet_vnd_ops;
0225 rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
0226 rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
0227 eth_hw_addr_random(rmnet_dev);
0228 rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
0229
0230
0231 rmnet_dev->header_ops = NULL;
0232 rmnet_dev->type = ARPHRD_RAWIP;
0233 rmnet_dev->hard_header_len = 0;
0234 rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
0235
0236 rmnet_dev->needs_free_netdev = true;
0237 rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
0238
0239 rmnet_dev->features |= NETIF_F_LLTX;
0240
0241
0242 rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
0243 eth_random_addr(rmnet_dev->perm_addr);
0244 }
0245
0246
0247
/* Create and register a vnd for mux id @id over @real_dev on @port.
 *
 * Returns 0 on success; -EBUSY if the mux id is taken, -EINVAL if the
 * real device's MTU cannot accommodate the MAP headroom, or the
 * register_netdevice() error. Endpoint/port bookkeeping is only done
 * after registration succeeds, so a failed call leaves @ep and @port
 * untouched.
 */
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep,
		      struct netlink_ext_ack *extack)

{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	u32 headroom;
	int rc;

	if (rmnet_get_endpoint(port, id)) {
		NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
		return -EBUSY;
	}

	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;

	/* Must be set before rmnet_vnd_change_mtu(), which reads real_dev */
	priv->real_dev = real_dev;

	headroom = rmnet_vnd_headroom(port);

	/* Start with the largest MTU that fits under the real device */
	if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
		return -EINVAL;
	}

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		/* Publish the endpoint only once the netdev exists */
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv->mux_id = id;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}
0292
0293 int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
0294 struct rmnet_endpoint *ep)
0295 {
0296 if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
0297 return -EINVAL;
0298
0299 ep->egress_dev = NULL;
0300 port->nr_rmnet_devs--;
0301 return 0;
0302 }
0303
/* MAP flow control: start or stop the vnd's TX queue.
 * @enable non-zero wakes the queue, zero stops it. Always returns 0.
 */
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);

	/* The common case is flow-control (stop); enable is the rare path */
	if (likely(!enable))
		netif_stop_queue(rmnet_dev);
	else
		netif_wake_queue(rmnet_dev);

	return 0;
}
0318
/* Check that every vnd muxed over @real_dev still fits its MTU plus the
 * MAP headroom inside real_dev's MTU. Returns 0 if all fit, -1 otherwise.
 *
 * NOTE(review): rmnet_get_port_rtnl() implies the caller holds RTNL;
 * the _safe iterator is used although nothing is removed here —
 * presumably to match the other muxed_ep walks in this driver.
 */
int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev)
{
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	u32 headroom;

	port = rmnet_get_port_rtnl(real_dev);

	headroom = rmnet_vnd_headroom(port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		/* Unsigned subtraction: assumes headroom <= real_dev->mtu */
		if (ep->egress_dev->mtu > (real_dev->mtu - headroom))
			return -1;
	}

	return 0;
}
0338
0339 int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
0340 struct net_device *real_dev)
0341 {
0342 struct hlist_node *tmp_ep;
0343 struct rmnet_endpoint *ep;
0344 unsigned long bkt_ep;
0345 u32 headroom;
0346
0347 headroom = rmnet_vnd_headroom(port);
0348
0349 hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
0350 if (ep->egress_dev->mtu <= (real_dev->mtu - headroom))
0351 continue;
0352
0353 if (rmnet_vnd_change_mtu(ep->egress_dev,
0354 real_dev->mtu - headroom))
0355 return -1;
0356 }
0357
0358 return 0;
0359 }