// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/export.h>

#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

static struct vport_ops ovs_netdev_vport_ops;

/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct sk_buff *skb)
{
	struct vport *vport;

	vport = ovs_netdev_get_vport(skb->dev);
	if (unlikely(!vport))
		goto error;

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	if (skb->dev->type == ARPHRD_ETHER)
		skb_push_rcsum(skb, ETH_HLEN);

	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
	return;
error:
	kfree_skb(skb);
}

/* Called with rcu_read_lock and bottom-halves disabled. */
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	netdev_port_receive(skb);
	return RX_HANDLER_CONSUMED;
}

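/* Return the netdev backing the datapath's local (OVSP_LOCAL) port. */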
static struct net_device *get_dpdev(const struct datapath *dp)
{
	struct vport *local;

	local = ovs_vport_ovsl(dp, OVSP_LOCAL);
	return local->dev;
}

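/* Attach the named network device to @vport: take a reference on it,
 * link it under the datapath's local device, register the OVS rx
 * handler and switch the device to promiscuous mode.  On failure the
 * vport is freed and an ERR_PTR is returned.
 */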
struct vport *ovs_netdev_link(struct vport *vport, const char *name)
{
	int err;

	vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
	if (!vport->dev) {
		err = -ENODEV;
		goto error_free_vport;
	}
	netdev_tracker_alloc(vport->dev, &vport->dev_tracker, GFP_KERNEL);
	if (vport->dev->flags & IFF_LOOPBACK ||
	    (vport->dev->type != ARPHRD_ETHER &&
	     vport->dev->type != ARPHRD_NONE) ||
	    ovs_is_internal_dev(vport->dev)) {
		err = -EINVAL;
		goto error_put;
	}

	rtnl_lock();
	err = netdev_master_upper_dev_link(vport->dev,
					   get_dpdev(vport->dp),
					   NULL, NULL, NULL);
	if (err)
		goto error_unlock;

	err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
					 vport);
	if (err)
		goto error_master_upper_dev_unlink;

	dev_disable_lro(vport->dev);
	dev_set_promiscuity(vport->dev, 1);
	vport->dev->priv_flags |= IFF_OVS_DATAPATH;
	rtnl_unlock();

	return vport;

error_master_upper_dev_unlink:
	netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
error_unlock:
	rtnl_unlock();
error_put:
	netdev_put(vport->dev, &vport->dev_tracker);
error_free_vport:
	ovs_vport_free(vport);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_netdev_link);

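/* Create a vport with no private area and link it to the named netdev. */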
static struct vport *netdev_create(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	return ovs_netdev_link(vport, parms->name);
}

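/* RCU callback: release the device reference and free the vport. */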
static void vport_netdev_free(struct rcu_head *rcu)
{
	struct vport *vport = container_of(rcu, struct vport, rcu);

	netdev_put(vport->dev, &vport->dev_tracker);
	ovs_vport_free(vport);
}

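/* Undo everything ovs_netdev_link() set up on the device.  Must be
 * called with RTNL held; the vport itself is freed separately via RCU.
 */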
void ovs_netdev_detach_dev(struct vport *vport)
{
	ASSERT_RTNL();
	vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
	netdev_rx_handler_unregister(vport->dev);
	netdev_upper_dev_unlink(vport->dev,
				netdev_master_upper_dev_get(vport->dev));
	dev_set_promiscuity(vport->dev, -1);
}

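/* Detach the underlying device, if still attached, and free the vport
 * after an RCU grace period.
 */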
static void netdev_destroy(struct vport *vport)
{
	rtnl_lock();
	if (netif_is_ovs_port(vport->dev))
		ovs_netdev_detach_dev(vport);
	rtnl_unlock();

	call_rcu(&vport->rcu, vport_netdev_free);
}

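/* Destroy path for tunnel vports: additionally deletes the tunnel
 * net_device itself if it is still registered.
 */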
void ovs_netdev_tunnel_destroy(struct vport *vport)
{
	rtnl_lock();
	if (netif_is_ovs_port(vport->dev))
		ovs_netdev_detach_dev(vport);

	/* We can be invoked by both explicit vport deletion and
	 * underlying netdev deregistration; delete the link only
	 * if it's not already shutting down.
	 */
	if (vport->dev->reg_state == NETREG_REGISTERED)
		rtnl_delete_link(vport->dev);
	netdev_put(vport->dev, &vport->dev_tracker);
	vport->dev = NULL;
	rtnl_unlock();

	call_rcu(&vport->rcu, vport_netdev_free);
}
EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);

/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
	if (likely(netif_is_ovs_port(dev)))
		return (struct vport *)
			rcu_dereference_rtnl(dev->rx_handler_data);
	else
		return NULL;
}

static struct vport_ops ovs_netdev_vport_ops = {
	.type		= OVS_VPORT_TYPE_NETDEV,
	.create		= netdev_create,
	.destroy	= netdev_destroy,
	.send		= dev_queue_xmit,
};

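/* Register the netdev vport type with the vport layer at module init. */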
int __init ovs_netdev_init(void)
{
	return ovs_vport_ops_register(&ovs_netdev_vport_ops);
}

void ovs_netdev_exit(void)
{
	ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
}