0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include "ef100_rep.h"
0013 #include "ef100_netdev.h"
0014 #include "ef100_nic.h"
0015 #include "mae.h"
0016 #include "rx_common.h"
0017
0018 #define EFX_EF100_REP_DRIVER "efx_ef100_rep"
0019
0020 #define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64
0021
0022 static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
0023
0024 static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
0025 unsigned int i)
0026 {
0027 efv->parent = efx;
0028 efv->idx = i;
0029 INIT_LIST_HEAD(&efv->list);
0030 efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
0031 INIT_LIST_HEAD(&efv->dflt.acts.list);
0032 INIT_LIST_HEAD(&efv->rx_list);
0033 spin_lock_init(&efv->rx_lock);
0034 efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
0035 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
0036 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
0037 NETIF_MSG_TX_ERR | NETIF_MSG_HW;
0038 return 0;
0039 }
0040
/* ndo_open: bring up the representor's NAPI context.
 * There is no hardware to start here; packets are handed to this netdev
 * by the parent PF via efx_ef100_rep_rx_packet() and drained by
 * efx_ef100_rep_poll().
 */
static int efx_ef100_rep_open(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&efv->napi);
	return 0;
}
0050
/* ndo_stop: quiesce and tear down the representor's NAPI context.
 * Disable before delete so no poll can be in flight when the NAPI
 * instance is removed.
 */
static int efx_ef100_rep_close(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	napi_disable(&efv->napi);
	netif_napi_del(&efv->napi);
	return 0;
}
0059
/* ndo_start_xmit: transmit a packet on behalf of the representor.
 * The representor has no TX queues of its own; the skb is injected into
 * the parent PF's datapath via __ef100_hard_start_xmit().
 */
static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	netdev_tx_t rc;

	/* NOTE(review): stats are counted before the transmit is attempted —
	 * presumably __ef100_hard_start_xmit() consumes the skb even on
	 * failure so this cannot double-count on requeue; confirm against
	 * its implementation in ef100_netdev.c.
	 */
	atomic64_inc(&efv->stats.tx_packets);
	atomic64_add(skb->len, &efv->stats.tx_bytes);
	/* Serialise against the parent PF's own TX path with its tx lock */
	netif_tx_lock(efx->net_dev);
	rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
	netif_tx_unlock(efx->net_dev);
	return rc;
}
0078
0079 static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
0080 struct netdev_phys_item_id *ppid)
0081 {
0082 struct efx_rep *efv = netdev_priv(dev);
0083 struct efx_nic *efx = efv->parent;
0084 struct ef100_nic_data *nic_data;
0085
0086 nic_data = efx->nic_data;
0087
0088 ppid->id_len = sizeof(nic_data->port_id);
0089 memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
0090 return 0;
0091 }
0092
0093 static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
0094 char *buf, size_t len)
0095 {
0096 struct efx_rep *efv = netdev_priv(dev);
0097 struct efx_nic *efx = efv->parent;
0098 struct ef100_nic_data *nic_data;
0099 int ret;
0100
0101 nic_data = efx->nic_data;
0102 ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
0103 nic_data->pf_index, efv->idx);
0104 if (ret >= len)
0105 return -EOPNOTSUPP;
0106
0107 return 0;
0108 }
0109
0110 static void efx_ef100_rep_get_stats64(struct net_device *dev,
0111 struct rtnl_link_stats64 *stats)
0112 {
0113 struct efx_rep *efv = netdev_priv(dev);
0114
0115 stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
0116 stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
0117 stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
0118 stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
0119 stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
0120 stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
0121 }
0122
/* Net-device operations for VF representor interfaces */
static const struct net_device_ops efx_ef100_rep_netdev_ops = {
	.ndo_open		= efx_ef100_rep_open,
	.ndo_stop		= efx_ef100_rep_close,
	.ndo_start_xmit		= efx_ef100_rep_xmit,
	.ndo_get_port_parent_id	= efx_ef100_rep_get_port_parent_id,
	.ndo_get_phys_port_name	= efx_ef100_rep_get_phys_port_name,
	.ndo_get_stats64	= efx_ef100_rep_get_stats64,
};
0131
/* ethtool get_drvinfo: only the driver name is meaningful for a rep */
static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
}
0137
/* ethtool get_msglevel: report the rep's netif message-enable mask */
static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	return efv->msg_enable;
}
0144
/* ethtool set_msglevel: update the rep's netif message-enable mask */
static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
					       u32 msg_enable)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	efv->msg_enable = msg_enable;
}
0152
/* ethtool get_ringparam: report the RX "pseudo ring" size.
 * There is no hardware ring behind a representor; rx_pring_size only
 * bounds the rx_list backlog (see efx_ef100_rep_rx_packet()), so the
 * maximum is simply the full u32 range.
 */
static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
						struct ethtool_ringparam *ring,
						struct kernel_ethtool_ringparam *kring,
						struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	ring->rx_max_pending = U32_MAX;
	ring->rx_pending = efv->rx_pring_size;
}
0163
0164 static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
0165 struct ethtool_ringparam *ring,
0166 struct kernel_ethtool_ringparam *kring,
0167 struct netlink_ext_ack *ext_ack)
0168 {
0169 struct efx_rep *efv = netdev_priv(net_dev);
0170
0171 if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
0172 return -EINVAL;
0173
0174 efv->rx_pring_size = ring->rx_pending;
0175 return 0;
0176 }
0177
/* Ethtool operations for VF representor interfaces */
static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
	.get_drvinfo		= efx_ef100_rep_get_drvinfo,
	.get_msglevel		= efx_ef100_rep_ethtool_get_msglevel,
	.set_msglevel		= efx_ef100_rep_ethtool_set_msglevel,
	.get_ringparam		= efx_ef100_rep_ethtool_get_ringparam,
	.set_ringparam		= efx_ef100_rep_ethtool_set_ringparam,
};
0185
/* Allocate and initialise a representor netdev for VF index @i and link
 * it onto the parent's vf_reps list.  The netdev is NOT registered here;
 * the caller (efx_ef100_vfrep_create()) registers it after the rep has
 * been configured.  Returns the new efx_rep or an ERR_PTR on failure.
 */
static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
						   unsigned int i)
{
	struct net_device *net_dev;
	struct efx_rep *efv;
	int rc;

	/* Single TX queue: all rep TX is funnelled through the parent PF */
	net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
	if (!net_dev)
		return ERR_PTR(-ENOMEM);

	efv = netdev_priv(net_dev);
	rc = efx_ef100_rep_init_struct(efx, efv, i);
	if (rc)
		goto fail1;
	efv->net_dev = net_dev;
	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_add_tail(&efv->list, &efx->vf_reps);
	spin_unlock_bh(&efx->vf_reps_lock);
	/* Mirror the parent PF's current admin/link state: reps can only
	 * carry traffic while the PF datapath is up.
	 */
	if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
		netif_device_attach(net_dev);
		netif_carrier_on(net_dev);
	} else {
		netif_carrier_off(net_dev);
		netif_tx_stop_all_queues(net_dev);
	}
	rtnl_unlock();

	net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
	net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	/* TX is serialised on the parent's tx lock rather than per-queue */
	net_dev->features |= NETIF_F_LLTX;
	net_dev->hw_features |= NETIF_F_LLTX;
	return efv;
fail1:
	free_netdev(net_dev);
	return ERR_PTR(rc);
}
0226
/* Resolve the rep's MAE m-port and install its default switching rule.
 * Returns 0 on success or a negative error code.
 */
static int efx_ef100_configure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;
	u32 selector;
	int rc;

	efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
	/* Construct the m-port selector for the corresponding VF */
	efx_mae_mport_vf(efx, efv->idx, &selector);
	/* Look up the actual m-port ID for that selector */
	rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
	/* mport label should fit in 16 bits — lookups via
	 * efx_ef100_find_rep_by_mport() take a u16 mport.
	 */
	WARN_ON(efv->mport >> 16);

	return efx_tc_configure_default_rule_rep(efv);
}
0246
/* Remove the rep's default switching rule installed by
 * efx_ef100_configure_rep().
 */
static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}
0253
/* Unlink the rep from the parent's vf_reps list and free its netdev.
 * Counterpart of efx_ef100_rep_create_netdev().
 */
static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_del(&efv->list);
	spin_unlock_bh(&efx->vf_reps_lock);
	rtnl_unlock();
	/* NOTE(review): presumably waits out RX-path users that obtained a
	 * pointer to this rep (e.g. via efx_ef100_find_rep_by_mport())
	 * under rcu_read_lock, before the netdev memory is freed — confirm
	 * against the callers of that lookup.
	 */
	synchronize_rcu();
	free_netdev(efv->net_dev);
}
0266
/* Create, configure and register a representor for VF index @i.
 * On failure, previously completed steps are unwound in reverse order
 * via the goto-cleanup chain.  Returns 0 or a negative error code.
 */
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
{
	struct efx_rep *efv;
	int rc;

	efv = efx_ef100_rep_create_netdev(efx, i);
	if (IS_ERR(efv)) {
		rc = PTR_ERR(efv);
		pci_err(efx->pci_dev,
			"Failed to create representor for VF %d, rc %d\n", i,
			rc);
		return rc;
	}
	rc = efx_ef100_configure_rep(efv);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to configure representor for VF %d, rc %d\n",
			i, rc);
		goto fail1;
	}
	rc = register_netdev(efv->net_dev);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to register representor for VF %d, rc %d\n",
			i, rc);
		goto fail2;
	}
	pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
		efv->net_dev->name);
	return 0;
fail2:
	efx_ef100_deconfigure_rep(efv);
fail1:
	efx_ef100_rep_destroy_netdev(efv);
	return rc;
}
0303
/* Tear down a representor: unregister the netdev, remove its default
 * rule, then unlink and free it.  Exact inverse of the success path in
 * efx_ef100_vfrep_create().  A rep with no netdev is silently skipped.
 */
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
{
	struct net_device *rep_dev;

	rep_dev = efv->net_dev;
	if (!rep_dev)
		return;
	netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
	unregister_netdev(rep_dev);
	efx_ef100_deconfigure_rep(efv);
	efx_ef100_rep_destroy_netdev(efv);
}
0316
/* Destroy all remaining VF representors.  Nothing to do if this
 * function isn't an MAE admin (grp_mae), since no reps were created.
 * NOTE(review): the list is walked without vf_reps_lock — presumably
 * safe because this runs at teardown when no reps are being added;
 * confirm against the callers.
 */
void efx_ef100_fini_vfreps(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	struct efx_rep *efv, *next;

	if (!nic_data->grp_mae)
		return;

	list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
		efx_ef100_vfrep_destroy(efx, efv);
}
0328
/* NAPI poll: drain up to @weight skbs from the rep's rx_list and hand
 * them to the stack.  The rx_list plus read/write indices emulate a
 * ring: the producer (efx_ef100_rep_rx_packet()) only schedules NAPI
 * when the "ring" transitions from empty, so this poll must re-prime it
 * by publishing read_index when it completes.
 */
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
	unsigned int read_index;
	struct list_head head;
	struct sk_buff *skb;
	bool need_resched;
	int spent = 0;

	INIT_LIST_HEAD(&head);

	/* Grab up to 'weight' pending SKBs under the lock, then deliver
	 * them outside it.
	 */
	spin_lock_bh(&efv->rx_lock);
	/* Snapshot the producer position we are draining up to */
	read_index = efv->write_index;
	while (spent < weight && !list_empty(&efv->rx_list)) {
		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
		list_del(&skb->list);
		list_add_tail(&skb->list, &head);
		spent++;
	}
	spin_unlock_bh(&efv->rx_lock);

	netif_receive_skb_list(&head);
	if (spent < weight)
		if (napi_complete_done(napi, spent)) {
			spin_lock_bh(&efv->rx_lock);
			efv->read_index = read_index;
			/* If the producer advanced write_index while we were
			 * delivering packets, it saw a non-empty "ring" and
			 * did not schedule NAPI; reschedule ourselves so the
			 * new packets are not stranded.
			 */
			need_resched = efv->write_index != read_index;
			spin_unlock_bh(&efv->rx_lock);
			if (need_resched)
				napi_schedule(&efv->napi);
		}
	return spent;
}
0367
/* Deliver a packet received on the parent PF to this representor.
 * Copies the buffer into a fresh skb, queues it on the rep's rx_list
 * (the software pseudo-ring) and kicks NAPI if the list was empty.
 * Packets are dropped (rx_dropped) when the pseudo-ring is full or skb
 * allocation fails.
 */
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
	u8 *eh = efx_rx_buf_va(rx_buf);
	struct sk_buff *skb;
	bool primed;

	/* Backpressure: if NAPI hasn't caught up (write - read exceeds the
	 * configured pseudo-ring size), drop rather than grow the rx_list
	 * without bound.
	 */
	if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "nodesc-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}

	skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
	if (!skb) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "noskb-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}
	/* Copy out of the PF's RX buffer; the rep owns the new skb */
	memcpy(skb->data, eh, rx_buf->len);
	__skb_put(skb, rx_buf->len);

	/* Reps have a single (pseudo) RX queue */
	skb_record_rx_queue(skb, 0);

	skb->protocol = eth_type_trans(skb, efv->net_dev);

	/* No checksum information is carried over from the PF RX path */
	skb_checksum_none_assert(skb);

	atomic64_inc(&efv->stats.rx_packets);
	atomic64_add(rx_buf->len, &efv->stats.rx_bytes);

	/* Queue the skb; only schedule NAPI on the empty->non-empty
	 * transition (read_index == write_index), mirroring a device that
	 * only interrupts when the ring was previously empty.
	 */
	spin_lock_bh(&efv->rx_lock);
	primed = efv->read_index == efv->write_index;
	list_add_tail(&skb->list, &efv->rx_list);
	efv->write_index++;
	spin_unlock_bh(&efv->rx_lock);

	if (primed)
		napi_schedule(&efv->napi);
}
0418
/* Find the representor whose MAE m-port ID matches @mport, or NULL.
 * The vf_reps_lock guards the list walk against concurrent add/remove.
 * NOTE(review): the returned pointer is used after the lock is dropped —
 * presumably callers hold rcu_read_lock() and rely on the
 * synchronize_rcu() in efx_ef100_rep_destroy_netdev() to keep the rep
 * alive; confirm at the call sites.
 */
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
	struct efx_rep *efv, *out = NULL;

	spin_lock_bh(&efx->vf_reps_lock);
	list_for_each_entry(efv, &efx->vf_reps, list)
		if (efv->mport == mport) {
			out = efv;
			break;
		}
	spin_unlock_bh(&efx->vf_reps_lock);
	return out;
}