// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2015 Cavium, Inc.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <linux/net_tstamp.h>
#include <linux/workqueue.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"
#include "../common/cavium_ptp.h"

#define DRV_NAME	"nicvf"
#define DRV_VERSION	"1.0"

/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP
 * needs the buffer to be contiguous. Allow XDP to be set up only if we
 * don't exceed this value, keeping headroom for the 14 byte Ethernet
 * header and two VLAN tags (for QinQ).
 */
#define MAX_XDP_MTU	(1530 - ETH_HLEN - VLAN_HLEN * 2)

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
	{ 0, }
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, 0444);
MODULE_PARM_DESC(cpi_alg,
		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
	if (nic->sqs_mode)
		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
	else
		return qidx;
}
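
/* Example (a sketch; assuming MAX_CMP_QUEUES_PER_QS is 8, as on ThunderX):
 * on the primary VF (sqs_mode == 0) local queue 3 maps to netdev queue 3,
 * while on the first secondary Qset (sqs_id == 0) the same local queue
 * maps to netdev queue 3 + (0 + 1) * 8 = 11.
 */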

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses. So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver.  The readq()/writeq()
 * functions add explicit ordering operations which in this case are
 * redundant, and only add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	unsigned long timeout;
	int ret = 0;

	mutex_lock(&nic->rx_mode_mtx);

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Poll for ACK/NACK from PF, bounded by NIC_MBOX_MSG_TIMEOUT */
	timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT);

	while (!nic->pf_acked) {
		if (nic->pf_nacked) {
			netdev_err(nic->netdev,
				   "PF NACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			ret = -EINVAL;
			break;
		}
		usleep_range(8000, 10000);
		if (nic->pf_acked)
			break;
		if (time_after(jiffies, timeout)) {
			netdev_err(nic->netdev,
				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			ret = -EBUSY;
			break;
		}
	}
	mutex_unlock(&nic->rx_mode_mtx);
	return ret;
}
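
/* Typical caller pattern (a sketch, mirroring the callers below): fill a
 * zeroed 'union nic_mbx', set msg.msg to a NIC_MBOX_MSG_* opcode plus any
 * per-message fields, then:
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.msg.msg = NIC_MBOX_MSG_READY;
 *	err = nicvf_send_msg_to_pf(nic, &mbx);
 *
 * The call sleeps in usleep_range(), so it must not be used from atomic
 * context; the PF's reply is delivered via nicvf_handle_mbx_intr(), which
 * sets pf_acked/pf_nacked.
 */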

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

static void nicvf_send_cfg_done(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to CFG DONE msg\n");
	}
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			eth_hw_addr_set(nic->netdev, mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		if (nic->link_up != mbx.link_status.link_up) {
			nic->link_up = mbx.link_status.link_up;
			nic->duplex = mbx.link_status.duplex;
			nic->speed = mbx.link_status.speed;
			nic->mac_type = mbx.link_status.mac_type;
			if (nic->link_up) {
				netdev_info(nic->netdev,
					    "Link is Up %d Mbps %s duplex\n",
					    nic->speed,
					    nic->duplex == DUPLEX_FULL ?
					    "Full" : "Half");
				netif_carrier_on(nic->netdev);
				netif_tx_start_all_queues(nic->netdev);
			} else {
				netdev_info(nic->netdev, "Link is Down\n");
				netif_carrier_off(nic->netdev);
				netif_tx_stop_all_queues(nic->netdev);
			}
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make a note of the secondary VF's pointer,
		 * to be used while transmitting packets.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make a note of the primary VF's
		 * pointer, used while receiving packets to hand them over
		 * to the primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PFC:
		nic->pfc.autoneg = mbx.pfc.autoneg;
		nic->pfc.fc_rx = mbx.pfc.fc_rx;
		nic->pfc.fc_tx = mbx.pfc.fc_tx;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}

static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
	return 1;
}
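
/* Worked example (a sketch; actual numbers depend on what the PF reports):
 * if the PF returns rss_size = 128, then hash_bits = ilog2(128) = 7, and
 * ethtool_rxfh_indir_default() spreads table entries round-robin over the
 * Rx queues, e.g. with 4 Rx queues: ind_tbl = 0,1,2,3,0,1,2,3,...
 */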

/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int sqs;
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)
		return;

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */
		nic->sqs_count = 0;
		return;
	}

	/* Return if no Secondary Qsets available */
	if (!nic->sqs_count)
		return;

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;

	tx_queues = nic->tx_queues + nic->xdp_tx_queues;
	if (tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
}

/* Send this Qset's nicvf pointer to the PF.
 * The PF in turn hands the primary VF's nicvf struct over to the
 * secondary Qsets/VFs so Rx/Tx can be steered to the primary's netdev.
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	return 0;
}

static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
				struct rcv_queue *rq, struct sk_buff **skb)
{
	unsigned char *hard_start, *data;
	struct xdp_buff xdp;
	struct page *page;
	u32 action;
	u16 len, offset = 0;
	u64 dma_addr, cpu_addr;
	void *orig_data;

	/* Retrieve packet buffer's DMA address and length */
	len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
	dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));

	cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
	if (!cpu_addr)
		return false;
	cpu_addr = (u64)phys_to_virt(cpu_addr);
	page = virt_to_page((void *)cpu_addr);

	xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
		      &rq->xdp_rxq);
	hard_start = page_address(page);
	data = (unsigned char *)cpu_addr;
	xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
	orig_data = xdp.data;

	action = bpf_prog_run_xdp(prog, &xdp);

	len = xdp.data_end - xdp.data;
	/* Check if XDP program has changed headers */
	if (orig_data != xdp.data) {
		offset = orig_data - xdp.data;
		dma_addr -= offset;
	}

	switch (action) {
	case XDP_PASS:
		/* Check if it's a recycled page; if not,
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}

		/* Build SKB and pass on packet to network stack */
		*skb = build_skb(xdp.data,
				 RCV_FRAG_LEN - cqe_rx->align_pad + offset);
		if (!*skb)
			put_page(page);
		else
			skb_put(*skb, len);
		return false;
	case XDP_TX:
		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
		return true;
	default:
		bpf_warn_invalid_xdp_action(nic->netdev, prog, action);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(nic->netdev, prog, action);
		fallthrough;
	case XDP_DROP:
		/* Check if it's a recycled page; if not,
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}
		put_page(page);
		return true;
	}
	return false;
}
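
/* Verdict summary: XDP_PASS builds an skb around the (possibly adjusted)
 * buffer and returns false so the caller hands the packet to the stack;
 * XDP_TX re-posts the same DMA buffer on the XDP send queue; XDP_ABORTED
 * and XDP_DROP release the page. Returning true means the frame was fully
 * consumed here.
 */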

static void nicvf_snd_ptp_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct skb_shared_hwtstamps ts;
	u64 ns;

	nic = nic->pnicvf;

	/* Sync for 'ptp_skb' */
	smp_rmb();

	/* New timestamp request can be queued now */
	atomic_set(&nic->tx_ptp_skbs, 0);

	/* Check for timestamp requested skb */
	if (!nic->ptp_skb)
		return;

	/* Check if timestamping timed out or conflicted */
	if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
	    cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
		goto no_tstamp;

	/* Get the timestamp */
	memset(&ts, 0, sizeof(ts));
	ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
	ts.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(nic->ptp_skb, &ts);

no_tstamp:
	/* Free the original skb */
	dev_kfree_skb_any(nic->ptp_skb);
	nic->ptp_skb = NULL;
	/* Sync 'ptp_skb' */
	smp_wmb();
}

static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx,
				  int budget, int *subdesc_cnt,
				  unsigned int *tx_pkts, unsigned int *tx_bytes)
{
	struct sk_buff *skb = NULL;
	struct page *page;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	/* Check for errors */
	if (cqe_tx->send_status)
		nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);

	/* Is this an XDP designated Tx queue */
	if (sq->is_xdp) {
		page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
		/* Check if it's a recycled page or else unmap DMA mapping */
		if (page && (page_ref_count(page) == 1))
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);

		/* Release page reference for recycling */
		if (page)
			put_page(page);
		sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		return;
	}

	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	if (skb) {
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and free them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
			*subdesc_cnt += tso_sqe->subdesc_cnt + 1;
		} else {
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);
		}
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		prefetch(skb);
		(*tx_pkts)++;
		*tx_bytes += skb->len;
		/* If a timestamp is requested for this skb, don't free it */
		if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
		    !nic->pnicvf->ptp_skb)
			nic->pnicvf->ptp_skb = skb;
		else
			napi_consume_skb(skb, budget);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	} else {
		/* In case of SW TSO on 88xx, only the last segment will have
		 * an SKB attached, so just free SQEs here.
		 */
		if (!nic->hw_tso)
			*subdesc_cnt += hdr->subdesc_cnt + 1;
	}
}

static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}

static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
{
	u64 ns;

	if (!nic->ptp_clock || !nic->hw_rx_tstamp)
		return;

	/* The first 8 bytes of the packet are the hardware timestamp */
	ns = cavium_ptp_tstamp2time(nic->ptp_clock,
				    be64_to_cpu(*(__be64 *)skb->data));
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);

	__skb_pull(skb, 8);
}

static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx,
				  struct snd_queue *sq, struct rcv_queue *rq)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf *snic = nic;
	int err = 0;
	int rq_idx;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	if (cqe_rx->err_level || cqe_rx->err_opcode) {
		err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
		if (err && !cqe_rx->rb_cnt)
			return;
	}

	/* For XDP, ignore packets spanning multiple pages */
	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
		/* Packet consumed by XDP */
		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
			return;
	} else {
		skb = nicvf_get_rcv_skb(snic, cqe_rx,
					nic->xdp_prog ? true : false);
	}

	if (!skb)
		return;

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rxtstamp(nic, skb);
	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}

static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	int subdesc_cnt = 0;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;
	struct snd_queue *sq = &qs->sq[cq_idx];
	struct rcv_queue *rq = &qs->rq[cq_idx];
	unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
					      budget, &subdesc_cnt,
					      &tx_pkts, &tx_bytes);
			tx_done++;
			break;
		case CQE_TYPE_SEND_PTP:
			nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Update SQ's descriptor free count */
	if (subdesc_cnt)
		nicvf_put_sq_desc(sq, subdesc_cnt);

	txq_idx = nicvf_netdev_qidx(nic, cq_idx);
	/* Handle XDP Tx queues */
	if (nic->pnicvf->xdp_prog) {
		if (txq_idx < nic->pnicvf->xdp_tx_queues) {
			nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
			goto out;
		}
		nic = nic->pnicvf;
		txq_idx -= nic->pnicvf->xdp_tx_queues;
	}

	/* Wakeup TXQ if it was stopped earlier due to SQ being full */
	if (tx_done ||
	    (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev, txq_idx);
		if (tx_pkts)
			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

		/* To read updated queue and carrier status */
		smp_mb();
		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_wake_queue(txq);
			nic = nic->pnicvf;
			this_cpu_inc(nic->drv_stats->txq_wake);
			netif_warn(nic, tx_err, netdev,
				   "Transmit queue wakeup SQ%d\n", txq_idx);
		}
	}

out:
	spin_unlock_bh(&cq->lock);
	return work_done;
}

static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete_done(napi, work_done);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}
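
/* Standard NAPI contract: when fewer than 'budget' CQEs were consumed the
 * poll is complete, so the CQ interrupt is cleared and re-enabled; at full
 * budget the core re-invokes nicvf_poll() without interrupts. Writing the
 * head register back appears to re-arm the CQ here (an inference from the
 * register usage above, not documented in this file).
 */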

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(struct tasklet_struct *t)
{
	struct nicvf *nic = from_tasklet(nic, t, qs_err_task);
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is a CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;

		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void nicvf_dump_intr_status(struct nicvf *nic)
{
	netif_info(nic, intr, nic->netdev, "interrupt status 0x%llx\n",
		   nicvf_reg_read(nic, NIC_VF_INT));
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u8 qidx;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}

static void nicvf_set_irq_affinity(struct nicvf *nic)
{
	int vec, cpu;

	for (vec = 0; vec < nic->num_vec; vec++) {
		if (!nic->irq_allocated[vec])
			continue;

		if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
			return;

		if (vec < NICVF_INTR_ID_SQ)
			/* Leave CPU0 for RBDR and other interrupts */
			cpu = nicvf_netdev_qidx(nic, vec) + 1;
		else
			cpu = 0;

		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
				nic->affinity_mask[vec]);
		irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
				      nic->affinity_mask[vec]);
	}
}

static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rxtx-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq));

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-sq-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rbdr-%d",
			nic->pnicvf->netdev->name,
			nic->sqs_mode ? (nic->sqs_id + 1) : 0);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
		nic->pnicvf->netdev->name,
		nic->sqs_mode ? (nic->sqs_id + 1) : 0);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (ret)
		goto err;

	nic->irq_allocated[irq] = true;

	/* Set IRQ affinities */
	nicvf_set_irq_affinity(nic);

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	struct pci_dev *pdev = nic->pdev;
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
		free_cpumask_var(nic->affinity_mask[irq]);

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
		else
			free_irq(pci_irq_vector(pdev, irq), nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	pci_free_irq_vectors(pdev);
	nic->num_vec = 0;
}

/* Initialize MSI-X vectors and register the MISC (mailbox) interrupt.
 * Send READY message to PF to check if it is alive.
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->pdev->msix_enabled)
		return 0;

	/* Enable MSI-X */
	nic->num_vec = pci_msix_vec_count(nic->pdev);
	ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return ret;
	}

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return -EIO;
	}

	return 0;
}

static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
	struct nicvf *snic;
	struct snd_queue *sq;
	int tmp;

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* In the XDP case the initial HW tx queues are used for XDP,
	 * but the stack's queue mapping starts at '0', so offset the
	 * qid past the queues reserved for XDP.
	 */
	if (nic->xdp_prog)
		qid += nic->xdp_tx_queues;

	snic = nic;
	/* Get secondary Qset's SQ structure */
	if (qid >= MAX_SND_QUEUES_PER_QS) {
		tmp = qid / MAX_SND_QUEUES_PER_QS;
		snic = (struct nicvf *)nic->snicvf[tmp - 1];
		if (!snic) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    tmp - 1);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		qid = qid % MAX_SND_QUEUES_PER_QS;
	}

	sq = &snic->qs->sq[qid];
	if (!netif_tx_queue_stopped(txq) &&
	    !nicvf_sq_append_skb(snic, sq, skb, qid)) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue is visible to other cpus */
		smp_mb();

		/* Check again, in case another cpu freed descriptors */
		if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
			netif_tx_wake_queue(txq);
		} else {
			this_cpu_inc(nic->drv_stats->txq_stop);
			netif_warn(nic, tx_err, netdev,
				   "Transmit ring full, stopping SQ%d\n", qid);
		}
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
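
/* Queue stop/wake protocol (a sketch): the smp_mb() above pairs with the
 * smp_mb() in nicvf_cq_intr_handler(), so either this path sees descriptors
 * freed by Tx completion and wakes the queue itself, or the completion path
 * sees the stopped queue and wakes it; without the barriers a wakeup could
 * be lost and the queue stall.
 */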

static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}

int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	/* Wait till all queued set_rx_mode tasks complete */
	if (nic->nicvf_rx_mode_wq) {
		cancel_delayed_work_sync(&nic->link_change_work);
		drain_workqueue(nic->nicvf_rx_mode_wq);
	}

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Teardown secondary qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(pci_irq_vector(nic->pdev, irq));

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* Disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Free any pending SKB saved for a Tx timestamp */
	if (nic->ptp_skb) {
		dev_kfree_skb_any(nic->ptp_skb);
		nic->ptp_skb = NULL;
	}

	/* Clear multiqset info */
	nic->pnicvf = nic;

	return 0;
}

static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};

	mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
	mbx.ptp.enable = enable;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_link_status_check_task(struct work_struct *work_arg)
{
	struct nicvf *nic = container_of(work_arg,
					 struct nicvf,
					 link_change_work.work);
	union nic_mbx mbx = {};

	/* Poll the PF for link status and re-queue every 2 seconds */
	mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
	nicvf_send_msg_to_pf(nic, &mbx);
	queue_delayed_work(nic->nicvf_rx_mode_wq,
			   &nic->link_change_work, 2 * HZ);
}

int nicvf_open(struct net_device *netdev)
{
	int cpu, err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	/* Wait till all queued set_rx_mode tasks complete, if any */
	if (nic->nicvf_rx_mode_wq)
		drain_workqueue(nic->nicvf_rx_mode_wq);

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF, else generate a random MAC */
	if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	if (!nic->sqs_mode)
		nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	if (nic->sqs_mode)
		nicvf_get_primary_vf_struct(nic);

	/* Configure PTP timestamping */
	if (nic->ptp_clock)
		nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
	atomic_set(&nic->tx_ptp_skbs, 0);
	nic->ptp_skb = NULL;

	/* Configure receive side scaling and max frame size */
	if (!nic->sqs_mode) {
		nicvf_rss_init(nic);
		err = nicvf_update_hw_max_frs(nic, netdev->mtu);
		if (err)
			goto cleanup;

		/* Clear percpu stats */
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
			       sizeof(struct nicvf_drv_stats));
	}

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	/* Send VF config done msg to PF */
	nicvf_send_cfg_done(nic);

	if (nic->nicvf_rx_mode_wq) {
		INIT_DELAYED_WORK(&nic->link_change_work,
				  nicvf_link_status_check_task);
		queue_delayed_work(nic->nicvf_rx_mode_wq,
				   &nic->link_change_work, 0);
	}

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);
	int orig_mtu = netdev->mtu;

	/* For now just support only the usual MTU sized frames,
	 * plus some headroom for VLAN, QinQ.
	 */
	if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    netdev->mtu);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	if (!netif_running(netdev))
		return 0;

	/* Inform the PF; roll back on failure */
	if (nicvf_update_hw_max_frs(nic, new_mtu)) {
		netdev->mtu = orig_mtu;
		return -EINVAL;
	}

	return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	if (nic->pdev->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		/* PF not reachable yet; apply the address on next open */
		nic->set_mac_pending = true;
	}

	return 0;
}

void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}

void nicvf_update_stats(struct nicvf *nic)
{
	int qidx, cpu;
	u64 tmp_stats = 0;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	/* On T88 pass 2.0 with HW TSO, the dummy SQE added for TSO
	 * notification via CQE has 'dont_send' set, so HW counts each such
	 * packet in tx_drops. The per-CPU tx_tso totals are used below to
	 * compensate for that inflation.
	 */
	if (nic->t88 && nic->hw_tso) {
		for_each_possible_cpu(cpu) {
			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
			tmp_stats += drv_stats->tx_tso;
		}
		stats->tx_drops = tmp_stats - stats->tx_drops;
	}
	stats->tx_frames = stats->tx_ucast_frames +
			   stats->tx_bcast_frames +
			   stats->tx_mcast_frames;
	stats->rx_frames = stats->rx_ucast_frames +
			   stats->rx_bcast_frames +
			   stats->rx_mcast_frames;
	stats->rx_drops = stats->rx_drop_red +
			  stats->rx_drop_overrun;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static void nicvf_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = hw_stats->rx_frames;
	stats->rx_dropped = hw_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes;
	stats->tx_packets = hw_stats->tx_frames;
	stats->tx_dropped = hw_stats->tx_drops;
}

static void nicvf_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct nicvf *nic = netdev_priv(dev);

	netif_warn(nic, tx_err, dev, "Transmit timed out, resetting\n");

	this_cpu_inc(nic->drv_stats->tx_timeout);
	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	netif_trans_update(nic->netdev);
}

static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;
}

static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
}

static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
{
	u8 cq_count, txq_count;

	/* Set XDP Tx queue count same as Rx queue count */
	if (!bpf_attached)
		nic->xdp_tx_queues = 0;
	else
		nic->xdp_tx_queues = nic->rx_queues;

	/* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
	 * need to be allocated; check how many.
	 */
	txq_count = nic->xdp_tx_queues + nic->tx_queues;
	cq_count = max(nic->rx_queues, txq_count);
	if (cq_count > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	/* Set primary Qset's resources */
	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);

	/* Update stack */
	nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
}
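
/* Worked example (a sketch; assuming MAX_CMP_QUEUES_PER_QS is 8, as on
 * ThunderX): with rx_queues = 8, tx_queues = 8 and XDP attached,
 * xdp_tx_queues = 8, txq_count = 16 and cq_count = 16, so
 * sqs_count = roundup(16, 8) / 8 - 1 = 1 secondary Qset is requested.
 */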

static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
{
	struct net_device *dev = nic->netdev;
	bool if_up = netif_running(nic->netdev);
	struct bpf_prog *old_prog;
	bool bpf_attached = false;
	int ret = 0;

	/* For now just support only the usual MTU sized frames,
	 * plus some headroom for VLAN, QinQ.
	 */
	if (prog && dev->mtu > MAX_XDP_MTU) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    dev->mtu);
		return -EOPNOTSUPP;
	}

	/* ALL SQs attached to CQs i.e same as RQs, are treated as
	 * XDP Tx queues and more Tx queues are allocated for
	 * network stack to send pkts out.
	 *
	 * No of Tx queues are either same as Rx queues or whatever
	 * is left in max no of queues possible.
	 */
	if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
		netdev_warn(dev,
			    "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
			    nic->max_queues);
		return -ENOMEM;
	}

	if (if_up)
		nicvf_stop(nic->netdev);

	old_prog = xchg(&nic->xdp_prog, prog);
	/* Detach old prog, if any */
	if (old_prog)
		bpf_prog_put(old_prog);

	if (nic->xdp_prog) {
		/* Attach BPF program; one reference per Rx queue */
		bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
		bpf_attached = true;
	}

	/* Calculate Tx queues needed for XDP and network stack */
	nicvf_set_xdp_queues(nic, bpf_attached);

	if (if_up) {
		/* Restart the interface with the new queue layout */
		nicvf_open(nic->netdev);
		netif_trans_update(nic->netdev);
	}

	return ret;
}

static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
	struct nicvf *nic = netdev_priv(netdev);

	/* To avoid checks while retrieving buffer address from CQE_RX,
	 * do not support XDP for T88 pass1.x silicons which are anyway
	 * not in use widely.
	 */
	if (pass1_silicon(nic->pdev))
		return -EOPNOTSUPP;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return nicvf_xdp_setup(nic, xdp->prog);
	default:
		return -EINVAL;
	}
}

static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct nicvf *nic = netdev_priv(netdev);

	if (!nic->ptp_clock)
		return -ENODEV;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		nic->hw_rx_tstamp = false;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		nic->hw_rx_tstamp = true;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(netdev))
		nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return nicvf_config_hwtstamp(netdev, req);
	default:
		return -EOPNOTSUPP;
	}
}

static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
				     struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int idx;

	/* From inside a VM we have only 128 bits of mailbox memory
	 * available to send a message to the host's PF, so multicast
	 * addresses are sent one by one, starting with a flush command
	 * in case the kernel requests specific MAC filtering.
	 */

	/* flush DMAC filters and reset RX mode */
	mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
	if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
		goto free_mc;

	if (mode & BGX_XCAST_MCAST_FILTER) {
		/* once enabling filtering, we need to signal to PF to add
		 * its own LMAC to the filter to accept packets for it.
		 */
		mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
		mbx.xcast.mac = 0;
		if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
			goto free_mc;
	}

	/* check if we have any specific MACs to be added to PF DMAC filter */
	if (mc_addrs) {
		/* now go through kernel list of MACs and add them one by one */
		for (idx = 0; idx < mc_addrs->count; idx++) {
			mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
			mbx.xcast.mac = mc_addrs->mc[idx];
			if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
				goto free_mc;
		}
	}

	/* and finally set rx mode for PF accordingly */
	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
	mbx.xcast.mode = mode;

	nicvf_send_msg_to_pf(nic, &mbx);
free_mc:
	kfree(mc_addrs);
}

static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
{
	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
						  work);
	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
	u8 mode;
	struct xcast_addr_list *mc;

	/* Save message data locally to prevent it from being overwritten
	 * by the next ndo_set_rx_mode call.
	 */
	spin_lock_bh(&nic->rx_mode_wq_lock);
	mode = vf_work->mode;
	mc = vf_work->mc;
	vf_work->mc = NULL;
	spin_unlock_bh(&nic->rx_mode_wq_lock);

	__nicvf_set_rx_mode_task(mode, mc, nic);
}

static void nicvf_set_rx_mode(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	struct xcast_addr_list *mc_list = NULL;
	u8 mode = 0;

	if (netdev->flags & IFF_PROMISC) {
		mode = BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT;
	} else {
		if (netdev->flags & IFF_BROADCAST)
			mode |= BGX_XCAST_BCAST_ACCEPT;

		if (netdev->flags & IFF_ALLMULTI) {
			mode |= BGX_XCAST_MCAST_ACCEPT;
		} else if (netdev->flags & IFF_MULTICAST) {
			mode |= BGX_XCAST_MCAST_FILTER;
			/* here we need to copy mc addrs */
			if (netdev_mc_count(netdev)) {
				mc_list = kmalloc(struct_size(mc_list, mc,
							      netdev_mc_count(netdev)),
						  GFP_ATOMIC);
				if (unlikely(!mc_list))
					return;
				mc_list->count = 0;
				netdev_hw_addr_list_for_each(ha, &netdev->mc) {
					mc_list->mc[mc_list->count] =
						ether_addr_to_u64(ha->addr);
					mc_list->count++;
				}
			}
		}
	}
	spin_lock(&nic->rx_mode_wq_lock);
	kfree(nic->rx_mode_work.mc);
	nic->rx_mode_work.mc = mc_list;
	nic->rx_mode_work.mode = mode;
	queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
	spin_unlock(&nic->rx_mode_wq_lock);
}

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
	.ndo_bpf		= nicvf_xdp,
	.ndo_eth_ioctl		= nicvf_ioctl,
	.ndo_set_rx_mode	= nicvf_set_rx_mode,
};

static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	int err, qcount;
	u16 sdevid;
	struct cavium_ptp *ptp_clock;

	ptp_clock = cavium_ptp_get();
	if (IS_ERR(ptp_clock)) {
		if (PTR_ERR(ptp_clock) == -ENODEV)
			/* In virtualized environment we proceed without ptp */
			ptp_clock = NULL;
		else
			return PTR_ERR(ptp_clock);
	}

	err = pci_enable_device(pdev);
	if (err)
		return dev_err_probe(dev, err, "Failed to enable PCI device\n");

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	qcount = netif_get_num_default_rss_queues();

	/* Restrict multiqset support only for host bound VFs */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = min_t(int, num_online_cpus(),
			       (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;
	/* If no of CPUs are too low, there won't be any queues left
	 * for XDP_TX, hence double it.
	 */
	if (!nic->t88)
		nic->max_queues *= 2;
	nic->ptp_clock = ptp_clock;

	/* Initialize mutex that serializes usage of VF's mailbox */
	mutex_init(&nic->rx_mode_mtx);

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
	if (!nic->drv_stats) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	if (!pass1_silicon(nic->pdev))
		nic->hw_tso = true;

	/* Get iommu domain for iova to physical addr conversion */
	nic->iommu_domain = iommu_get_domain_for_dev(dev);

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if (sdevid == 0xA134)
		nic->t88 = true;

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
			       NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			       NETIF_F_HW_VLAN_CTAG_RX);

	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	/* MTU range: NIC_HW_MIN_FRS - NIC_HW_MAX_FRS */
	netdev->min_mtu = NIC_HW_MIN_FRS;
	netdev->max_mtu = NIC_HW_MAX_FRS;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
							WQ_MEM_RECLAIM,
							nic->vf_id);
	if (!nic->nicvf_rx_mode_wq) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate work queue\n");
		goto err_unregister_interrupts;
	}

	INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
	spin_lock_init(&nic->rx_mode_wq_lock);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic;
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to a different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	if (nic->nicvf_rx_mode_wq) {
		destroy_workqueue(nic->nicvf_rx_mode_wq);
		nic->nicvf_rx_mode_wq = NULL;
	}
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	cavium_ptp_put(nic->ptp_clock);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);