// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

0003 #include <linux/bpf.h>
0004 #include <linux/crash_dump.h>
0005 #include <linux/etherdevice.h>
0006 #include <linux/ethtool.h>
0007 #include <linux/filter.h>
0008 #include <linux/idr.h>
0009 #include <linux/if_vlan.h>
0010 #include <linux/module.h>
0011 #include <linux/netdevice.h>
0012 #include <linux/pci.h>
0013 #include <linux/rtnetlink.h>
0014 #include <linux/inetdevice.h>
0015
0016 #include "funeth.h"
0017 #include "funeth_devlink.h"
0018 #include "funeth_ktls.h"
0019 #include "fun_port.h"
0020 #include "fun_queue.h"
0021 #include "funeth_txrx.h"
0022
0023 #define ADMIN_SQ_DEPTH 32
0024 #define ADMIN_CQ_DEPTH 64
0025 #define ADMIN_RQ_DEPTH 16
0026
0027
0028 #define FUN_DFLT_QUEUES 16U
0029
0030 enum {
0031 FUN_SERV_RES_CHANGE = FUN_SERV_FIRST_AVAIL,
0032 FUN_SERV_DEL_PORTS,
0033 };
0034
0035 static const struct pci_device_id funeth_id_table[] = {
0036 { PCI_VDEVICE(FUNGIBLE, 0x0101) },
0037 { PCI_VDEVICE(FUNGIBLE, 0x0181) },
0038 { 0, }
0039 };
0040
0041
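/* Write up to @n port attributes in one admin command. Each attribute is a
 * 48-bit key/value pair taken from @keys[] and @data[].
 */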
0042 static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n,
0043 const int *keys, const u64 *data)
0044 {
0045 unsigned int cmd_size, i;
0046 union {
0047 struct fun_admin_port_req req;
0048 struct fun_admin_port_rsp rsp;
0049 u8 v[ADMIN_SQE_SIZE];
0050 } cmd;
0051
0052 cmd_size = offsetof(struct fun_admin_port_req, u.write.write48) +
0053 n * sizeof(struct fun_admin_write48_req);
0054 if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
0055 return -EINVAL;
0056
0057 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
0058 cmd_size);
0059 cmd.req.u.write =
0060 FUN_ADMIN_PORT_WRITE_REQ_INIT(FUN_ADMIN_SUBOP_WRITE, 0,
0061 fp->netdev->dev_port);
0062 for (i = 0; i < n; i++)
0063 cmd.req.u.write.write48[i] =
0064 FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]);
0065
0066 return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
0067 &cmd.rsp, cmd_size, 0);
0068 }
0069
0070 int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data)
0071 {
0072 return fun_port_write_cmds(fp, 1, &key, &data);
0073 }
0074
0075
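/* Read up to @n 48-bit port attributes identified by @keys[] into @data[]. */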
0076 static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n,
0077 const int *keys, u64 *data)
0078 {
0079 const struct fun_admin_read48_rsp *r48rsp;
0080 unsigned int cmd_size, i;
0081 int rc;
0082 union {
0083 struct fun_admin_port_req req;
0084 struct fun_admin_port_rsp rsp;
0085 u8 v[ADMIN_SQE_SIZE];
0086 } cmd;
0087
0088 cmd_size = offsetof(struct fun_admin_port_req, u.read.read48) +
0089 n * sizeof(struct fun_admin_read48_req);
0090 if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
0091 return -EINVAL;
0092
0093 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
0094 cmd_size);
0095 cmd.req.u.read =
0096 FUN_ADMIN_PORT_READ_REQ_INIT(FUN_ADMIN_SUBOP_READ, 0,
0097 fp->netdev->dev_port);
0098 for (i = 0; i < n; i++)
0099 cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]);
0100
0101 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
0102 &cmd.rsp, cmd_size, 0);
0103 if (rc)
0104 return rc;
0105
0106 for (r48rsp = cmd.rsp.u.read.read48, i = 0; i < n; i++, r48rsp++) {
0107 data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data);
0108 dev_dbg(fp->fdev->dev,
0109 "port_read_rsp lport=%u (key_to_data=0x%llx) key=%d data:%lld retval:%lld",
0110 fp->lport, r48rsp->key_to_data, keys[i], data[i],
0111 FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data));
0112 }
0113 return 0;
0114 }
0115
0116 int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data)
0117 {
0118 return fun_port_read_cmds(fp, 1, &key, data);
0119 }
0120
0121 static void fun_report_link(struct net_device *netdev)
0122 {
0123 if (netif_carrier_ok(netdev)) {
0124 const struct funeth_priv *fp = netdev_priv(netdev);
0125 const char *fec = "", *pause = "";
0126 int speed = fp->link_speed;
0127 char unit = 'M';
0128
0129 if (fp->link_speed >= SPEED_1000) {
0130 speed /= 1000;
0131 unit = 'G';
0132 }
0133
0134 if (fp->active_fec & FUN_PORT_FEC_RS)
0135 fec = ", RS-FEC";
0136 else if (fp->active_fec & FUN_PORT_FEC_FC)
0137 fec = ", BASER-FEC";
0138
0139 if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK)
0140 pause = ", Tx/Rx PAUSE";
0141 else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE)
0142 pause = ", Rx PAUSE";
0143 else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE)
0144 pause = ", Tx PAUSE";
0145
0146 netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n",
0147 speed, unit, pause, fec);
0148 } else {
0149 netdev_info(netdev, "Link down\n");
0150 }
0151 }
0152
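/* Write one ADI attribute (MAC address, VLAN, or Tx rate) for ADI @adi_id. */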
0153 static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr,
0154 unsigned int adi_id, const struct fun_adi_param *param)
0155 {
0156 struct fun_admin_adi_req req = {
0157 .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ADI,
0158 sizeof(req)),
0159 .u.write.subop = FUN_ADMIN_SUBOP_WRITE,
0160 .u.write.attribute = attr,
0161 .u.write.id = cpu_to_be32(adi_id),
0162 .u.write.param = *param
0163 };
0164
0165 return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
0166 }
0167
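/* Configure the device's RSS for this port: copy the hash key and the CQ
 * indirection table into the DMA-coherent config area and issue an RSS admin
 * command. @op selects create vs. modify; on create the returned HW id is
 * saved in fp->rss_hw_id.
 */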
0176 int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
0177 const u32 *qtable, u8 op)
0178 {
0179 struct funeth_priv *fp = netdev_priv(dev);
0180 unsigned int table_len = fp->indir_table_nentries;
0181 unsigned int len = FUN_ETH_RSS_MAX_KEY_SIZE + sizeof(u32) * table_len;
0182 struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
0183 union {
0184 struct {
0185 struct fun_admin_rss_req req;
0186 struct fun_dataop_gl gl;
0187 };
0188 struct fun_admin_generic_create_rsp rsp;
0189 } cmd;
0190 __be32 *indir_tab;
0191 u16 flags;
0192 int rc;
0193
0194 if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID)
0195 return -EINVAL;
0196
0197 flags = op == FUN_ADMIN_SUBOP_CREATE ?
0198 FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR : 0;
0199 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_RSS,
0200 sizeof(cmd));
0201 cmd.req.u.create =
0202 FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id,
0203 dev->dev_port, algo,
0204 FUN_ETH_RSS_MAX_KEY_SIZE,
0205 table_len, 0,
0206 FUN_ETH_RSS_MAX_KEY_SIZE);
0207 cmd.req.u.create.dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
0208 fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr);
0209
0210
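        /* Write the hash key followed by the CQ indirection table into the
         * DMA-coherent RSS config area referenced by the command.
         */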
0211 memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE);
0212 indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE;
0213 for (rc = 0; rc < table_len; rc++)
0214 *indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid);
0215
0216 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
0217 &cmd.rsp, sizeof(cmd.rsp), 0);
0218 if (!rc && op == FUN_ADMIN_SUBOP_CREATE)
0219 fp->rss_hw_id = be32_to_cpu(cmd.rsp.id);
0220 return rc;
0221 }
0222
0223
0224
0225
0226 static void fun_destroy_rss(struct funeth_priv *fp)
0227 {
0228 if (fp->rss_hw_id != FUN_HCI_ID_INVALID) {
0229 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id);
0230 fp->rss_hw_id = FUN_HCI_ID_INVALID;
0231 }
0232 }
0233
0234 static void fun_irq_aff_notify(struct irq_affinity_notify *notify,
0235 const cpumask_t *mask)
0236 {
0237 struct fun_irq *p = container_of(notify, struct fun_irq, aff_notify);
0238
0239 cpumask_copy(&p->affinity_mask, mask);
0240 }
0241
0242 static void fun_irq_aff_release(struct kref __always_unused *ref)
0243 {
0244 }
0245
0246
0247
0248
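/* Allocate one queue IRQ: spread it across CPUs local to @node, reserve a
 * device IRQ vector, and store it in the per-device XArray at
 * @idx + @xa_idx_offset.
 */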
0249 static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
0250 int node, unsigned int xa_idx_offset)
0251 {
0252 struct fun_irq *irq;
0253 int cpu, res;
0254
0255 cpu = cpumask_local_spread(idx, node);
0256 node = cpu_to_mem(cpu);
0257
0258 irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
0259 if (!irq)
0260 return ERR_PTR(-ENOMEM);
0261
0262 res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx);
0263 if (res != 1)
0264 goto free_irq;
0265
0266 res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL);
0267 if (res)
0268 goto release_irq;
0269
0270 irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx);
0271 cpumask_set_cpu(cpu, &irq->affinity_mask);
0272 irq->aff_notify.notify = fun_irq_aff_notify;
0273 irq->aff_notify.release = fun_irq_aff_release;
0274 irq->state = FUN_IRQ_INIT;
0275 return irq;
0276
0277 release_irq:
0278 fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
0279 free_irq:
0280 kfree(irq);
0281 return ERR_PTR(res);
0282 }
0283
0284 static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq)
0285 {
0286 netif_napi_del(&irq->napi);
0287 fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
0288 kfree(irq);
0289 }
0290
0291
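/* Free any queue IRQs that are no longer attached to a Tx or Rx queue and
 * update the per-type IRQ counts.
 */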
0292 static void fun_prune_queue_irqs(struct net_device *dev)
0293 {
0294 struct funeth_priv *fp = netdev_priv(dev);
0295 unsigned int nreleased = 0;
0296 struct fun_irq *irq;
0297 unsigned long idx;
0298
0299 xa_for_each(&fp->irqs, idx, irq) {
0300 if (irq->txq || irq->rxq)
0301 continue;
0302
0303 xa_erase(&fp->irqs, idx);
0304 fun_free_qirq(fp, irq);
0305 nreleased++;
0306 if (idx < fp->rx_irq_ofst)
0307 fp->num_tx_irqs--;
0308 else
0309 fp->num_rx_irqs--;
0310 }
0311 netif_info(fp, intr, dev, "Released %u queue IRQs\n", nreleased);
0312 }
0313
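/* Allocate IRQs for any Tx/Rx queues beyond those already provisioned (@ntx
 * and @nrx are the target totals) and register their NAPI poll handlers.
 */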
0319 static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
0320 unsigned int nrx)
0321 {
0322 struct funeth_priv *fp = netdev_priv(dev);
0323 int node = dev_to_node(&fp->pdev->dev);
0324 struct fun_irq *irq;
0325 unsigned int i;
0326
0327 for (i = fp->num_tx_irqs; i < ntx; i++) {
0328 irq = fun_alloc_qirq(fp, i, node, 0);
0329 if (IS_ERR(irq))
0330 return PTR_ERR(irq);
0331
0332 fp->num_tx_irqs++;
0333 netif_napi_add_tx(dev, &irq->napi, fun_txq_napi_poll);
0334 }
0335
0336 for (i = fp->num_rx_irqs; i < nrx; i++) {
0337 irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst);
0338 if (IS_ERR(irq))
0339 return PTR_ERR(irq);
0340
0341 fp->num_rx_irqs++;
0342 netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll,
0343 NAPI_POLL_WEIGHT);
0344 }
0345
0346 netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n",
0347 ntx, nrx);
0348 return 0;
0349 }
0350
0351 static void free_txqs(struct funeth_txq **txqs, unsigned int nqs,
0352 unsigned int start, int state)
0353 {
0354 unsigned int i;
0355
0356 for (i = start; i < nqs && txqs[i]; i++)
0357 txqs[i] = funeth_txq_free(txqs[i], state);
0358 }
0359
0360 static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs,
0361 unsigned int nqs, unsigned int depth, unsigned int start,
0362 int state)
0363 {
0364 struct funeth_priv *fp = netdev_priv(dev);
0365 unsigned int i;
0366 int err;
0367
0368 for (i = start; i < nqs; i++) {
0369 err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i),
0370 state, &txqs[i]);
0371 if (err) {
0372 free_txqs(txqs, nqs, start, FUN_QSTATE_DESTROYED);
0373 return err;
0374 }
0375 }
0376 return 0;
0377 }
0378
0379 static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs,
0380 unsigned int start, int state)
0381 {
0382 unsigned int i;
0383
0384 for (i = start; i < nqs && rxqs[i]; i++)
0385 rxqs[i] = funeth_rxq_free(rxqs[i], state);
0386 }
0387
0388 static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs,
0389 unsigned int nqs, unsigned int ncqe, unsigned int nrqe,
0390 unsigned int start, int state)
0391 {
0392 struct funeth_priv *fp = netdev_priv(dev);
0393 unsigned int i;
0394 int err;
0395
0396 for (i = start; i < nqs; i++) {
0397 err = funeth_rxq_create(dev, i, ncqe, nrqe,
0398 xa_load(&fp->irqs, i + fp->rx_irq_ofst),
0399 state, &rxqs[i]);
0400 if (err) {
0401 free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED);
0402 return err;
0403 }
0404 }
0405 return 0;
0406 }
0407
0408 static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs,
0409 unsigned int start, int state)
0410 {
0411 unsigned int i;
0412
0413 for (i = start; i < nqs && xdpqs[i]; i++)
0414 xdpqs[i] = funeth_txq_free(xdpqs[i], state);
0415
0416 if (state == FUN_QSTATE_DESTROYED)
0417 kfree(xdpqs);
0418 }
0419
0420 static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs,
0421 unsigned int depth, unsigned int start,
0422 int state)
0423 {
0424 struct funeth_txq **xdpqs;
0425 unsigned int i;
0426 int err;
0427
0428 xdpqs = kcalloc(nqs, sizeof(*xdpqs), GFP_KERNEL);
0429 if (!xdpqs)
0430 return ERR_PTR(-ENOMEM);
0431
0432 for (i = start; i < nqs; i++) {
0433 err = funeth_txq_create(dev, i, depth, NULL, state, &xdpqs[i]);
0434 if (err) {
0435 free_xdpqs(xdpqs, nqs, start, FUN_QSTATE_DESTROYED);
0436 return ERR_PTR(err);
0437 }
0438 }
0439 return xdpqs;
0440 }
0441
0442 static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset)
0443 {
0444 struct funeth_priv *fp = netdev_priv(netdev);
0445 struct funeth_txq **xdpqs = qset->xdpqs;
0446 struct funeth_rxq **rxqs = qset->rxqs;
0447
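        /* If the caller didn't supply queues, operate on the device's current
         * queue set and detach it from the netdev below.
         */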
0451 if (!rxqs) {
0452 rxqs = rtnl_dereference(fp->rxqs);
0453 xdpqs = rtnl_dereference(fp->xdpqs);
0454 qset->txqs = fp->txqs;
0455 qset->nrxqs = netdev->real_num_rx_queues;
0456 qset->ntxqs = netdev->real_num_tx_queues;
0457 qset->nxdpqs = fp->num_xdpqs;
0458 }
0459 if (!rxqs)
0460 return;
0461
0462 if (rxqs == rtnl_dereference(fp->rxqs)) {
0463 rcu_assign_pointer(fp->rxqs, NULL);
0464 rcu_assign_pointer(fp->xdpqs, NULL);
0465 synchronize_net();
0466 fp->txqs = NULL;
0467 }
0468
0469 free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state);
0470 free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state);
0471 free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state);
0472 if (qset->state == FUN_QSTATE_DESTROYED)
0473 kfree(rxqs);
0474
0475
0476 qset->rxqs = rxqs;
0477 qset->xdpqs = xdpqs;
0478 }
0479
0480 static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset)
0481 {
0482 struct funeth_txq **xdpqs = NULL, **txqs;
0483 struct funeth_rxq **rxqs;
0484 int err;
0485
0486 err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs);
0487 if (err)
0488 return err;
0489
0490 rxqs = kcalloc(qset->ntxqs + qset->nrxqs, sizeof(*rxqs), GFP_KERNEL);
0491 if (!rxqs)
0492 return -ENOMEM;
0493
0494 if (qset->nxdpqs) {
0495 xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth,
0496 qset->xdpq_start, qset->state);
0497 if (IS_ERR(xdpqs)) {
0498 err = PTR_ERR(xdpqs);
0499 goto free_qvec;
0500 }
0501 }
0502
0503 txqs = (struct funeth_txq **)&rxqs[qset->nrxqs];
0504 err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth,
0505 qset->txq_start, qset->state);
0506 if (err)
0507 goto free_xdpqs;
0508
0509 err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth,
0510 qset->rq_depth, qset->rxq_start, qset->state);
0511 if (err)
0512 goto free_txqs;
0513
0514 qset->rxqs = rxqs;
0515 qset->txqs = txqs;
0516 qset->xdpqs = xdpqs;
0517 return 0;
0518
0519 free_txqs:
0520 free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED);
0521 free_xdpqs:
0522 free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED);
0523 free_qvec:
0524 kfree(rxqs);
0525 return err;
0526 }
0527
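/* Take queues that exist only in software the rest of the way: create the
 * device-side state for all Rx, Tx, and XDP queues in @qset.
 */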
0531 static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset)
0532 {
0533 struct funeth_priv *fp = netdev_priv(dev);
0534 int i, err;
0535
0536 for (i = 0; i < qset->nrxqs; i++) {
0537 err = fun_rxq_create_dev(qset->rxqs[i],
0538 xa_load(&fp->irqs,
0539 i + fp->rx_irq_ofst));
0540 if (err)
0541 goto out;
0542 }
0543
0544 for (i = 0; i < qset->ntxqs; i++) {
0545 err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i));
0546 if (err)
0547 goto out;
0548 }
0549
0550 for (i = 0; i < qset->nxdpqs; i++) {
0551 err = fun_txq_create_dev(qset->xdpqs[i], NULL);
0552 if (err)
0553 goto out;
0554 }
0555
0556 return 0;
0557
0558 out:
0559 fun_free_rings(dev, qset);
0560 return err;
0561 }
0562
0563 static int fun_port_create(struct net_device *netdev)
0564 {
0565 struct funeth_priv *fp = netdev_priv(netdev);
0566 union {
0567 struct fun_admin_port_req req;
0568 struct fun_admin_port_rsp rsp;
0569 } cmd;
0570 int rc;
0571
0572 if (fp->lport != INVALID_LPORT)
0573 return 0;
0574
0575 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
0576 sizeof(cmd.req));
0577 cmd.req.u.create =
0578 FUN_ADMIN_PORT_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0,
0579 netdev->dev_port);
0580
0581 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
0582 sizeof(cmd.rsp), 0);
0583
0584 if (!rc)
0585 fp->lport = be16_to_cpu(cmd.rsp.u.create.lport);
0586 return rc;
0587 }
0588
0589 static int fun_port_destroy(struct net_device *netdev)
0590 {
0591 struct funeth_priv *fp = netdev_priv(netdev);
0592
0593 if (fp->lport == INVALID_LPORT)
0594 return 0;
0595
0596 fp->lport = INVALID_LPORT;
0597 return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0,
0598 netdev->dev_port);
0599 }
0600
0601 static int fun_eth_create(struct funeth_priv *fp)
0602 {
0603 union {
0604 struct fun_admin_eth_req req;
0605 struct fun_admin_generic_create_rsp rsp;
0606 } cmd;
0607 int rc;
0608
0609 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ETH,
0610 sizeof(cmd.req));
0611 cmd.req.u.create = FUN_ADMIN_ETH_CREATE_REQ_INIT(
0612 FUN_ADMIN_SUBOP_CREATE,
0613 FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR,
0614 0, fp->netdev->dev_port);
0615
0616 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
0617 sizeof(cmd.rsp), 0);
0618 return rc ? rc : be32_to_cpu(cmd.rsp.id);
0619 }
0620
0621 static int fun_vi_create(struct funeth_priv *fp)
0622 {
0623 struct fun_admin_vi_req req = {
0624 .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_VI,
0625 sizeof(req)),
0626 .u.create = FUN_ADMIN_VI_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE,
0627 0,
0628 fp->netdev->dev_port,
0629 fp->netdev->dev_port)
0630 };
0631
0632 return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
0633 }
0634
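/* Create an ETH Tx flow resource and bind the SQ @sqid to it. Returns the
 * ETH resource id on success or a negative error.
 */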
0638 int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid)
0639 {
0640 int rc, ethid;
0641
0642 ethid = fun_eth_create(fp);
0643 if (ethid >= 0) {
0644 rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid,
0645 FUN_ADMIN_BIND_TYPE_ETH, ethid);
0646 if (rc) {
0647 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid);
0648 ethid = rc;
0649 }
0650 }
0651 return ethid;
0652 }
0653
0654 static irqreturn_t fun_queue_irq_handler(int irq, void *data)
0655 {
0656 struct fun_irq *p = data;
0657
0658 if (p->rxq) {
0659 prefetch(p->rxq->next_cqe_info);
0660 p->rxq->irq_cnt++;
0661 }
0662 napi_schedule_irqoff(&p->napi);
0663 return IRQ_HANDLED;
0664 }
0665
0666 static int fun_enable_irqs(struct net_device *dev)
0667 {
0668 struct funeth_priv *fp = netdev_priv(dev);
0669 unsigned long idx, last;
0670 unsigned int qidx;
0671 struct fun_irq *p;
0672 const char *qtype;
0673 int err;
0674
0675 xa_for_each(&fp->irqs, idx, p) {
0676 if (p->txq) {
0677 qtype = "tx";
0678 qidx = p->txq->qidx;
0679 } else if (p->rxq) {
0680 qtype = "rx";
0681 qidx = p->rxq->qidx;
0682 } else {
0683 continue;
0684 }
0685
0686 if (p->state != FUN_IRQ_INIT)
0687 continue;
0688
0689 snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name,
0690 qtype, qidx);
0691 err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p);
0692 if (err) {
0693 netdev_err(dev, "Failed to allocate IRQ %u, err %d\n",
0694 p->irq, err);
0695 goto unroll;
0696 }
0697 p->state = FUN_IRQ_REQUESTED;
0698 }
0699
0700 xa_for_each(&fp->irqs, idx, p) {
0701 if (p->state != FUN_IRQ_REQUESTED)
0702 continue;
0703 irq_set_affinity_notifier(p->irq, &p->aff_notify);
0704 irq_set_affinity_and_hint(p->irq, &p->affinity_mask);
0705 napi_enable(&p->napi);
0706 p->state = FUN_IRQ_ENABLED;
0707 }
0708
0709 return 0;
0710
0711 unroll:
0712 last = idx - 1;
0713 xa_for_each_range(&fp->irqs, idx, p, 0, last)
0714 if (p->state == FUN_IRQ_REQUESTED) {
0715 free_irq(p->irq, p);
0716 p->state = FUN_IRQ_INIT;
0717 }
0718
0719 return err;
0720 }
0721
0722 static void fun_disable_one_irq(struct fun_irq *irq)
0723 {
0724 napi_disable(&irq->napi);
0725 irq_set_affinity_notifier(irq->irq, NULL);
0726 irq_update_affinity_hint(irq->irq, NULL);
0727 free_irq(irq->irq, irq);
0728 irq->state = FUN_IRQ_INIT;
0729 }
0730
0731 static void fun_disable_irqs(struct net_device *dev)
0732 {
0733 struct funeth_priv *fp = netdev_priv(dev);
0734 struct fun_irq *p;
0735 unsigned long idx;
0736
0737 xa_for_each(&fp->irqs, idx, p)
0738 if (p->state == FUN_IRQ_ENABLED)
0739 fun_disable_one_irq(p);
0740 }
0741
0742 static void fun_down(struct net_device *dev, struct fun_qset *qset)
0743 {
0744 struct funeth_priv *fp = netdev_priv(dev);
0745
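        /* If the queues are already gone there is nothing to tear down; this
         * covers calls made before the data path was ever brought up.
         */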
0749 if (!rcu_access_pointer(fp->rxqs))
0750 return;
0751
0752
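        /* Tear down device state only if the queues reached full
         * initialization.
         */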
0753 if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) {
0754 netif_info(fp, ifdown, dev,
0755 "Tearing down data path on device\n");
0756 fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_DISABLE, 0);
0757
0758 netif_carrier_off(dev);
0759 netif_tx_disable(dev);
0760
0761 fun_destroy_rss(fp);
0762 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
0763 fun_disable_irqs(dev);
0764 }
0765
0766 fun_free_rings(dev, qset);
0767 }
0768
0769 static int fun_up(struct net_device *dev, struct fun_qset *qset)
0770 {
0771 static const int port_keys[] = {
0772 FUN_ADMIN_PORT_KEY_STATS_DMA_LOW,
0773 FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH,
0774 FUN_ADMIN_PORT_KEY_ENABLE
0775 };
0776
0777 struct funeth_priv *fp = netdev_priv(dev);
0778 u64 vals[] = {
0779 lower_32_bits(fp->stats_dma_addr),
0780 upper_32_bits(fp->stats_dma_addr),
0781 FUN_PORT_FLAG_ENABLE_NOTIFY
0782 };
0783 int err;
0784
0785 netif_info(fp, ifup, dev, "Setting up data path on device\n");
0786
0787 if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) {
0788 err = fun_advance_ring_state(dev, qset);
0789 if (err)
0790 return err;
0791 }
0792
0793 err = fun_vi_create(fp);
0794 if (err)
0795 goto free_queues;
0796
0797 fp->txqs = qset->txqs;
0798 rcu_assign_pointer(fp->rxqs, qset->rxqs);
0799 rcu_assign_pointer(fp->xdpqs, qset->xdpqs);
0800
0801 err = fun_enable_irqs(dev);
0802 if (err)
0803 goto destroy_vi;
0804
0805 if (fp->rss_cfg) {
0806 err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
0807 fp->indir_table, FUN_ADMIN_SUBOP_CREATE);
0808 } else {
0809
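                /* Without RSS, steer all Rx traffic to the first queue's CQ. */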
0810 err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port,
0811 FUN_ADMIN_BIND_TYPE_EPCQ,
0812 qset->rxqs[0]->hw_cqid);
0813 }
0814 if (err)
0815 goto disable_irqs;
0816
0817 err = fun_port_write_cmds(fp, 3, port_keys, vals);
0818 if (err)
0819 goto free_rss;
0820
0821 netif_tx_start_all_queues(dev);
0822 return 0;
0823
0824 free_rss:
0825 fun_destroy_rss(fp);
0826 disable_irqs:
0827 fun_disable_irqs(dev);
0828 destroy_vi:
0829 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
0830 free_queues:
0831 fun_free_rings(dev, qset);
0832 return err;
0833 }
0834
0835 static int funeth_open(struct net_device *netdev)
0836 {
0837 struct funeth_priv *fp = netdev_priv(netdev);
0838 struct fun_qset qset = {
0839 .nrxqs = netdev->real_num_rx_queues,
0840 .ntxqs = netdev->real_num_tx_queues,
0841 .nxdpqs = fp->num_xdpqs,
0842 .cq_depth = fp->cq_depth,
0843 .rq_depth = fp->rq_depth,
0844 .sq_depth = fp->sq_depth,
0845 .state = FUN_QSTATE_INIT_FULL,
0846 };
0847 int rc;
0848
0849 rc = fun_alloc_rings(netdev, &qset);
0850 if (rc)
0851 return rc;
0852
0853 rc = fun_up(netdev, &qset);
0854 if (rc) {
0855 qset.state = FUN_QSTATE_DESTROYED;
0856 fun_free_rings(netdev, &qset);
0857 }
0858
0859 return rc;
0860 }
0861
0862 static int funeth_close(struct net_device *netdev)
0863 {
0864 struct fun_qset qset = { .state = FUN_QSTATE_DESTROYED };
0865
0866 fun_down(netdev, &qset);
0867 return 0;
0868 }
0869
0870 static void fun_get_stats64(struct net_device *netdev,
0871 struct rtnl_link_stats64 *stats)
0872 {
0873 struct funeth_priv *fp = netdev_priv(netdev);
0874 struct funeth_txq **xdpqs;
0875 struct funeth_rxq **rxqs;
0876 unsigned int i, start;
0877
0878 stats->tx_packets = fp->tx_packets;
0879 stats->tx_bytes = fp->tx_bytes;
0880 stats->tx_dropped = fp->tx_dropped;
0881
0882 stats->rx_packets = fp->rx_packets;
0883 stats->rx_bytes = fp->rx_bytes;
0884 stats->rx_dropped = fp->rx_dropped;
0885
0886 rcu_read_lock();
0887 rxqs = rcu_dereference(fp->rxqs);
0888 if (!rxqs)
0889 goto unlock;
0890
0891 for (i = 0; i < netdev->real_num_tx_queues; i++) {
0892 struct funeth_txq_stats txs;
0893
0894 FUN_QSTAT_READ(fp->txqs[i], start, txs);
0895 stats->tx_packets += txs.tx_pkts;
0896 stats->tx_bytes += txs.tx_bytes;
0897 stats->tx_dropped += txs.tx_map_err;
0898 }
0899
0900 for (i = 0; i < netdev->real_num_rx_queues; i++) {
0901 struct funeth_rxq_stats rxs;
0902
0903 FUN_QSTAT_READ(rxqs[i], start, rxs);
0904 stats->rx_packets += rxs.rx_pkts;
0905 stats->rx_bytes += rxs.rx_bytes;
0906 stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops;
0907 }
0908
0909 xdpqs = rcu_dereference(fp->xdpqs);
0910 if (!xdpqs)
0911 goto unlock;
0912
0913 for (i = 0; i < fp->num_xdpqs; i++) {
0914 struct funeth_txq_stats txs;
0915
0916 FUN_QSTAT_READ(xdpqs[i], start, txs);
0917 stats->tx_packets += txs.tx_pkts;
0918 stats->tx_bytes += txs.tx_bytes;
0919 }
0920 unlock:
0921 rcu_read_unlock();
0922 }
0923
0924 static int fun_change_mtu(struct net_device *netdev, int new_mtu)
0925 {
0926 struct funeth_priv *fp = netdev_priv(netdev);
0927 int rc;
0928
0929 rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
0930 if (!rc)
0931 netdev->mtu = new_mtu;
0932 return rc;
0933 }
0934
0935 static int fun_set_macaddr(struct net_device *netdev, void *addr)
0936 {
0937 struct funeth_priv *fp = netdev_priv(netdev);
0938 struct sockaddr *saddr = addr;
0939 int rc;
0940
0941 if (!is_valid_ether_addr(saddr->sa_data))
0942 return -EADDRNOTAVAIL;
0943
0944 if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
0945 return 0;
0946
0947 rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
0948 ether_addr_to_u64(saddr->sa_data));
0949 if (!rc)
0950 eth_hw_addr_set(netdev, saddr->sa_data);
0951 return rc;
0952 }
0953
0954 static int fun_get_port_attributes(struct net_device *netdev)
0955 {
0956 static const int keys[] = {
0957 FUN_ADMIN_PORT_KEY_MACADDR, FUN_ADMIN_PORT_KEY_CAPABILITIES,
0958 FUN_ADMIN_PORT_KEY_ADVERT, FUN_ADMIN_PORT_KEY_MTU
0959 };
0960 static const int phys_keys[] = {
0961 FUN_ADMIN_PORT_KEY_LANE_ATTRS,
0962 };
0963
0964 struct funeth_priv *fp = netdev_priv(netdev);
0965 u64 data[ARRAY_SIZE(keys)];
0966 u8 mac[ETH_ALEN];
0967 int i, rc;
0968
0969 rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data);
0970 if (rc)
0971 return rc;
0972
0973 for (i = 0; i < ARRAY_SIZE(keys); i++) {
0974 switch (keys[i]) {
0975 case FUN_ADMIN_PORT_KEY_MACADDR:
0976 u64_to_ether_addr(data[i], mac);
0977 if (is_zero_ether_addr(mac)) {
0978 eth_hw_addr_random(netdev);
0979 } else if (is_valid_ether_addr(mac)) {
0980 eth_hw_addr_set(netdev, mac);
0981 } else {
0982 netdev_err(netdev,
0983 "device provided a bad MAC address %pM\n",
0984 mac);
0985 return -EINVAL;
0986 }
0987 break;
0988
0989 case FUN_ADMIN_PORT_KEY_CAPABILITIES:
0990 fp->port_caps = data[i];
0991 break;
0992
0993 case FUN_ADMIN_PORT_KEY_ADVERT:
0994 fp->advertising = data[i];
0995 break;
0996
0997 case FUN_ADMIN_PORT_KEY_MTU:
0998 netdev->mtu = data[i];
0999 break;
1000 }
1001 }
1002
1003 if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) {
1004 rc = fun_port_read_cmds(fp, ARRAY_SIZE(phys_keys), phys_keys,
1005 data);
1006 if (rc)
1007 return rc;
1008
1009 fp->lane_attrs = data[0];
1010 }
1011
1012 if (netdev->addr_assign_type == NET_ADDR_RANDOM)
1013 return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
1014 ether_addr_to_u64(netdev->dev_addr));
1015 return 0;
1016 }
1017
1018 static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1019 {
1020 const struct funeth_priv *fp = netdev_priv(dev);
1021
1022 return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg,
1023 sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0;
1024 }
1025
1026 static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1027 {
1028 struct funeth_priv *fp = netdev_priv(dev);
1029 struct hwtstamp_config cfg;
1030
1031 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1032 return -EFAULT;
1033
1034
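        /* Tx hardware timestamping is not supported; force it off. */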
1035 cfg.tx_type = HWTSTAMP_TX_OFF;
1036
1037 switch (cfg.rx_filter) {
1038 case HWTSTAMP_FILTER_NONE:
1039 break;
1040 case HWTSTAMP_FILTER_ALL:
1041 case HWTSTAMP_FILTER_SOME:
1042 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1043 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1044 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1045 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1046 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1047 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1048 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1049 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1050 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1051 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1052 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1053 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1054 case HWTSTAMP_FILTER_NTP_ALL:
1055 cfg.rx_filter = HWTSTAMP_FILTER_ALL;
1056 break;
1057 default:
1058 return -ERANGE;
1059 }
1060
1061 fp->hwtstamp_cfg = cfg;
1062 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1063 }
1064
1065 static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1066 {
1067 switch (cmd) {
1068 case SIOCSHWTSTAMP:
1069 return fun_hwtstamp_set(dev, ifr);
1070 case SIOCGHWTSTAMP:
1071 return fun_hwtstamp_get(dev, ifr);
1072 default:
1073 return -EOPNOTSUPP;
1074 }
1075 }
1076
1077
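/* Enter XDP mode: allocate one XDP Tx queue per online CPU and attach the
 * program to every Rx queue.
 */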
1078 static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog)
1079 {
1080 struct funeth_priv *fp = netdev_priv(dev);
1081 unsigned int i, nqs = num_online_cpus();
1082 struct funeth_txq **xdpqs;
1083 struct funeth_rxq **rxqs;
1084 int err;
1085
1086 xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL);
1087 if (IS_ERR(xdpqs))
1088 return PTR_ERR(xdpqs);
1089
1090 rxqs = rtnl_dereference(fp->rxqs);
1091 for (i = 0; i < dev->real_num_rx_queues; i++) {
1092 err = fun_rxq_set_bpf(rxqs[i], prog);
1093 if (err)
1094 goto out;
1095 }
1096
1097 fp->num_xdpqs = nqs;
1098 rcu_assign_pointer(fp->xdpqs, xdpqs);
1099 return 0;
1100 out:
1101 while (i--)
1102 fun_rxq_set_bpf(rxqs[i], NULL);
1103
1104 free_xdpqs(xdpqs, nqs, 0, FUN_QSTATE_DESTROYED);
1105 return err;
1106 }
1107
1108
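/* Leave XDP mode: unpublish and free the XDP Tx queues, then detach the
 * program from all Rx queues.
 */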
1109 static void fun_end_xdp(struct net_device *dev)
1110 {
1111 struct funeth_priv *fp = netdev_priv(dev);
1112 struct funeth_txq **xdpqs;
1113 struct funeth_rxq **rxqs;
1114 unsigned int i;
1115
1116 xdpqs = rtnl_dereference(fp->xdpqs);
1117 rcu_assign_pointer(fp->xdpqs, NULL);
1118 synchronize_net();
1119
1120
1121 free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED);
1122 fp->num_xdpqs = 0;
1123
1124 rxqs = rtnl_dereference(fp->rxqs);
1125 for (i = 0; i < dev->real_num_rx_queues; i++)
1126 fun_rxq_set_bpf(rxqs[i], NULL);
1127 }
1128
1129 #define XDP_MAX_MTU \
1130 (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM)
1131
1132 static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
1133 {
1134 struct bpf_prog *old_prog, *prog = xdp->prog;
1135 struct funeth_priv *fp = netdev_priv(dev);
1136 int i, err;
1137
1138
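        /* An XDP frame must fit in one page-sized buffer together with the
         * driver's head- and tailroom (see XDP_MAX_MTU above).
         */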
1139 if (prog && dev->mtu > XDP_MAX_MTU) {
1140 netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu);
1141 NL_SET_ERR_MSG_MOD(xdp->extack,
1142 "Device MTU too large for XDP");
1143 return -EINVAL;
1144 }
1145
1146 if (!netif_running(dev)) {
1147 fp->num_xdpqs = prog ? num_online_cpus() : 0;
1148 } else if (prog && !fp->xdp_prog) {
1149 err = fun_enter_xdp(dev, prog);
1150 if (err) {
1151 NL_SET_ERR_MSG_MOD(xdp->extack,
1152 "Failed to set queues for XDP.");
1153 return err;
1154 }
1155 } else if (!prog && fp->xdp_prog) {
1156 fun_end_xdp(dev);
1157 } else {
1158 struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
1159
1160 for (i = 0; i < dev->real_num_rx_queues; i++)
1161 WRITE_ONCE(rxqs[i]->xdp_prog, prog);
1162 }
1163
1164 dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU;
1165 old_prog = xchg(&fp->xdp_prog, prog);
1166 if (old_prog)
1167 bpf_prog_put(old_prog);
1168
1169 return 0;
1170 }
1171
1172 static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1173 {
1174 switch (xdp->command) {
1175 case XDP_SETUP_PROG:
1176 return fun_xdp_setup(dev, xdp);
1177 default:
1178 return -EINVAL;
1179 }
1180 }
1181
1182 static struct devlink_port *fun_get_devlink_port(struct net_device *netdev)
1183 {
1184 struct funeth_priv *fp = netdev_priv(netdev);
1185
1186 return &fp->dl_port;
1187 }
1188
1189 static int fun_init_vports(struct fun_ethdev *ed, unsigned int n)
1190 {
1191 if (ed->num_vports)
1192 return -EINVAL;
1193
1194 ed->vport_info = kvcalloc(n, sizeof(*ed->vport_info), GFP_KERNEL);
1195 if (!ed->vport_info)
1196 return -ENOMEM;
1197 ed->num_vports = n;
1198 return 0;
1199 }
1200
1201 static void fun_free_vports(struct fun_ethdev *ed)
1202 {
1203 kvfree(ed->vport_info);
1204 ed->vport_info = NULL;
1205 ed->num_vports = 0;
1206 }
1207
1208 static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed,
1209 unsigned int vport)
1210 {
1211 if (!ed->vport_info || vport >= ed->num_vports)
1212 return NULL;
1213
1214 return ed->vport_info + vport;
1215 }
1216
1217 static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
1218 {
1219 struct funeth_priv *fp = netdev_priv(dev);
1220 struct fun_adi_param mac_param = {};
1221 struct fun_dev *fdev = fp->fdev;
1222 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1223 struct fun_vport_info *vi;
1224 int rc = -EINVAL;
1225
1226 if (is_multicast_ether_addr(mac))
1227 return -EINVAL;
1228
1229 mutex_lock(&ed->state_mutex);
1230 vi = fun_get_vport(ed, vf);
1231 if (!vi)
1232 goto unlock;
1233
1234 mac_param.u.mac = FUN_ADI_MAC_INIT(ether_addr_to_u64(mac));
1235 rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_MACADDR, vf + 1,
1236 &mac_param);
1237 if (!rc)
1238 ether_addr_copy(vi->mac, mac);
1239 unlock:
1240 mutex_unlock(&ed->state_mutex);
1241 return rc;
1242 }
1243
1244 static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
1245 __be16 vlan_proto)
1246 {
1247 struct funeth_priv *fp = netdev_priv(dev);
1248 struct fun_adi_param vlan_param = {};
1249 struct fun_dev *fdev = fp->fdev;
1250 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1251 struct fun_vport_info *vi;
1252 int rc = -EINVAL;
1253
1254 if (vlan > 4095 || qos > 7)
1255 return -EINVAL;
1256 if (vlan_proto && vlan_proto != htons(ETH_P_8021Q) &&
1257 vlan_proto != htons(ETH_P_8021AD))
1258 return -EINVAL;
1259
1260 mutex_lock(&ed->state_mutex);
1261 vi = fun_get_vport(ed, vf);
1262 if (!vi)
1263 goto unlock;
1264
1265 vlan_param.u.vlan = FUN_ADI_VLAN_INIT(be16_to_cpu(vlan_proto),
1266 ((u16)qos << VLAN_PRIO_SHIFT) | vlan);
1267 rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_VLAN, vf + 1, &vlan_param);
1268 if (!rc) {
1269 vi->vlan = vlan;
1270 vi->qos = qos;
1271 vi->vlan_proto = vlan_proto;
1272 }
1273 unlock:
1274 mutex_unlock(&ed->state_mutex);
1275 return rc;
1276 }
1277
1278 static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
1279 int max_tx_rate)
1280 {
1281 struct funeth_priv *fp = netdev_priv(dev);
1282 struct fun_adi_param rate_param = {};
1283 struct fun_dev *fdev = fp->fdev;
1284 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1285 struct fun_vport_info *vi;
1286 int rc = -EINVAL;
1287
1288 if (min_tx_rate)
1289 return -EINVAL;
1290
1291 mutex_lock(&ed->state_mutex);
1292 vi = fun_get_vport(ed, vf);
1293 if (!vi)
1294 goto unlock;
1295
1296 rate_param.u.rate = FUN_ADI_RATE_INIT(max_tx_rate);
1297 rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_RATE, vf + 1, &rate_param);
1298 if (!rc)
1299 vi->max_rate = max_tx_rate;
1300 unlock:
1301 mutex_unlock(&ed->state_mutex);
1302 return rc;
1303 }
1304
1305 static int fun_get_vf_config(struct net_device *dev, int vf,
1306 struct ifla_vf_info *ivi)
1307 {
1308 struct funeth_priv *fp = netdev_priv(dev);
1309 struct fun_ethdev *ed = to_fun_ethdev(fp->fdev);
1310 const struct fun_vport_info *vi;
1311
1312 mutex_lock(&ed->state_mutex);
1313 vi = fun_get_vport(ed, vf);
1314 if (!vi)
1315 goto unlock;
1316
1317 memset(ivi, 0, sizeof(*ivi));
1318 ivi->vf = vf;
1319 ether_addr_copy(ivi->mac, vi->mac);
1320 ivi->vlan = vi->vlan;
1321 ivi->qos = vi->qos;
1322 ivi->vlan_proto = vi->vlan_proto;
1323 ivi->max_tx_rate = vi->max_rate;
1324 ivi->spoofchk = vi->spoofchk;
1325 unlock:
1326 mutex_unlock(&ed->state_mutex);
1327 return vi ? 0 : -EINVAL;
1328 }
1329
1330 static void fun_uninit(struct net_device *dev)
1331 {
1332 struct funeth_priv *fp = netdev_priv(dev);
1333
1334 fun_prune_queue_irqs(dev);
1335 xa_destroy(&fp->irqs);
1336 }
1337
1338 static const struct net_device_ops fun_netdev_ops = {
1339 .ndo_open = funeth_open,
1340 .ndo_stop = funeth_close,
1341 .ndo_start_xmit = fun_start_xmit,
1342 .ndo_get_stats64 = fun_get_stats64,
1343 .ndo_change_mtu = fun_change_mtu,
1344 .ndo_set_mac_address = fun_set_macaddr,
1345 .ndo_validate_addr = eth_validate_addr,
1346 .ndo_eth_ioctl = fun_ioctl,
1347 .ndo_uninit = fun_uninit,
1348 .ndo_bpf = fun_xdp,
1349 .ndo_xdp_xmit = fun_xdp_xmit_frames,
1350 .ndo_set_vf_mac = fun_set_vf_mac,
1351 .ndo_set_vf_vlan = fun_set_vf_vlan,
1352 .ndo_set_vf_rate = fun_set_vf_rate,
1353 .ndo_get_vf_config = fun_get_vf_config,
1354 .ndo_get_devlink_port = fun_get_devlink_port,
1355 };
1356
1357 #define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
1358 NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
1359 NETIF_F_GSO_UDP_TUNNEL_CSUM)
1360 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
1361 NETIF_F_GSO_UDP_L4)
1362 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
1363 GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
1364
1365 static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx)
1366 {
1367 unsigned int i;
1368
1369 for (i = 0; i < fp->indir_table_nentries; i++)
1370 fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx);
1371 }
1372
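/* Reset the RSS indirection table for @nrx Rx queues. The table size is the
 * largest multiple of @nrx that fits in the maximum table size, so every
 * queue appears the same number of times.
 */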
1378 static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx)
1379 {
1380 struct funeth_priv *fp = netdev_priv(dev);
1381
1382 if (!fp->rss_cfg)
1383 return;
1384
1385
1386
1387
1388 fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx);
1389 fun_dflt_rss_indir(fp, nrx);
1390 }
1391
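/* Update RSS for a new Rx queue count. With @only_if_needed the update is
 * skipped unless the current table references a queue that would disappear.
 * On failure the previous table and size are restored.
 */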
1397 static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx,
1398 bool only_if_needed)
1399 {
1400 struct funeth_priv *fp = netdev_priv(dev);
1401 u32 old_lut[FUN_ETH_RSS_MAX_INDIR_ENT];
1402 unsigned int i, oldsz;
1403 int err;
1404
1405 if (!fp->rss_cfg)
1406 return 0;
1407
1408 if (only_if_needed) {
1409 for (i = 0; i < fp->indir_table_nentries; i++)
1410 if (fp->indir_table[i] >= nrx)
1411 break;
1412
1413 if (i >= fp->indir_table_nentries)
1414 return 0;
1415 }
1416
1417 memcpy(old_lut, fp->indir_table, sizeof(old_lut));
1418 oldsz = fp->indir_table_nentries;
1419 fun_reset_rss_indir(dev, nrx);
1420
1421 err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
1422 fp->indir_table, FUN_ADMIN_SUBOP_MODIFY);
1423 if (!err)
1424 return 0;
1425
1426 memcpy(fp->indir_table, old_lut, sizeof(old_lut));
1427 fp->indir_table_nentries = oldsz;
1428 return err;
1429 }
1430
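/* Allocate the DMA-coherent area holding the RSS key and indirection table
 * and initialize both to defaults. Only done for ports with offload support.
 */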
1436 static int fun_init_rss(struct net_device *dev)
1437 {
1438 struct funeth_priv *fp = netdev_priv(dev);
1439 size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table);
1440
1441 fp->rss_hw_id = FUN_HCI_ID_INVALID;
1442 if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS))
1443 return 0;
1444
1445 fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size,
1446 &fp->rss_dma_addr, GFP_KERNEL);
1447 if (!fp->rss_cfg)
1448 return -ENOMEM;
1449
1450 fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ;
1451 netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key));
1452 fun_reset_rss_indir(dev, dev->real_num_rx_queues);
1453 return 0;
1454 }
1455
1456 static void fun_free_rss(struct funeth_priv *fp)
1457 {
1458 if (fp->rss_cfg) {
1459 dma_free_coherent(&fp->pdev->dev,
1460 sizeof(fp->rss_key) + sizeof(fp->indir_table),
1461 fp->rss_cfg, fp->rss_dma_addr);
1462 fp->rss_cfg = NULL;
1463 }
1464 }
1465
1466 void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
1467 unsigned int nrx)
1468 {
1469 netif_set_real_num_tx_queues(netdev, ntx);
1470 if (nrx != netdev->real_num_rx_queues) {
1471 netif_set_real_num_rx_queues(netdev, nrx);
1472 fun_reset_rss_indir(netdev, nrx);
1473 }
1474 }
1475
1476 static int fun_init_stats_area(struct funeth_priv *fp)
1477 {
1478 unsigned int nstats;
1479
1480 if (!(fp->port_caps & FUN_PORT_CAP_STATS))
1481 return 0;
1482
1483 nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
1484 PORT_MAC_FEC_STATS_MAX;
1485
1486 fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64),
1487 &fp->stats_dma_addr, GFP_KERNEL);
1488 if (!fp->stats)
1489 return -ENOMEM;
1490 return 0;
1491 }
1492
1493 static void fun_free_stats_area(struct funeth_priv *fp)
1494 {
1495 unsigned int nstats;
1496
1497 if (fp->stats) {
                nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
                         PORT_MAC_FEC_STATS_MAX;
1499 dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64),
1500 fp->stats, fp->stats_dma_addr);
1501 fp->stats = NULL;
1502 }
1503 }
1504
1505 static int fun_dl_port_register(struct net_device *netdev)
1506 {
1507 struct funeth_priv *fp = netdev_priv(netdev);
1508 struct devlink *dl = priv_to_devlink(fp->fdev);
1509 struct devlink_port_attrs attrs = {};
1510 unsigned int idx;
1511
1512 if (fp->port_caps & FUN_PORT_CAP_VPORT) {
1513 attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
1514 idx = fp->lport;
1515 } else {
1516 idx = netdev->dev_port;
1517 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
1518 attrs.lanes = fp->lane_attrs & 7;
1519 if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) {
1520 attrs.split = 1;
1521 attrs.phys.port_number = fp->lport & ~3;
1522 attrs.phys.split_subport_number = fp->lport & 3;
1523 } else {
1524 attrs.phys.port_number = fp->lport;
1525 }
1526 }
1527
1528 devlink_port_attrs_set(&fp->dl_port, &attrs);
1529
1530 return devlink_port_register(dl, &fp->dl_port, idx);
1531 }
1532
1533
1534 static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx,
1535 unsigned int *nrx)
1536 {
1537 int neth;
1538
1539 if (ed->num_ports > 1 || is_kdump_kernel()) {
1540 *ntx = 1;
1541 *nrx = 1;
1542 return 0;
1543 }
1544
1545 neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH);
1546 if (neth < 0)
1547 return neth;
1548
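        /* Cap the per-port queue counts by the SQs available to the port
         * (one appears to be held in reserve), the number of online CPUs,
         * the device's ETH Tx flow resources, and the maximum RSS
         * indirection table size.
         */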
1556 *ntx = min(ed->nsqs_per_port - 1, num_online_cpus());
1557 *nrx = *ntx;
1558 if (*ntx > neth)
1559 *ntx = neth;
1560 if (*nrx > FUN_ETH_RSS_MAX_INDIR_ENT)
1561 *nrx = FUN_ETH_RSS_MAX_INDIR_ENT;
1562 return 0;
1563 }
1564
1565 static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs)
1566 {
1567 unsigned int ntx, nrx;
1568
1569 ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES);
1570 nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES);
1571 if (ntx <= nrx) {
1572 ntx = min(ntx, nsqs / 2);
1573 nrx = min(nrx, nsqs - ntx);
1574 } else {
1575 nrx = min(nrx, nsqs / 2);
1576 ntx = min(ntx, nsqs - nrx);
1577 }
1578
1579 netif_set_real_num_tx_queues(dev, ntx);
1580 netif_set_real_num_rx_queues(dev, nrx);
1581 }
1582
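/* Replace the device's queues with a freshly allocated set (same counts, new
 * settings such as ring depths). The old queues are destroyed; if the new
 * ones cannot be brought up the device is left without a data path.
 */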
1588 int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
1589 struct netlink_ext_ack *extack)
1590 {
1591 struct fun_qset oldqs = { .state = FUN_QSTATE_DESTROYED };
1592 struct funeth_priv *fp = netdev_priv(dev);
1593 int err;
1594
1595 newqs->nrxqs = dev->real_num_rx_queues;
1596 newqs->ntxqs = dev->real_num_tx_queues;
1597 newqs->nxdpqs = fp->num_xdpqs;
1598 newqs->state = FUN_QSTATE_INIT_SW;
1599 err = fun_alloc_rings(dev, newqs);
1600 if (err) {
1601 NL_SET_ERR_MSG_MOD(extack,
1602 "Unable to allocate memory for new queues, keeping current settings");
1603 return err;
1604 }
1605
1606 fun_down(dev, &oldqs);
1607
1608 err = fun_up(dev, newqs);
1609 if (!err)
1610 return 0;
1611
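        /* fun_up() freed what it had touched but left the new queues in
         * software-only state; destroy them fully and report the failure.
         */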
1616 newqs->state = FUN_QSTATE_DESTROYED;
1617 fun_free_rings(dev, newqs);
1618 NL_SET_ERR_MSG_MOD(extack, "Unable to restore the data path with the new queues.");
1619 return err;
1620 }
1621
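/* Change the number of Rx/Tx queues. Queues common to the old and new counts
 * are kept; additional queues are allocated and brought up, surplus queues
 * are torn down, and RSS and the netdev's real queue counts are updated
 * around a synchronize_net() so readers see a consistent queue set.
 */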
1626 int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
1627 unsigned int nrx)
1628 {
1629 unsigned int keep_tx = min(dev->real_num_tx_queues, ntx);
1630 unsigned int keep_rx = min(dev->real_num_rx_queues, nrx);
1631 struct funeth_priv *fp = netdev_priv(dev);
1632 struct fun_qset oldqs = {
1633 .rxqs = rtnl_dereference(fp->rxqs),
1634 .txqs = fp->txqs,
1635 .nrxqs = dev->real_num_rx_queues,
1636 .ntxqs = dev->real_num_tx_queues,
1637 .rxq_start = keep_rx,
1638 .txq_start = keep_tx,
1639 .state = FUN_QSTATE_DESTROYED
1640 };
1641 struct fun_qset newqs = {
1642 .nrxqs = nrx,
1643 .ntxqs = ntx,
1644 .rxq_start = keep_rx,
1645 .txq_start = keep_tx,
1646 .cq_depth = fp->cq_depth,
1647 .rq_depth = fp->rq_depth,
1648 .sq_depth = fp->sq_depth,
1649 .state = FUN_QSTATE_INIT_FULL
1650 };
1651 int i, err;
1652
1653 err = fun_alloc_rings(dev, &newqs);
1654 if (err)
1655 goto free_irqs;
1656
1657 err = fun_enable_irqs(dev);
1658 if (err)
1659 goto free_rings;
1660
1661
1662 memcpy(newqs.rxqs, oldqs.rxqs, keep_rx * sizeof(*oldqs.rxqs));
1663 memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs));
1664
1665 if (nrx < dev->real_num_rx_queues) {
1666 err = fun_rss_set_qnum(dev, nrx, true);
1667 if (err)
1668 goto disable_tx_irqs;
1669
1670 for (i = nrx; i < dev->real_num_rx_queues; i++)
1671 fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi,
1672 struct fun_irq, napi));
1673
1674 netif_set_real_num_rx_queues(dev, nrx);
1675 }
1676
1677 if (ntx < dev->real_num_tx_queues)
1678 netif_set_real_num_tx_queues(dev, ntx);
1679
1680 rcu_assign_pointer(fp->rxqs, newqs.rxqs);
1681 fp->txqs = newqs.txqs;
1682 synchronize_net();
1683
1684 if (ntx > dev->real_num_tx_queues)
1685 netif_set_real_num_tx_queues(dev, ntx);
1686
1687 if (nrx > dev->real_num_rx_queues) {
1688 netif_set_real_num_rx_queues(dev, nrx);
1689 fun_rss_set_qnum(dev, nrx, false);
1690 }
1691
1692
1693 for (i = keep_tx; i < oldqs.ntxqs; i++)
1694 fun_disable_one_irq(oldqs.txqs[i]->irq);
1695
1696 fun_free_rings(dev, &oldqs);
1697 fun_prune_queue_irqs(dev);
1698 return 0;
1699
1700 disable_tx_irqs:
1701 for (i = oldqs.ntxqs; i < ntx; i++)
1702 fun_disable_one_irq(newqs.txqs[i]->irq);
1703 free_rings:
1704 newqs.state = FUN_QSTATE_DESTROYED;
1705 fun_free_rings(dev, &newqs);
1706 free_irqs:
1707 fun_prune_queue_irqs(dev);
1708 return err;
1709 }
1710
1711 static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
1712 {
1713 struct fun_dev *fdev = &ed->fdev;
1714 struct net_device *netdev;
1715 struct funeth_priv *fp;
1716 unsigned int ntx, nrx;
1717 int rc;
1718
1719 rc = fun_max_qs(ed, &ntx, &nrx);
1720 if (rc)
1721 return rc;
1722
1723 netdev = alloc_etherdev_mqs(sizeof(*fp), ntx, nrx);
1724 if (!netdev) {
1725 rc = -ENOMEM;
1726 goto done;
1727 }
1728
1729 netdev->dev_port = portid;
1730 fun_queue_defaults(netdev, ed->nsqs_per_port);
1731
1732 fp = netdev_priv(netdev);
1733 fp->fdev = fdev;
1734 fp->pdev = to_pci_dev(fdev->dev);
1735 fp->netdev = netdev;
1736 xa_init(&fp->irqs);
1737 fp->rx_irq_ofst = ntx;
1738 seqcount_init(&fp->link_seq);
1739
1740 fp->lport = INVALID_LPORT;
1741 rc = fun_port_create(netdev);
1742 if (rc)
1743 goto free_netdev;
1744
1745
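        /* Bind the port to CQ 0, presumably the admin CQ, so asynchronous
         * port event notifications can be delivered.
         */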
1746 rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_PORT, portid,
1747 FUN_ADMIN_BIND_TYPE_EPCQ, 0);
1748 if (rc)
1749 goto destroy_port;
1750
1751 rc = fun_get_port_attributes(netdev);
1752 if (rc)
1753 goto destroy_port;
1754
1755 rc = fun_init_rss(netdev);
1756 if (rc)
1757 goto destroy_port;
1758
1759 rc = fun_init_stats_area(fp);
1760 if (rc)
1761 goto free_rss;
1762
1763 SET_NETDEV_DEV(netdev, fdev->dev);
1764 netdev->netdev_ops = &fun_netdev_ops;
1765
1766 netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM;
1767 if (fp->port_caps & FUN_PORT_CAP_OFFLOADS)
1768 netdev->hw_features |= NETIF_F_HW_CSUM | TSO_FLAGS;
1769 if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS)
1770 netdev->hw_features |= GSO_ENCAP_FLAGS;
1771
1772 netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
1773 netdev->vlan_features = netdev->features & VLAN_FEAT;
1774 netdev->mpls_features = netdev->vlan_features;
1775 netdev->hw_enc_features = netdev->hw_features;
1776
1777 netdev->min_mtu = ETH_MIN_MTU;
1778 netdev->max_mtu = FUN_MAX_MTU;
1779
1780 fun_set_ethtool_ops(netdev);
1781
1782
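        /* Default ring depths and interrupt coalescing settings, capped by
         * the device's queue depth limit.
         */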
1783 fp->sq_depth = min(SQ_DEPTH, fdev->q_depth);
1784 fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);
1785 fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);
1786 fp->rx_coal_usec = CQ_INTCOAL_USEC;
1787 fp->rx_coal_count = CQ_INTCOAL_NPKT;
1788 fp->tx_coal_usec = SQ_INTCOAL_USEC;
1789 fp->tx_coal_count = SQ_INTCOAL_NPKT;
1790 fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
1791
1792 rc = fun_dl_port_register(netdev);
1793 if (rc)
1794 goto free_stats;
1795
1796 fp->ktls_id = FUN_HCI_ID_INVALID;
1797 fun_ktls_init(netdev);
1798
1799 netif_carrier_off(netdev);
1800 ed->netdevs[portid] = netdev;
1801 rc = register_netdev(netdev);
1802 if (rc)
1803 goto unreg_devlink;
1804
1805 if (fp->dl_port.devlink)
1806 devlink_port_type_eth_set(&fp->dl_port, netdev);
1807
1808 return 0;
1809
1810 unreg_devlink:
1811 ed->netdevs[portid] = NULL;
1812 fun_ktls_cleanup(fp);
1813 if (fp->dl_port.devlink)
1814 devlink_port_unregister(&fp->dl_port);
1815 free_stats:
1816 fun_free_stats_area(fp);
1817 free_rss:
1818 fun_free_rss(fp);
1819 destroy_port:
1820 fun_port_destroy(netdev);
1821 free_netdev:
1822 free_netdev(netdev);
1823 done:
1824 dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc);
1825 return rc;
1826 }
1827
1828 static void fun_destroy_netdev(struct net_device *netdev)
1829 {
1830 struct funeth_priv *fp;
1831
1832 fp = netdev_priv(netdev);
1833 if (fp->dl_port.devlink) {
1834 devlink_port_type_clear(&fp->dl_port);
1835 devlink_port_unregister(&fp->dl_port);
1836 }
1837 unregister_netdev(netdev);
1838 fun_ktls_cleanup(fp);
1839 fun_free_stats_area(fp);
1840 fun_free_rss(fp);
1841 fun_port_destroy(netdev);
1842 free_netdev(netdev);
1843 }
1844
1845 static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports)
1846 {
1847 struct fun_dev *fd = &ed->fdev;
1848 int i, rc;
1849
1850
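        /* One IRQ and the first two queue ids appear to be reserved for the
         * admin queue pair; divide what remains among the ports.
         */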
1851 ed->nsqs_per_port = min(fd->num_irqs - 1,
1852 fd->kern_end_qid - 2) / nports;
1853 if (ed->nsqs_per_port < 2) {
1854 dev_err(fd->dev, "Too few SQs for %u ports", nports);
1855 return -EINVAL;
1856 }
1857
1858 ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL);
1859 if (!ed->netdevs)
1860 return -ENOMEM;
1861
1862 ed->num_ports = nports;
1863 for (i = 0; i < nports; i++) {
1864 rc = fun_create_netdev(ed, i);
1865 if (rc)
1866 goto free_netdevs;
1867 }
1868
1869 return 0;
1870
1871 free_netdevs:
1872 while (i)
1873 fun_destroy_netdev(ed->netdevs[--i]);
1874 kfree(ed->netdevs);
1875 ed->netdevs = NULL;
1876 ed->num_ports = 0;
1877 return rc;
1878 }
1879
1880 static void fun_destroy_ports(struct fun_ethdev *ed)
1881 {
1882 unsigned int i;
1883
1884 for (i = 0; i < ed->num_ports; i++)
1885 fun_destroy_netdev(ed->netdevs[i]);
1886
1887 kfree(ed->netdevs);
1888 ed->netdevs = NULL;
1889 ed->num_ports = 0;
1890 }
1891
1892 static void fun_update_link_state(const struct fun_ethdev *ed,
1893 const struct fun_admin_port_notif *notif)
1894 {
1895 unsigned int port_idx = be16_to_cpu(notif->id);
1896 struct net_device *netdev;
1897 struct funeth_priv *fp;
1898
1899 if (port_idx >= ed->num_ports)
1900 return;
1901
1902 netdev = ed->netdevs[port_idx];
1903 fp = netdev_priv(netdev);
1904
1905 write_seqcount_begin(&fp->link_seq);
1906 fp->link_speed = be32_to_cpu(notif->speed) * 10;
1907 fp->active_fc = notif->flow_ctrl;
1908 fp->active_fec = notif->fec;
1909 fp->xcvr_type = notif->xcvr_type;
1910 fp->link_down_reason = notif->link_down_reason;
1911 fp->lp_advertising = be64_to_cpu(notif->lp_advertising);
1912
1913 if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN)
1914 netif_carrier_off(netdev);
1915 if (notif->link_state & FUN_PORT_FLAG_MAC_UP)
1916 netif_carrier_on(netdev);
1917
1918 write_seqcount_end(&fp->link_seq);
1919 fun_report_link(netdev);
1920 }
1921
1922
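/* Handler for asynchronous admin queue events: link state notifications and
 * changes to the device's port resource count.
 */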
1923 static void fun_event_cb(struct fun_dev *fdev, void *entry)
1924 {
1925 u8 op = ((struct fun_admin_rsp_common *)entry)->op;
1926
1927 if (op == FUN_ADMIN_OP_PORT) {
1928 const struct fun_admin_port_notif *rsp = entry;
1929
1930 if (rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) {
1931 fun_update_link_state(to_fun_ethdev(fdev), rsp);
1932 } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) {
1933 const struct fun_admin_res_count_rsp *r = entry;
1934
1935 if (r->count.data)
1936 set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags);
1937 else
1938 set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags);
1939 fun_serv_sched(fdev);
1940 } else {
1941 dev_info(fdev->dev, "adminq event unexpected op %u subop %u",
1942 op, rsp->subop);
1943 }
1944 } else {
1945 dev_info(fdev->dev, "adminq event unexpected op %u", op);
1946 }
1947 }
1948
1949
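/* Service task callback: performs deferred port deletion and recreates the
 * port set when the device reports a changed port count.
 */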
1950 static void fun_service_cb(struct fun_dev *fdev)
1951 {
1952 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1953 int rc;
1954
1955 if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags))
1956 fun_destroy_ports(ed);
1957
1958 if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags))
1959 return;
1960
1961 rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
1962 if (rc < 0 || rc == ed->num_ports)
1963 return;
1964
1965 if (ed->num_ports)
1966 fun_destroy_ports(ed);
1967 if (rc)
1968 fun_create_ports(ed, rc);
1969 }
1970
1971 static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs)
1972 {
1973 struct fun_dev *fdev = pci_get_drvdata(pdev);
1974 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1975 int rc;
1976
1977 if (nvfs == 0) {
1978 if (pci_vfs_assigned(pdev)) {
1979 dev_warn(&pdev->dev,
1980 "Cannot disable SR-IOV while VFs are assigned\n");
1981 return -EPERM;
1982 }
1983
1984 mutex_lock(&ed->state_mutex);
1985 fun_free_vports(ed);
1986 mutex_unlock(&ed->state_mutex);
1987 pci_disable_sriov(pdev);
1988 return 0;
1989 }
1990
1991 rc = pci_enable_sriov(pdev, nvfs);
1992 if (rc)
1993 return rc;
1994
1995 mutex_lock(&ed->state_mutex);
1996 rc = fun_init_vports(ed, nvfs);
1997 mutex_unlock(&ed->state_mutex);
1998 if (rc) {
1999 pci_disable_sriov(pdev);
2000 return rc;
2001 }
2002
2003 return nvfs;
2004 }
2005
2006 static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2007 {
2008 struct fun_dev_params aqreq = {
2009 .cqe_size_log2 = ilog2(ADMIN_CQE_SIZE),
2010 .sqe_size_log2 = ilog2(ADMIN_SQE_SIZE),
2011 .cq_depth = ADMIN_CQ_DEPTH,
2012 .sq_depth = ADMIN_SQ_DEPTH,
2013 .rq_depth = ADMIN_RQ_DEPTH,
2014 .min_msix = 2,
2015 .event_cb = fun_event_cb,
2016 .serv_cb = fun_service_cb,
2017 };
2018 struct devlink *devlink;
2019 struct fun_ethdev *ed;
2020 struct fun_dev *fdev;
2021 int rc;
2022
2023 devlink = fun_devlink_alloc(&pdev->dev);
2024 if (!devlink) {
2025 dev_err(&pdev->dev, "devlink alloc failed\n");
2026 return -ENOMEM;
2027 }
2028
2029 ed = devlink_priv(devlink);
2030 mutex_init(&ed->state_mutex);
2031
2032 fdev = &ed->fdev;
2033 rc = fun_dev_enable(fdev, pdev, &aqreq, KBUILD_MODNAME);
2034 if (rc)
2035 goto free_devlink;
2036
2037 rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
2038 if (rc > 0)
2039 rc = fun_create_ports(ed, rc);
2040 if (rc < 0)
2041 goto disable_dev;
2042
2043 fun_serv_restart(fdev);
2044 fun_devlink_register(devlink);
2045 return 0;
2046
2047 disable_dev:
2048 fun_dev_disable(fdev);
2049 free_devlink:
2050 mutex_destroy(&ed->state_mutex);
2051 fun_devlink_free(devlink);
2052 return rc;
2053 }
2054
2055 static void funeth_remove(struct pci_dev *pdev)
2056 {
2057 struct fun_dev *fdev = pci_get_drvdata(pdev);
2058 struct devlink *devlink;
2059 struct fun_ethdev *ed;
2060
2061 ed = to_fun_ethdev(fdev);
2062 devlink = priv_to_devlink(ed);
2063 fun_devlink_unregister(devlink);
2064
2065 #ifdef CONFIG_PCI_IOV
2066 funeth_sriov_configure(pdev, 0);
2067 #endif
2068
2069 fun_serv_stop(fdev);
2070 fun_destroy_ports(ed);
2071 fun_dev_disable(fdev);
2072 mutex_destroy(&ed->state_mutex);
2073
2074 fun_devlink_free(devlink);
2075 }
2076
2077 static struct pci_driver funeth_driver = {
2078 .name = KBUILD_MODNAME,
2079 .id_table = funeth_id_table,
2080 .probe = funeth_probe,
2081 .remove = funeth_remove,
2082 .shutdown = funeth_remove,
2083 .sriov_configure = funeth_sriov_configure,
2084 };
2085
2086 module_pci_driver(funeth_driver);
2087
2088 MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
2089 MODULE_DESCRIPTION("Fungible Ethernet Network Driver");
2090 MODULE_LICENSE("Dual BSD/GPL");
2091 MODULE_DEVICE_TABLE(pci, funeth_id_table);