0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0036
0037 #include <linux/bitmap.h>
0038 #include <linux/crc32.h>
0039 #include <linux/ctype.h>
0040 #include <linux/debugfs.h>
0041 #include <linux/err.h>
0042 #include <linux/etherdevice.h>
0043 #include <linux/firmware.h>
0044 #include <linux/if.h>
0045 #include <linux/if_vlan.h>
0046 #include <linux/init.h>
0047 #include <linux/log2.h>
0048 #include <linux/mdio.h>
0049 #include <linux/module.h>
0050 #include <linux/moduleparam.h>
0051 #include <linux/mutex.h>
0052 #include <linux/netdevice.h>
0053 #include <linux/pci.h>
0054 #include <linux/aer.h>
0055 #include <linux/rtnetlink.h>
0056 #include <linux/sched.h>
0057 #include <linux/seq_file.h>
0058 #include <linux/sockios.h>
0059 #include <linux/vmalloc.h>
0060 #include <linux/workqueue.h>
0061 #include <net/neighbour.h>
0062 #include <net/netevent.h>
0063 #include <net/addrconf.h>
0064 #include <net/bonding.h>
0065 #include <linux/uaccess.h>
0066 #include <linux/crash_dump.h>
0067 #include <net/udp_tunnel.h>
0068 #include <net/xfrm.h>
0069 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
0070 #include <net/tls.h>
0071 #endif
0072
0073 #include "cxgb4.h"
0074 #include "cxgb4_filter.h"
0075 #include "t4_regs.h"
0076 #include "t4_values.h"
0077 #include "t4_msg.h"
0078 #include "t4fw_api.h"
0079 #include "t4fw_version.h"
0080 #include "cxgb4_dcb.h"
0081 #include "srq.h"
0082 #include "cxgb4_debugfs.h"
0083 #include "clip_tbl.h"
0084 #include "l2t.h"
0085 #include "smt.h"
0086 #include "sched.h"
0087 #include "cxgb4_tc_u32.h"
0088 #include "cxgb4_tc_flower.h"
0089 #include "cxgb4_tc_mqprio.h"
0090 #include "cxgb4_tc_matchall.h"
0091 #include "cxgb4_ptp.h"
0092 #include "cxgb4_cudbg.h"
0093
0094 char cxgb4_driver_name[] = KBUILD_MODNAME;
0095
0096 #define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
0097
0098 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
0099 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
0100 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
0101
0102
0103
0104 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
0105 static const struct pci_device_id cxgb4_pci_tbl[] = {
0106 #define CXGB4_UNIFIED_PF 0x4
0107
0108 #define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
0109
0110
0111
0112
0113 #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
0114
0115 #define CH_PCI_ID_TABLE_ENTRY(devid) \
0116 {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
0117
0118 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
0119 { 0, } \
0120 }
0121
0122 #include "t4_pci_id_tbl.h"
0123
0124 #define FW4_FNAME "cxgb4/t4fw.bin"
0125 #define FW5_FNAME "cxgb4/t5fw.bin"
0126 #define FW6_FNAME "cxgb4/t6fw.bin"
0127 #define FW4_CFNAME "cxgb4/t4-config.txt"
0128 #define FW5_CFNAME "cxgb4/t5-config.txt"
0129 #define FW6_CFNAME "cxgb4/t6-config.txt"
0130 #define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
0131 #define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
0132 #define PHY_AQ1202_DEVICEID 0x4409
0133 #define PHY_BCM84834_DEVICEID 0x4486
0134
0135 MODULE_DESCRIPTION(DRV_DESC);
0136 MODULE_AUTHOR("Chelsio Communications");
0137 MODULE_LICENSE("Dual BSD/GPL");
0138 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
0139 MODULE_FIRMWARE(FW4_FNAME);
0140 MODULE_FIRMWARE(FW5_FNAME);
0141 MODULE_FIRMWARE(FW6_FNAME);
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152 static int msi = 2;
0153
0154 module_param(msi, int, 0644);
0155 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169 static int rx_dma_offset = 2;
0170
0171
0172
0173
0174
0175
0176
0177 static int select_queue;
0178 module_param(select_queue, int, 0644);
0179 MODULE_PARM_DESC(select_queue,
0180 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
0181
0182 static struct dentry *cxgb4_debugfs_root;
0183
0184 LIST_HEAD(adapter_list);
0185 DEFINE_MUTEX(uld_mutex);
0186 LIST_HEAD(uld_list);
0187
0188 static int cfg_queues(struct adapter *adap);
0189
0190 static void link_report(struct net_device *dev)
0191 {
0192 if (!netif_carrier_ok(dev))
0193 netdev_info(dev, "link down\n");
0194 else {
0195 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
0196
0197 const char *s;
0198 const struct port_info *p = netdev_priv(dev);
0199
0200 switch (p->link_cfg.speed) {
0201 case 100:
0202 s = "100Mbps";
0203 break;
0204 case 1000:
0205 s = "1Gbps";
0206 break;
0207 case 10000:
0208 s = "10Gbps";
0209 break;
0210 case 25000:
0211 s = "25Gbps";
0212 break;
0213 case 40000:
0214 s = "40Gbps";
0215 break;
0216 case 50000:
0217 s = "50Gbps";
0218 break;
0219 case 100000:
0220 s = "100Gbps";
0221 break;
0222 default:
0223 pr_info("%s: unsupported speed: %d\n",
0224 dev->name, p->link_cfg.speed);
0225 return;
0226 }
0227
0228 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
0229 fc[p->link_cfg.fc]);
0230 }
0231 }
0232
0233 #ifdef CONFIG_CHELSIO_T4_DCB
0234
0235 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
0236 {
0237 struct port_info *pi = netdev_priv(dev);
0238 struct adapter *adap = pi->adapter;
0239 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
0240 int i;
0241
0242
0243
0244
0245 for (i = 0; i < pi->nqsets; i++, txq++) {
0246 u32 name, value;
0247 int err;
0248
0249 name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
0250 FW_PARAMS_PARAM_X_V(
0251 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
0252 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
0253 value = enable ? i : 0xffffffff;
0254
0255
0256
0257
0258
0259 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
0260 &name, &value,
0261 -FW_CMD_MAX_TIMEOUT);
0262
0263 if (err)
0264 dev_err(adap->pdev_dev,
0265 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
0266 enable ? "set" : "unset", pi->port_id, i, -err);
0267 else
0268 txq->dcb_prio = enable ? value : 0;
0269 }
0270 }
0271
0272 int cxgb4_dcb_enabled(const struct net_device *dev)
0273 {
0274 struct port_info *pi = netdev_priv(dev);
0275
0276 if (!pi->dcb.enabled)
0277 return 0;
0278
0279 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
0280 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
0281 }
0282 #endif
0283
0284 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
0285 {
0286 struct net_device *dev = adapter->port[port_id];
0287
0288
0289 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
0290 if (link_stat)
0291 netif_carrier_on(dev);
0292 else {
0293 #ifdef CONFIG_CHELSIO_T4_DCB
0294 if (cxgb4_dcb_enabled(dev)) {
0295 cxgb4_dcb_reset(dev);
0296 dcb_tx_queue_prio_enable(dev, false);
0297 }
0298 #endif
0299 netif_carrier_off(dev);
0300 }
0301
0302 link_report(dev);
0303 }
0304 }
0305
0306 void t4_os_portmod_changed(struct adapter *adap, int port_id)
0307 {
0308 static const char *mod_str[] = {
0309 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
0310 };
0311
0312 struct net_device *dev = adap->port[port_id];
0313 struct port_info *pi = netdev_priv(dev);
0314
0315 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
0316 netdev_info(dev, "port module unplugged\n");
0317 else if (pi->mod_type < ARRAY_SIZE(mod_str))
0318 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
0319 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
0320 netdev_info(dev, "%s: unsupported port module inserted\n",
0321 dev->name);
0322 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
0323 netdev_info(dev, "%s: unknown port module inserted\n",
0324 dev->name);
0325 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
0326 netdev_info(dev, "%s: transceiver module error\n", dev->name);
0327 else
0328 netdev_info(dev, "%s: unknown module type %d inserted\n",
0329 dev->name, pi->mod_type);
0330
0331
0332
0333
0334 pi->link_cfg.redo_l1cfg = netif_running(dev);
0335 }
0336
0337 int dbfifo_int_thresh = 10;
0338 module_param(dbfifo_int_thresh, int, 0644);
0339 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
0340
0341
0342
0343
0344 static int dbfifo_drain_delay = 1000;
0345 module_param(dbfifo_drain_delay, int, 0644);
0346 MODULE_PARM_DESC(dbfifo_drain_delay,
0347 "usecs to sleep while draining the dbfifo");
0348
0349 static inline int cxgb4_set_addr_hash(struct port_info *pi)
0350 {
0351 struct adapter *adap = pi->adapter;
0352 u64 vec = 0;
0353 bool ucast = false;
0354 struct hash_mac_addr *entry;
0355
0356
0357 list_for_each_entry(entry, &adap->mac_hlist, list) {
0358 ucast |= is_unicast_ether_addr(entry->addr);
0359 vec |= (1ULL << hash_mac_addr(entry->addr));
0360 }
0361 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
0362 vec, false);
0363 }
0364
0365 static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
0366 {
0367 struct port_info *pi = netdev_priv(netdev);
0368 struct adapter *adap = pi->adapter;
0369 int ret;
0370 u64 mhash = 0;
0371 u64 uhash = 0;
0372
0373
0374
0375
0376
0377 u16 idx[1] = {};
0378 bool free = false;
0379 bool ucast = is_unicast_ether_addr(mac_addr);
0380 const u8 *maclist[1] = {mac_addr};
0381 struct hash_mac_addr *new_entry;
0382
0383 ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
0384 idx, ucast ? &uhash : &mhash, false);
0385 if (ret < 0)
0386 goto out;
0387
0388
0389
0390
0391 if (uhash || mhash) {
0392 new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
0393 if (!new_entry)
0394 return -ENOMEM;
0395 ether_addr_copy(new_entry->addr, mac_addr);
0396 list_add_tail(&new_entry->list, &adap->mac_hlist);
0397 ret = cxgb4_set_addr_hash(pi);
0398 }
0399 out:
0400 return ret < 0 ? ret : 0;
0401 }
0402
0403 static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
0404 {
0405 struct port_info *pi = netdev_priv(netdev);
0406 struct adapter *adap = pi->adapter;
0407 int ret;
0408 const u8 *maclist[1] = {mac_addr};
0409 struct hash_mac_addr *entry, *tmp;
0410
0411
0412
0413
0414 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
0415 if (ether_addr_equal(entry->addr, mac_addr)) {
0416 list_del(&entry->list);
0417 kfree(entry);
0418 return cxgb4_set_addr_hash(pi);
0419 }
0420 }
0421
0422 ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
0423 return ret < 0 ? -EINVAL : 0;
0424 }
0425
0426
0427
0428
0429
0430 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
0431 {
0432 struct port_info *pi = netdev_priv(dev);
0433 struct adapter *adapter = pi->adapter;
0434
0435 __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
0436 __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
0437
0438 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
0439 mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
0440 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
0441 sleep_ok);
0442 }
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461 int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
0462 int *tcam_idx, const u8 *addr, bool persist,
0463 u8 *smt_idx)
0464 {
0465 struct adapter *adapter = pi->adapter;
0466 struct hash_mac_addr *entry, *new_entry;
0467 int ret;
0468
0469 ret = t4_change_mac(adapter, adapter->mbox, viid,
0470 *tcam_idx, addr, persist, smt_idx);
0471
0472 if (ret == -ENOMEM) {
0473
0474
0475
0476 list_for_each_entry(entry, &adapter->mac_hlist, list) {
0477 if (entry->iface_mac) {
0478 ether_addr_copy(entry->addr, addr);
0479 goto set_hash;
0480 }
0481 }
0482 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
0483 if (!new_entry)
0484 return -ENOMEM;
0485 ether_addr_copy(new_entry->addr, addr);
0486 new_entry->iface_mac = true;
0487 list_add_tail(&new_entry->list, &adapter->mac_hlist);
0488 set_hash:
0489 ret = cxgb4_set_addr_hash(pi);
0490 } else if (ret >= 0) {
0491 *tcam_idx = ret;
0492 ret = 0;
0493 }
0494
0495 return ret;
0496 }
0497
0498
0499
0500
0501
0502
0503
0504 static int link_start(struct net_device *dev)
0505 {
0506 struct port_info *pi = netdev_priv(dev);
0507 unsigned int mb = pi->adapter->mbox;
0508 int ret;
0509
0510
0511
0512
0513
0514 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
0515 dev->mtu, -1, -1, -1,
0516 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
0517 if (ret == 0)
0518 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
0519 dev->dev_addr, true, &pi->smt_idx);
0520 if (ret == 0)
0521 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
0522 &pi->link_cfg);
0523 if (ret == 0) {
0524 local_bh_disable();
0525 ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
0526 true, CXGB4_DCB_ENABLED);
0527 local_bh_enable();
0528 }
0529
0530 return ret;
0531 }
0532
0533 #ifdef CONFIG_CHELSIO_T4_DCB
0534
0535 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
0536 {
0537 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
0538 struct net_device *dev = adap->port[adap->chan_map[port]];
0539 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
0540 int new_dcb_enabled;
0541
0542 cxgb4_dcb_handle_fw_update(adap, pcmd);
0543 new_dcb_enabled = cxgb4_dcb_enabled(dev);
0544
0545
0546
0547
0548
0549 if (new_dcb_enabled != old_dcb_enabled)
0550 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
0551 }
0552 #endif
0553
0554
0555
0556 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
0557 const struct pkt_gl *gl)
0558 {
0559 u8 opcode = ((const struct rss_header *)rsp)->opcode;
0560
0561 rsp++;
0562
0563
0564
0565 if (unlikely(opcode == CPL_FW4_MSG &&
0566 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
0567 rsp++;
0568 opcode = ((const struct rss_header *)rsp)->opcode;
0569 rsp++;
0570 if (opcode != CPL_SGE_EGR_UPDATE) {
0571 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
0572 , opcode);
0573 goto out;
0574 }
0575 }
0576
0577 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
0578 const struct cpl_sge_egr_update *p = (void *)rsp;
0579 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
0580 struct sge_txq *txq;
0581
0582 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
0583 txq->restarts++;
0584 if (txq->q_type == CXGB4_TXQ_ETH) {
0585 struct sge_eth_txq *eq;
0586
0587 eq = container_of(txq, struct sge_eth_txq, q);
0588 t4_sge_eth_txq_egress_update(q->adap, eq, -1);
0589 } else {
0590 struct sge_uld_txq *oq;
0591
0592 oq = container_of(txq, struct sge_uld_txq, q);
0593 tasklet_schedule(&oq->qresume_tsk);
0594 }
0595 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
0596 const struct cpl_fw6_msg *p = (void *)rsp;
0597
0598 #ifdef CONFIG_CHELSIO_T4_DCB
0599 const struct fw_port_cmd *pcmd = (const void *)p->data;
0600 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
0601 unsigned int action =
0602 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
0603
0604 if (cmd == FW_PORT_CMD &&
0605 (action == FW_PORT_ACTION_GET_PORT_INFO ||
0606 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
0607 int port = FW_PORT_CMD_PORTID_G(
0608 be32_to_cpu(pcmd->op_to_portid));
0609 struct net_device *dev;
0610 int dcbxdis, state_input;
0611
0612 dev = q->adap->port[q->adap->chan_map[port]];
0613 dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
0614 ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
0615 : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
0616 & FW_PORT_CMD_DCBXDIS32_F));
0617 state_input = (dcbxdis
0618 ? CXGB4_DCB_INPUT_FW_DISABLED
0619 : CXGB4_DCB_INPUT_FW_ENABLED);
0620
0621 cxgb4_dcb_state_fsm(dev, state_input);
0622 }
0623
0624 if (cmd == FW_PORT_CMD &&
0625 action == FW_PORT_ACTION_L2_DCB_CFG)
0626 dcb_rpl(q->adap, pcmd);
0627 else
0628 #endif
0629 if (p->type == 0)
0630 t4_handle_fw_rpl(q->adap, p->data);
0631 } else if (opcode == CPL_L2T_WRITE_RPL) {
0632 const struct cpl_l2t_write_rpl *p = (void *)rsp;
0633
0634 do_l2t_write_rpl(q->adap, p);
0635 } else if (opcode == CPL_SMT_WRITE_RPL) {
0636 const struct cpl_smt_write_rpl *p = (void *)rsp;
0637
0638 do_smt_write_rpl(q->adap, p);
0639 } else if (opcode == CPL_SET_TCB_RPL) {
0640 const struct cpl_set_tcb_rpl *p = (void *)rsp;
0641
0642 filter_rpl(q->adap, p);
0643 } else if (opcode == CPL_ACT_OPEN_RPL) {
0644 const struct cpl_act_open_rpl *p = (void *)rsp;
0645
0646 hash_filter_rpl(q->adap, p);
0647 } else if (opcode == CPL_ABORT_RPL_RSS) {
0648 const struct cpl_abort_rpl_rss *p = (void *)rsp;
0649
0650 hash_del_filter_rpl(q->adap, p);
0651 } else if (opcode == CPL_SRQ_TABLE_RPL) {
0652 const struct cpl_srq_table_rpl *p = (void *)rsp;
0653
0654 do_srq_table_rpl(q->adap, p);
0655 } else
0656 dev_err(q->adap->pdev_dev,
0657 "unexpected CPL %#x on FW event queue\n", opcode);
0658 out:
0659 return 0;
0660 }
0661
0662 static void disable_msi(struct adapter *adapter)
0663 {
0664 if (adapter->flags & CXGB4_USING_MSIX) {
0665 pci_disable_msix(adapter->pdev);
0666 adapter->flags &= ~CXGB4_USING_MSIX;
0667 } else if (adapter->flags & CXGB4_USING_MSI) {
0668 pci_disable_msi(adapter->pdev);
0669 adapter->flags &= ~CXGB4_USING_MSI;
0670 }
0671 }
0672
0673
0674
0675
0676 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
0677 {
0678 struct adapter *adap = cookie;
0679 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
0680
0681 if (v & PFSW_F) {
0682 adap->swintr = 1;
0683 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
0684 }
0685 if (adap->flags & CXGB4_MASTER_PF)
0686 t4_slow_intr_handler(adap);
0687 return IRQ_HANDLED;
0688 }
0689
0690 int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
0691 cpumask_var_t *aff_mask, int idx)
0692 {
0693 int rv;
0694
0695 if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
0696 dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
0697 return -ENOMEM;
0698 }
0699
0700 cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
0701 *aff_mask);
0702
0703 rv = irq_set_affinity_hint(vec, *aff_mask);
0704 if (rv)
0705 dev_warn(adap->pdev_dev,
0706 "irq_set_affinity_hint %u failed %d\n",
0707 vec, rv);
0708
0709 return 0;
0710 }
0711
0712 void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
0713 {
0714 irq_set_affinity_hint(vec, NULL);
0715 free_cpumask_var(aff_mask);
0716 }
0717
0718 static int request_msix_queue_irqs(struct adapter *adap)
0719 {
0720 struct sge *s = &adap->sge;
0721 struct msix_info *minfo;
0722 int err, ethqidx;
0723
0724 if (s->fwevtq_msix_idx < 0)
0725 return -ENOMEM;
0726
0727 err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
0728 t4_sge_intr_msix, 0,
0729 adap->msix_info[s->fwevtq_msix_idx].desc,
0730 &s->fw_evtq);
0731 if (err)
0732 return err;
0733
0734 for_each_ethrxq(s, ethqidx) {
0735 minfo = s->ethrxq[ethqidx].msix;
0736 err = request_irq(minfo->vec,
0737 t4_sge_intr_msix, 0,
0738 minfo->desc,
0739 &s->ethrxq[ethqidx].rspq);
0740 if (err)
0741 goto unwind;
0742
0743 cxgb4_set_msix_aff(adap, minfo->vec,
0744 &minfo->aff_mask, ethqidx);
0745 }
0746 return 0;
0747
0748 unwind:
0749 while (--ethqidx >= 0) {
0750 minfo = s->ethrxq[ethqidx].msix;
0751 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
0752 free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
0753 }
0754 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
0755 return err;
0756 }
0757
0758 static void free_msix_queue_irqs(struct adapter *adap)
0759 {
0760 struct sge *s = &adap->sge;
0761 struct msix_info *minfo;
0762 int i;
0763
0764 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
0765 for_each_ethrxq(s, i) {
0766 minfo = s->ethrxq[i].msix;
0767 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
0768 free_irq(minfo->vec, &s->ethrxq[i].rspq);
0769 }
0770 }
0771
0772 static int setup_ppod_edram(struct adapter *adap)
0773 {
0774 unsigned int param, val;
0775 int ret;
0776
0777
0778
0779
0780
0781
0782
0783 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
0784 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));
0785
0786 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
0787 if (ret < 0) {
0788 dev_warn(adap->pdev_dev,
0789 "querying PPOD_EDRAM support failed: %d\n",
0790 ret);
0791 return -1;
0792 }
0793
0794 if (val != 1)
0795 return -1;
0796
0797 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
0798 if (ret < 0) {
0799 dev_err(adap->pdev_dev,
0800 "setting PPOD_EDRAM failed: %d\n", ret);
0801 return -1;
0802 }
0803 return 0;
0804 }
0805
0806 static void adap_config_hpfilter(struct adapter *adapter)
0807 {
0808 u32 param, val = 0;
0809 int ret;
0810
0811
0812
0813
0814 param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
0815 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
0816 1, ¶m, &val);
0817
0818
0819
0820
0821 if (ret < 0)
0822 dev_err(adapter->pdev_dev,
0823 "HP filter region isn't supported by FW\n");
0824 }
0825
0826 static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
0827 u16 rss_size, u16 viid)
0828 {
0829 struct adapter *adap = pi->adapter;
0830 int ret;
0831
0832 ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
0833 rss_size);
0834 if (ret)
0835 return ret;
0836
0837
0838
0839
0840
0841
0842 return t4_config_vi_rss(adap, adap->mbox, viid,
0843 FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
0844 FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
0845 FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
0846 FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
0847 FW_RSS_VI_CONFIG_CMD_UDPEN_F,
0848 rss[0]);
0849 }
0850
0851
0852
0853
0854
0855
0856
0857
0858
0859
0860 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
0861 {
0862 struct adapter *adapter = pi->adapter;
0863 const struct sge_eth_rxq *rxq;
0864 int i, err;
0865 u16 *rss;
0866
0867 rxq = &adapter->sge.ethrxq[pi->first_qset];
0868 rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
0869 if (!rss)
0870 return -ENOMEM;
0871
0872
0873 for (i = 0; i < pi->rss_size; i++, queues++)
0874 rss[i] = rxq[*queues].rspq.abs_id;
0875
0876 err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
0877 kfree(rss);
0878 return err;
0879 }
0880
0881
0882
0883
0884
0885
0886
0887 static int setup_rss(struct adapter *adap)
0888 {
0889 int i, j, err;
0890
0891 for_each_port(adap, i) {
0892 const struct port_info *pi = adap2pinfo(adap, i);
0893
0894
0895 for (j = 0; j < pi->rss_size; j++)
0896 pi->rss[j] = j % pi->nqsets;
0897
0898 err = cxgb4_write_rss(pi, pi->rss);
0899 if (err)
0900 return err;
0901 }
0902 return 0;
0903 }
0904
0905
0906
0907
0908 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
0909 {
0910 qid -= p->ingr_start;
0911 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
0912 }
0913
0914 void cxgb4_quiesce_rx(struct sge_rspq *q)
0915 {
0916 if (q->handler)
0917 napi_disable(&q->napi);
0918 }
0919
0920
0921
0922
0923 static void quiesce_rx(struct adapter *adap)
0924 {
0925 int i;
0926
0927 for (i = 0; i < adap->sge.ingr_sz; i++) {
0928 struct sge_rspq *q = adap->sge.ingr_map[i];
0929
0930 if (!q)
0931 continue;
0932
0933 cxgb4_quiesce_rx(q);
0934 }
0935 }
0936
0937
0938 static void disable_interrupts(struct adapter *adap)
0939 {
0940 struct sge *s = &adap->sge;
0941
0942 if (adap->flags & CXGB4_FULL_INIT_DONE) {
0943 t4_intr_disable(adap);
0944 if (adap->flags & CXGB4_USING_MSIX) {
0945 free_msix_queue_irqs(adap);
0946 free_irq(adap->msix_info[s->nd_msix_idx].vec,
0947 adap);
0948 } else {
0949 free_irq(adap->pdev->irq, adap);
0950 }
0951 quiesce_rx(adap);
0952 }
0953 }
0954
0955 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
0956 {
0957 if (q->handler)
0958 napi_enable(&q->napi);
0959
0960
0961 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
0962 SEINTARM_V(q->intr_params) |
0963 INGRESSQID_V(q->cntxt_id));
0964 }
0965
0966
0967
0968
0969 static void enable_rx(struct adapter *adap)
0970 {
0971 int i;
0972
0973 for (i = 0; i < adap->sge.ingr_sz; i++) {
0974 struct sge_rspq *q = adap->sge.ingr_map[i];
0975
0976 if (!q)
0977 continue;
0978
0979 cxgb4_enable_rx(adap, q);
0980 }
0981 }
0982
0983 static int setup_non_data_intr(struct adapter *adap)
0984 {
0985 int msix;
0986
0987 adap->sge.nd_msix_idx = -1;
0988 if (!(adap->flags & CXGB4_USING_MSIX))
0989 return 0;
0990
0991
0992 msix = cxgb4_get_msix_idx_from_bmap(adap);
0993 if (msix < 0)
0994 return -ENOMEM;
0995
0996 snprintf(adap->msix_info[msix].desc,
0997 sizeof(adap->msix_info[msix].desc),
0998 "%s", adap->port[0]->name);
0999
1000 adap->sge.nd_msix_idx = msix;
1001 return 0;
1002 }
1003
1004 static int setup_fw_sge_queues(struct adapter *adap)
1005 {
1006 struct sge *s = &adap->sge;
1007 int msix, err = 0;
1008
1009 bitmap_zero(s->starving_fl, s->egr_sz);
1010 bitmap_zero(s->txq_maperr, s->egr_sz);
1011
1012 if (adap->flags & CXGB4_USING_MSIX) {
1013 s->fwevtq_msix_idx = -1;
1014 msix = cxgb4_get_msix_idx_from_bmap(adap);
1015 if (msix < 0)
1016 return -ENOMEM;
1017
1018 snprintf(adap->msix_info[msix].desc,
1019 sizeof(adap->msix_info[msix].desc),
1020 "%s-FWeventq", adap->port[0]->name);
1021 } else {
1022 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1023 NULL, NULL, NULL, -1);
1024 if (err)
1025 return err;
1026 msix = -((int)s->intrq.abs_id + 1);
1027 }
1028
1029 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1030 msix, NULL, fwevtq_handler, NULL, -1);
1031 if (err && msix >= 0)
1032 cxgb4_free_msix_idx_in_bmap(adap, msix);
1033
1034 s->fwevtq_msix_idx = msix;
1035 return err;
1036 }
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046 static int setup_sge_queues(struct adapter *adap)
1047 {
1048 struct sge_uld_rxq_info *rxq_info = NULL;
1049 struct sge *s = &adap->sge;
1050 unsigned int cmplqid = 0;
1051 int err, i, j, msix = 0;
1052
1053 if (is_uld(adap))
1054 rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
1055
1056 if (!(adap->flags & CXGB4_USING_MSIX))
1057 msix = -((int)s->intrq.abs_id + 1);
1058
1059 for_each_port(adap, i) {
1060 struct net_device *dev = adap->port[i];
1061 struct port_info *pi = netdev_priv(dev);
1062 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1063 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1064
1065 for (j = 0; j < pi->nqsets; j++, q++) {
1066 if (msix >= 0) {
1067 msix = cxgb4_get_msix_idx_from_bmap(adap);
1068 if (msix < 0) {
1069 err = msix;
1070 goto freeout;
1071 }
1072
1073 snprintf(adap->msix_info[msix].desc,
1074 sizeof(adap->msix_info[msix].desc),
1075 "%s-Rx%d", dev->name, j);
1076 q->msix = &adap->msix_info[msix];
1077 }
1078
1079 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1080 msix, &q->fl,
1081 t4_ethrx_handler,
1082 NULL,
1083 t4_get_tp_ch_map(adap,
1084 pi->tx_chan));
1085 if (err)
1086 goto freeout;
1087 q->rspq.idx = j;
1088 memset(&q->stats, 0, sizeof(q->stats));
1089 }
1090
1091 q = &s->ethrxq[pi->first_qset];
1092 for (j = 0; j < pi->nqsets; j++, t++, q++) {
1093 err = t4_sge_alloc_eth_txq(adap, t, dev,
1094 netdev_get_tx_queue(dev, j),
1095 q->rspq.cntxt_id,
1096 !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
1097 if (err)
1098 goto freeout;
1099 }
1100 }
1101
1102 for_each_port(adap, i) {
1103
1104
1105
1106 if (rxq_info)
1107 cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
1108
1109 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1110 s->fw_evtq.cntxt_id, cmplqid);
1111 if (err)
1112 goto freeout;
1113 }
1114
1115 if (!is_t4(adap->params.chip)) {
1116 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
1117 netdev_get_tx_queue(adap->port[0], 0)
1118 , s->fw_evtq.cntxt_id, false);
1119 if (err)
1120 goto freeout;
1121 }
1122
1123 t4_write_reg(adap, is_t4(adap->params.chip) ?
1124 MPS_TRC_RSS_CONTROL_A :
1125 MPS_T5_TRC_RSS_CONTROL_A,
1126 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1127 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
1128 return 0;
1129 freeout:
1130 dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
1131 t4_free_sge_resources(adap);
1132 return err;
1133 }
1134
1135 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1136 struct net_device *sb_dev)
1137 {
1138 int txq;
1139
1140 #ifdef CONFIG_CHELSIO_T4_DCB
1141
1142
1143
1144
1145
1146 if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
1147 u16 vlan_tci;
1148 int err;
1149
1150 err = vlan_get_tag(skb, &vlan_tci);
1151 if (unlikely(err)) {
1152 if (net_ratelimit())
1153 netdev_warn(dev,
1154 "TX Packet without VLAN Tag on DCB Link\n");
1155 txq = 0;
1156 } else {
1157 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1158 #ifdef CONFIG_CHELSIO_T4_FCOE
1159 if (skb->protocol == htons(ETH_P_FCOE))
1160 txq = skb->priority & 0x7;
1161 #endif
1162 }
1163 return txq;
1164 }
1165 #endif
1166
1167 if (dev->num_tc) {
1168 struct port_info *pi = netdev2pinfo(dev);
1169 u8 ver, proto;
1170
1171 ver = ip_hdr(skb)->version;
1172 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
1173 ip_hdr(skb)->protocol;
1174
1175
1176 txq = netdev_pick_tx(dev, skb, sb_dev);
1177 if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
1178 skb->encapsulation ||
1179 cxgb4_is_ktls_skb(skb) ||
1180 (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
1181 txq = txq % pi->nqsets;
1182
1183 return txq;
1184 }
1185
1186 if (select_queue) {
1187 txq = (skb_rx_queue_recorded(skb)
1188 ? skb_get_rx_queue(skb)
1189 : smp_processor_id());
1190
1191 while (unlikely(txq >= dev->real_num_tx_queues))
1192 txq -= dev->real_num_tx_queues;
1193
1194 return txq;
1195 }
1196
1197 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
1198 }
1199
1200 static int closest_timer(const struct sge *s, int time)
1201 {
1202 int i, delta, match = 0, min_delta = INT_MAX;
1203
1204 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1205 delta = time - s->timer_val[i];
1206 if (delta < 0)
1207 delta = -delta;
1208 if (delta < min_delta) {
1209 min_delta = delta;
1210 match = i;
1211 }
1212 }
1213 return match;
1214 }
1215
1216 static int closest_thres(const struct sge *s, int thres)
1217 {
1218 int i, delta, match = 0, min_delta = INT_MAX;
1219
1220 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1221 delta = thres - s->counter_val[i];
1222 if (delta < 0)
1223 delta = -delta;
1224 if (delta < min_delta) {
1225 min_delta = delta;
1226 match = i;
1227 }
1228 }
1229 return match;
1230 }
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241 int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
1242 unsigned int us, unsigned int cnt)
1243 {
1244 struct adapter *adap = q->adap;
1245
1246 if ((us | cnt) == 0)
1247 cnt = 1;
1248
1249 if (cnt) {
1250 int err;
1251 u32 v, new_idx;
1252
1253 new_idx = closest_thres(&adap->sge, cnt);
1254 if (q->desc && q->pktcnt_idx != new_idx) {
1255
1256 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1257 FW_PARAMS_PARAM_X_V(
1258 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1259 FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1260 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1261 &v, &new_idx);
1262 if (err)
1263 return err;
1264 }
1265 q->pktcnt_idx = new_idx;
1266 }
1267
1268 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1269 q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
1270 return 0;
1271 }
1272
1273 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1274 {
1275 netdev_features_t changed = dev->features ^ features;
1276 const struct port_info *pi = netdev_priv(dev);
1277 int err;
1278
1279 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
1280 return 0;
1281
1282 err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
1283 pi->viid_mirror, -1, -1, -1, -1,
1284 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
1285 if (unlikely(err))
1286 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1287 return err;
1288 }
1289
1290 static int setup_debugfs(struct adapter *adap)
1291 {
1292 if (IS_ERR_OR_NULL(adap->debugfs_root))
1293 return -1;
1294
1295 #ifdef CONFIG_DEBUG_FS
1296 t4_setup_debugfs(adap);
1297 #endif
1298 return 0;
1299 }
1300
1301 static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
1302 struct sge_eth_rxq *mirror_rxq)
1303 {
1304 if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
1305 !(adap->flags & CXGB4_SHUTTING_DOWN))
1306 cxgb4_quiesce_rx(&mirror_rxq->rspq);
1307
1308 if (adap->flags & CXGB4_USING_MSIX) {
1309 cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
1310 mirror_rxq->msix->aff_mask);
1311 free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
1312 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1313 }
1314
1315 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1316 }
1317
1318 static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
1319 {
1320 struct port_info *pi = netdev2pinfo(dev);
1321 struct adapter *adap = netdev2adap(dev);
1322 struct sge_eth_rxq *mirror_rxq;
1323 struct sge *s = &adap->sge;
1324 int ret = 0, msix = 0;
1325 u16 i, rxqid;
1326 u16 *rss;
1327
1328 if (!pi->vi_mirror_count)
1329 return 0;
1330
1331 if (s->mirror_rxq[pi->port_id])
1332 return 0;
1333
1334 mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
1335 if (!mirror_rxq)
1336 return -ENOMEM;
1337
1338 s->mirror_rxq[pi->port_id] = mirror_rxq;
1339
1340 if (!(adap->flags & CXGB4_USING_MSIX))
1341 msix = -((int)adap->sge.intrq.abs_id + 1);
1342
1343 for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
1344 mirror_rxq = &s->mirror_rxq[pi->port_id][i];
1345
1346
1347 if (msix >= 0) {
1348 msix = cxgb4_get_msix_idx_from_bmap(adap);
1349 if (msix < 0) {
1350 ret = msix;
1351 goto out_free_queues;
1352 }
1353
1354 mirror_rxq->msix = &adap->msix_info[msix];
1355 snprintf(mirror_rxq->msix->desc,
1356 sizeof(mirror_rxq->msix->desc),
1357 "%s-mirrorrxq%d", dev->name, i);
1358 }
1359
1360 init_rspq(adap, &mirror_rxq->rspq,
1361 CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
1362 CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
1363 CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
1364 CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);
1365
1366 mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
1367
1368 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
1369 dev, msix, &mirror_rxq->fl,
1370 t4_ethrx_handler, NULL, 0);
1371 if (ret)
1372 goto out_free_msix_idx;
1373
1374
1375 if (adap->flags & CXGB4_USING_MSIX) {
1376 ret = request_irq(mirror_rxq->msix->vec,
1377 t4_sge_intr_msix, 0,
1378 mirror_rxq->msix->desc,
1379 &mirror_rxq->rspq);
1380 if (ret)
1381 goto out_free_rxq;
1382
1383 cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
1384 &mirror_rxq->msix->aff_mask, i);
1385 }
1386
1387
1388 cxgb4_enable_rx(adap, &mirror_rxq->rspq);
1389 }
1390
1391
1392 rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
1393 if (!rss) {
1394 ret = -ENOMEM;
1395 goto out_free_queues;
1396 }
1397
1398 mirror_rxq = &s->mirror_rxq[pi->port_id][0];
1399 for (i = 0; i < pi->rss_size; i++)
1400 rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
1401
1402 ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
1403 kfree(rss);
1404 if (ret)
1405 goto out_free_queues;
1406
1407 return 0;
1408
1409 out_free_rxq:
1410 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1411
1412 out_free_msix_idx:
1413 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1414
1415 out_free_queues:
1416 while (rxqid-- > 0)
1417 cxgb4_port_mirror_free_rxq(adap,
1418 &s->mirror_rxq[pi->port_id][rxqid]);
1419
1420 kfree(s->mirror_rxq[pi->port_id]);
1421 s->mirror_rxq[pi->port_id] = NULL;
1422 return ret;
1423 }
1424
1425 static void cxgb4_port_mirror_free_queues(struct net_device *dev)
1426 {
1427 struct port_info *pi = netdev2pinfo(dev);
1428 struct adapter *adap = netdev2adap(dev);
1429 struct sge *s = &adap->sge;
1430 u16 i;
1431
1432 if (!pi->vi_mirror_count)
1433 return;
1434
1435 if (!s->mirror_rxq[pi->port_id])
1436 return;
1437
1438 for (i = 0; i < pi->nmirrorqsets; i++)
1439 cxgb4_port_mirror_free_rxq(adap,
1440 &s->mirror_rxq[pi->port_id][i]);
1441
1442 kfree(s->mirror_rxq[pi->port_id]);
1443 s->mirror_rxq[pi->port_id] = NULL;
1444 }
1445
1446 static int cxgb4_port_mirror_start(struct net_device *dev)
1447 {
1448 struct port_info *pi = netdev2pinfo(dev);
1449 struct adapter *adap = netdev2adap(dev);
1450 int ret, idx = -1;
1451
1452 if (!pi->vi_mirror_count)
1453 return 0;
1454
1455
1456
1457
1458
1459
1460 ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
1461 dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
1462 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
1463 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
1464 if (ret) {
1465 dev_err(adap->pdev_dev,
1466 "Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n",
1467 pi->viid_mirror, ret);
1468 return ret;
1469 }
1470
1471
1472
1473
1474
1475 ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
1476 dev->dev_addr, true, NULL);
1477 if (ret) {
1478 dev_err(adap->pdev_dev,
1479 "Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
1480 pi->viid_mirror, ret);
1481 return ret;
1482 }
1483
1484
1485
1486
1487
1488
1489
1490 local_bh_disable();
1491 ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
1492 false);
1493 local_bh_enable();
1494 if (ret)
1495 dev_err(adap->pdev_dev,
1496 "Failed starting Mirror VI 0x%x, ret: %d\n",
1497 pi->viid_mirror, ret);
1498
1499 return ret;
1500 }
1501
1502 static void cxgb4_port_mirror_stop(struct net_device *dev)
1503 {
1504 struct port_info *pi = netdev2pinfo(dev);
1505 struct adapter *adap = netdev2adap(dev);
1506
1507 if (!pi->vi_mirror_count)
1508 return;
1509
1510 t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
1511 false);
1512 }
1513
1514 int cxgb4_port_mirror_alloc(struct net_device *dev)
1515 {
1516 struct port_info *pi = netdev2pinfo(dev);
1517 struct adapter *adap = netdev2adap(dev);
1518 int ret = 0;
1519
1520 if (!pi->nmirrorqsets)
1521 return -EOPNOTSUPP;
1522
1523 mutex_lock(&pi->vi_mirror_mutex);
1524 if (pi->viid_mirror) {
1525 pi->vi_mirror_count++;
1526 goto out_unlock;
1527 }
1528
1529 ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
1530 &pi->viid_mirror);
1531 if (ret)
1532 goto out_unlock;
1533
1534 pi->vi_mirror_count = 1;
1535
1536 if (adap->flags & CXGB4_FULL_INIT_DONE) {
1537 ret = cxgb4_port_mirror_alloc_queues(dev);
1538 if (ret)
1539 goto out_free_vi;
1540
1541 ret = cxgb4_port_mirror_start(dev);
1542 if (ret)
1543 goto out_free_queues;
1544 }
1545
1546 mutex_unlock(&pi->vi_mirror_mutex);
1547 return 0;
1548
1549 out_free_queues:
1550 cxgb4_port_mirror_free_queues(dev);
1551
1552 out_free_vi:
1553 pi->vi_mirror_count = 0;
1554 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1555 pi->viid_mirror = 0;
1556
1557 out_unlock:
1558 mutex_unlock(&pi->vi_mirror_mutex);
1559 return ret;
1560 }
1561
1562 void cxgb4_port_mirror_free(struct net_device *dev)
1563 {
1564 struct port_info *pi = netdev2pinfo(dev);
1565 struct adapter *adap = netdev2adap(dev);
1566
1567 mutex_lock(&pi->vi_mirror_mutex);
1568 if (!pi->viid_mirror)
1569 goto out_unlock;
1570
1571 if (pi->vi_mirror_count > 1) {
1572 pi->vi_mirror_count--;
1573 goto out_unlock;
1574 }
1575
1576 cxgb4_port_mirror_stop(dev);
1577 cxgb4_port_mirror_free_queues(dev);
1578
1579 pi->vi_mirror_count = 0;
1580 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1581 pi->viid_mirror = 0;
1582
1583 out_unlock:
1584 mutex_unlock(&pi->vi_mirror_mutex);
1585 }
1586
1587
1588
1589
1590
1591
1592
1593
1594 int cxgb4_alloc_atid(struct tid_info *t, void *data)
1595 {
1596 int atid = -1;
1597
1598 spin_lock_bh(&t->atid_lock);
1599 if (t->afree) {
1600 union aopen_entry *p = t->afree;
1601
1602 atid = (p - t->atid_tab) + t->atid_base;
1603 t->afree = p->next;
1604 p->data = data;
1605 t->atids_in_use++;
1606 }
1607 spin_unlock_bh(&t->atid_lock);
1608 return atid;
1609 }
1610 EXPORT_SYMBOL(cxgb4_alloc_atid);
1611
1612
1613
1614
1615 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1616 {
1617 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1618
1619 spin_lock_bh(&t->atid_lock);
1620 p->next = t->afree;
1621 t->afree = p;
1622 t->atids_in_use--;
1623 spin_unlock_bh(&t->atid_lock);
1624 }
1625 EXPORT_SYMBOL(cxgb4_free_atid);
1626
1627
1628
1629
1630 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1631 {
1632 int stid;
1633
1634 spin_lock_bh(&t->stid_lock);
1635 if (family == PF_INET) {
1636 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1637 if (stid < t->nstids)
1638 __set_bit(stid, t->stid_bmap);
1639 else
1640 stid = -1;
1641 } else {
1642 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
1643 if (stid < 0)
1644 stid = -1;
1645 }
1646 if (stid >= 0) {
1647 t->stid_tab[stid].data = data;
1648 stid += t->stid_base;
1649
1650
1651
1652
1653 if (family == PF_INET6) {
1654 t->stids_in_use += 2;
1655 t->v6_stids_in_use += 2;
1656 } else {
1657 t->stids_in_use++;
1658 }
1659 }
1660 spin_unlock_bh(&t->stid_lock);
1661 return stid;
1662 }
1663 EXPORT_SYMBOL(cxgb4_alloc_stid);
1664
1665
1666
1667 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1668 {
1669 int stid;
1670
1671 spin_lock_bh(&t->stid_lock);
1672 if (family == PF_INET) {
1673 stid = find_next_zero_bit(t->stid_bmap,
1674 t->nstids + t->nsftids, t->nstids);
1675 if (stid < (t->nstids + t->nsftids))
1676 __set_bit(stid, t->stid_bmap);
1677 else
1678 stid = -1;
1679 } else {
1680 stid = -1;
1681 }
1682 if (stid >= 0) {
1683 t->stid_tab[stid].data = data;
1684 stid -= t->nstids;
1685 stid += t->sftid_base;
1686 t->sftids_in_use++;
1687 }
1688 spin_unlock_bh(&t->stid_lock);
1689 return stid;
1690 }
1691 EXPORT_SYMBOL(cxgb4_alloc_sftid);
1692
1693
1694
1695 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1696 {
1697
1698 if (t->nsftids && (stid >= t->sftid_base)) {
1699 stid -= t->sftid_base;
1700 stid += t->nstids;
1701 } else {
1702 stid -= t->stid_base;
1703 }
1704
1705 spin_lock_bh(&t->stid_lock);
1706 if (family == PF_INET)
1707 __clear_bit(stid, t->stid_bmap);
1708 else
1709 bitmap_release_region(t->stid_bmap, stid, 1);
1710 t->stid_tab[stid].data = NULL;
1711 if (stid < t->nstids) {
1712 if (family == PF_INET6) {
1713 t->stids_in_use -= 2;
1714 t->v6_stids_in_use -= 2;
1715 } else {
1716 t->stids_in_use--;
1717 }
1718 } else {
1719 t->sftids_in_use--;
1720 }
1721
1722 spin_unlock_bh(&t->stid_lock);
1723 }
1724 EXPORT_SYMBOL(cxgb4_free_stid);
1725
1726
1727
1728
1729 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1730 unsigned int tid)
1731 {
1732 struct cpl_tid_release *req;
1733
1734 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1735 req = __skb_put(skb, sizeof(*req));
1736 INIT_TP_WR(req, tid);
1737 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1738 }
1739
1740
1741
1742
1743
1744 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1745 unsigned int tid)
1746 {
1747 struct adapter *adap = container_of(t, struct adapter, tids);
1748 void **p = &t->tid_tab[tid - t->tid_base];
1749
1750 spin_lock_bh(&adap->tid_release_lock);
1751 *p = adap->tid_release_head;
1752
1753 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1754 if (!adap->tid_release_task_busy) {
1755 adap->tid_release_task_busy = true;
1756 queue_work(adap->workq, &adap->tid_release_task);
1757 }
1758 spin_unlock_bh(&adap->tid_release_lock);
1759 }
1760
1761
1762
1763
1764 static void process_tid_release_list(struct work_struct *work)
1765 {
1766 struct sk_buff *skb;
1767 struct adapter *adap;
1768
1769 adap = container_of(work, struct adapter, tid_release_task);
1770
1771 spin_lock_bh(&adap->tid_release_lock);
1772 while (adap->tid_release_head) {
1773 void **p = adap->tid_release_head;
1774 unsigned int chan = (uintptr_t)p & 3;
1775 p = (void *)p - chan;
1776
1777 adap->tid_release_head = *p;
1778 *p = NULL;
1779 spin_unlock_bh(&adap->tid_release_lock);
1780
1781 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1782 GFP_KERNEL)))
1783 schedule_timeout_uninterruptible(1);
1784
1785 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1786 t4_ofld_send(adap, skb);
1787 spin_lock_bh(&adap->tid_release_lock);
1788 }
1789 adap->tid_release_task_busy = false;
1790 spin_unlock_bh(&adap->tid_release_lock);
1791 }
1792
1793
1794
1795
1796
1797 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
1798 unsigned short family)
1799 {
1800 struct adapter *adap = container_of(t, struct adapter, tids);
1801 struct sk_buff *skb;
1802
1803 WARN_ON(tid_out_of_range(&adap->tids, tid));
1804
1805 if (t->tid_tab[tid - adap->tids.tid_base]) {
1806 t->tid_tab[tid - adap->tids.tid_base] = NULL;
1807 atomic_dec(&t->conns_in_use);
1808 if (t->hash_base && (tid >= t->hash_base)) {
1809 if (family == AF_INET6)
1810 atomic_sub(2, &t->hash_tids_in_use);
1811 else
1812 atomic_dec(&t->hash_tids_in_use);
1813 } else {
1814 if (family == AF_INET6)
1815 atomic_sub(2, &t->tids_in_use);
1816 else
1817 atomic_dec(&t->tids_in_use);
1818 }
1819 }
1820
1821 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1822 if (likely(skb)) {
1823 mk_tid_release(skb, chan, tid);
1824 t4_ofld_send(adap, skb);
1825 } else
1826 cxgb4_queue_tid_release(t, chan, tid);
1827 }
1828 EXPORT_SYMBOL(cxgb4_remove_tid);
1829
1830
1831
1832
1833 static int tid_init(struct tid_info *t)
1834 {
1835 struct adapter *adap = container_of(t, struct adapter, tids);
1836 unsigned int max_ftids = t->nftids + t->nsftids;
1837 unsigned int natids = t->natids;
1838 unsigned int hpftid_bmap_size;
1839 unsigned int eotid_bmap_size;
1840 unsigned int stid_bmap_size;
1841 unsigned int ftid_bmap_size;
1842 size_t size;
1843
1844 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1845 ftid_bmap_size = BITS_TO_LONGS(t->nftids);
1846 hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
1847 eotid_bmap_size = BITS_TO_LONGS(t->neotids);
1848 size = t->ntids * sizeof(*t->tid_tab) +
1849 natids * sizeof(*t->atid_tab) +
1850 t->nstids * sizeof(*t->stid_tab) +
1851 t->nsftids * sizeof(*t->stid_tab) +
1852 stid_bmap_size * sizeof(long) +
1853 t->nhpftids * sizeof(*t->hpftid_tab) +
1854 hpftid_bmap_size * sizeof(long) +
1855 max_ftids * sizeof(*t->ftid_tab) +
1856 ftid_bmap_size * sizeof(long) +
1857 t->neotids * sizeof(*t->eotid_tab) +
1858 eotid_bmap_size * sizeof(long);
1859
1860 t->tid_tab = kvzalloc(size, GFP_KERNEL);
1861 if (!t->tid_tab)
1862 return -ENOMEM;
1863
1864 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1865 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1866 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1867 t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1868 t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
1869 t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
1870 t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
1871 t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
1872 t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
1873 spin_lock_init(&t->stid_lock);
1874 spin_lock_init(&t->atid_lock);
1875 spin_lock_init(&t->ftid_lock);
1876
1877 t->stids_in_use = 0;
1878 t->v6_stids_in_use = 0;
1879 t->sftids_in_use = 0;
1880 t->afree = NULL;
1881 t->atids_in_use = 0;
1882 atomic_set(&t->tids_in_use, 0);
1883 atomic_set(&t->conns_in_use, 0);
1884 atomic_set(&t->hash_tids_in_use, 0);
1885 atomic_set(&t->eotids_in_use, 0);
1886
1887
1888 if (natids) {
1889 while (--natids)
1890 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1891 t->afree = t->atid_tab;
1892 }
1893
1894 if (is_offload(adap)) {
1895 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1896
1897 if (!t->stid_base &&
1898 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1899 __set_bit(0, t->stid_bmap);
1900
1901 if (t->neotids)
1902 bitmap_zero(t->eotid_bmap, t->neotids);
1903 }
1904
1905 if (t->nhpftids)
1906 bitmap_zero(t->hpftid_bmap, t->nhpftids);
1907 bitmap_zero(t->ftid_bmap, t->nftids);
1908 return 0;
1909 }
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1924 __be32 sip, __be16 sport, __be16 vlan,
1925 unsigned int queue)
1926 {
1927 unsigned int chan;
1928 struct sk_buff *skb;
1929 struct adapter *adap;
1930 struct cpl_pass_open_req *req;
1931 int ret;
1932
1933 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1934 if (!skb)
1935 return -ENOMEM;
1936
1937 adap = netdev2adap(dev);
1938 req = __skb_put(skb, sizeof(*req));
1939 INIT_TP_WR(req, 0);
1940 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1941 req->local_port = sport;
1942 req->peer_port = htons(0);
1943 req->local_ip = sip;
1944 req->peer_ip = htonl(0);
1945 chan = rxq_to_chan(&adap->sge, queue);
1946 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1947 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1948 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1949 ret = t4_mgmt_tx(adap, skb);
1950 return net_xmit_eval(ret);
1951 }
1952 EXPORT_SYMBOL(cxgb4_create_server);
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1965 const struct in6_addr *sip, __be16 sport,
1966 unsigned int queue)
1967 {
1968 unsigned int chan;
1969 struct sk_buff *skb;
1970 struct adapter *adap;
1971 struct cpl_pass_open_req6 *req;
1972 int ret;
1973
1974 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1975 if (!skb)
1976 return -ENOMEM;
1977
1978 adap = netdev2adap(dev);
1979 req = __skb_put(skb, sizeof(*req));
1980 INIT_TP_WR(req, 0);
1981 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1982 req->local_port = sport;
1983 req->peer_port = htons(0);
1984 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1985 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1986 req->peer_ip_hi = cpu_to_be64(0);
1987 req->peer_ip_lo = cpu_to_be64(0);
1988 chan = rxq_to_chan(&adap->sge, queue);
1989 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1990 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1991 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1992 ret = t4_mgmt_tx(adap, skb);
1993 return net_xmit_eval(ret);
1994 }
1995 EXPORT_SYMBOL(cxgb4_create_server6);
1996
1997 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1998 unsigned int queue, bool ipv6)
1999 {
2000 struct sk_buff *skb;
2001 struct adapter *adap;
2002 struct cpl_close_listsvr_req *req;
2003 int ret;
2004
2005 adap = netdev2adap(dev);
2006
2007 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2008 if (!skb)
2009 return -ENOMEM;
2010
2011 req = __skb_put(skb, sizeof(*req));
2012 INIT_TP_WR(req, 0);
2013 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
2014 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
2015 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
2016 ret = t4_mgmt_tx(adap, skb);
2017 return net_xmit_eval(ret);
2018 }
2019 EXPORT_SYMBOL(cxgb4_remove_server);
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2032 unsigned int *idx)
2033 {
2034 unsigned int i = 0;
2035
2036 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2037 ++i;
2038 if (idx)
2039 *idx = i;
2040 return mtus[i];
2041 }
2042 EXPORT_SYMBOL(cxgb4_best_mtu);
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
2061 unsigned short header_size,
2062 unsigned short data_size_max,
2063 unsigned short data_size_align,
2064 unsigned int *mtu_idxp)
2065 {
2066 unsigned short max_mtu = header_size + data_size_max;
2067 unsigned short data_size_align_mask = data_size_align - 1;
2068 int mtu_idx, aligned_mtu_idx;
2069
2070
2071
2072
2073
2074
2075 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
2076 unsigned short data_size = mtus[mtu_idx] - header_size;
2077
2078
2079
2080
2081 if ((data_size & data_size_align_mask) == 0)
2082 aligned_mtu_idx = mtu_idx;
2083
2084
2085
2086
2087
2088 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
2089 break;
2090 }
2091
2092
2093
2094
2095 if (mtu_idx == NMTUS)
2096 mtu_idx--;
2097
2098
2099
2100
2101
2102 if (aligned_mtu_idx >= 0 &&
2103 mtu_idx - aligned_mtu_idx <= 1)
2104 mtu_idx = aligned_mtu_idx;
2105
2106
2107
2108
2109 if (mtu_idxp)
2110 *mtu_idxp = mtu_idx;
2111 return mtus[mtu_idx];
2112 }
2113 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
2114
2115
2116
2117
2118
2119
2120
2121 unsigned int cxgb4_port_chan(const struct net_device *dev)
2122 {
2123 return netdev2pinfo(dev)->tx_chan;
2124 }
2125 EXPORT_SYMBOL(cxgb4_port_chan);
2126
2127
2128
2129
2130
2131
2132
2133 unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
2134 {
2135 return netdev2pinfo(dev)->rx_cchan;
2136 }
2137 EXPORT_SYMBOL(cxgb4_port_e2cchan);
2138
2139 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2140 {
2141 struct adapter *adap = netdev2adap(dev);
2142 u32 v1, v2, lp_count, hp_count;
2143
2144 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2145 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2146 if (is_t4(adap->params.chip)) {
2147 lp_count = LP_COUNT_G(v1);
2148 hp_count = HP_COUNT_G(v1);
2149 } else {
2150 lp_count = LP_COUNT_T5_G(v1);
2151 hp_count = HP_COUNT_T5_G(v2);
2152 }
2153 return lpfifo ? lp_count : hp_count;
2154 }
2155 EXPORT_SYMBOL(cxgb4_dbfifo_count);
2156
2157
2158
2159
2160
2161
2162
2163 unsigned int cxgb4_port_viid(const struct net_device *dev)
2164 {
2165 return netdev2pinfo(dev)->viid;
2166 }
2167 EXPORT_SYMBOL(cxgb4_port_viid);
2168
2169
2170
2171
2172
2173
2174
2175 unsigned int cxgb4_port_idx(const struct net_device *dev)
2176 {
2177 return netdev2pinfo(dev)->port_id;
2178 }
2179 EXPORT_SYMBOL(cxgb4_port_idx);
2180
2181 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2182 struct tp_tcp_stats *v6)
2183 {
2184 struct adapter *adap = pci_get_drvdata(pdev);
2185
2186 spin_lock(&adap->stats_lock);
2187 t4_tp_get_tcp_stats(adap, v4, v6, false);
2188 spin_unlock(&adap->stats_lock);
2189 }
2190 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2191
2192 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2193 const unsigned int *pgsz_order)
2194 {
2195 struct adapter *adap = netdev2adap(dev);
2196
2197 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
2198 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
2199 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
2200 HPZ3_V(pgsz_order[3]));
2201 }
2202 EXPORT_SYMBOL(cxgb4_iscsi_init);
2203
2204 int cxgb4_flush_eq_cache(struct net_device *dev)
2205 {
2206 struct adapter *adap = netdev2adap(dev);
2207
2208 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
2209 }
2210 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2211
2212 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2213 {
2214 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
2215 __be64 indices;
2216 int ret;
2217
2218 spin_lock(&adap->win0_lock);
2219 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2220 sizeof(indices), (__be32 *)&indices,
2221 T4_MEMORY_READ);
2222 spin_unlock(&adap->win0_lock);
2223 if (!ret) {
2224 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2225 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2226 }
2227 return ret;
2228 }
2229
2230 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2231 u16 size)
2232 {
2233 struct adapter *adap = netdev2adap(dev);
2234 u16 hw_pidx, hw_cidx;
2235 int ret;
2236
2237 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2238 if (ret)
2239 goto out;
2240
2241 if (pidx != hw_pidx) {
2242 u16 delta;
2243 u32 val;
2244
2245 if (pidx >= hw_pidx)
2246 delta = pidx - hw_pidx;
2247 else
2248 delta = size - hw_pidx + pidx;
2249
2250 if (is_t4(adap->params.chip))
2251 val = PIDX_V(delta);
2252 else
2253 val = PIDX_T5_V(delta);
2254 wmb();
2255 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2256 QID_V(qid) | val);
2257 }
2258 out:
2259 return ret;
2260 }
2261 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
2262
2263 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2264 {
2265 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
2266 u32 edc0_end, edc1_end, mc0_end, mc1_end;
2267 u32 offset, memtype, memaddr;
2268 struct adapter *adap;
2269 u32 hma_size = 0;
2270 int ret;
2271
2272 adap = netdev2adap(dev);
2273
2274 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2275
2276
2277
2278
2279
2280
2281
2282 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2283 edc0_size = EDRAM0_SIZE_G(size) << 20;
2284 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2285 edc1_size = EDRAM1_SIZE_G(size) << 20;
2286 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2287 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
2288
2289 if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
2290 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2291 hma_size = EXT_MEM1_SIZE_G(size) << 20;
2292 }
2293 edc0_end = edc0_size;
2294 edc1_end = edc0_end + edc1_size;
2295 mc0_end = edc1_end + mc0_size;
2296
2297 if (offset < edc0_end) {
2298 memtype = MEM_EDC0;
2299 memaddr = offset;
2300 } else if (offset < edc1_end) {
2301 memtype = MEM_EDC1;
2302 memaddr = offset - edc0_end;
2303 } else {
2304 if (hma_size && (offset < (edc1_end + hma_size))) {
2305 memtype = MEM_HMA;
2306 memaddr = offset - edc1_end;
2307 } else if (offset < mc0_end) {
2308 memtype = MEM_MC0;
2309 memaddr = offset - edc1_end;
2310 } else if (is_t5(adap->params.chip)) {
2311 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2312 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
2313 mc1_end = mc0_end + mc1_size;
2314 if (offset < mc1_end) {
2315 memtype = MEM_MC1;
2316 memaddr = offset - mc0_end;
2317 } else {
2318 /* offset beyond the end of any memory */
2319 goto err;
2320 }
2321 } else {
2322 /* offset beyond the end of any memory */
2323 goto err;
2324 }
2325 }
2326
2327 spin_lock(&adap->win0_lock);
2328 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2329 spin_unlock(&adap->win0_lock);
2330 return ret;
2331
2332 err:
2333 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2334 stag, offset);
2335 return -EINVAL;
2336 }
2337 EXPORT_SYMBOL(cxgb4_read_tpte);
2338
2339 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2340 {
2341 u32 hi, lo;
2342 struct adapter *adap;
2343
2344 adap = netdev2adap(dev);
2345 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2346 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
2347
2348 return ((u64)hi << 32) | (u64)lo;
2349 }
2350 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
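/* Illustrative sketch: the SGE timestamp is returned as a single 64-bit
 * value composed from the HI/LO registers read above, so a caller can
 * compute deltas directly. "netdev" is a hypothetical port net_device.
 *
 *	u64 t0 = cxgb4_read_sge_timestamp(netdev);
 *	// ... do work ...
 *	u64 ticks = cxgb4_read_sge_timestamp(netdev) - t0;
 */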
2351
2352 int cxgb4_bar2_sge_qregs(struct net_device *dev,
2353 unsigned int qid,
2354 enum cxgb4_bar2_qtype qtype,
2355 int user,
2356 u64 *pbar2_qoffset,
2357 unsigned int *pbar2_qid)
2358 {
2359 return t4_bar2_sge_qregs(netdev2adap(dev),
2360 qid,
2361 (qtype == CXGB4_BAR2_QTYPE_EGRESS
2362 ? T4_BAR2_QTYPE_EGRESS
2363 : T4_BAR2_QTYPE_INGRESS),
2364 user,
2365 pbar2_qoffset,
2366 pbar2_qid);
2367 }
2368 EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
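/* Usage sketch (illustrative only): a ULD that wants to ring a queue's
 * BAR2 doorbell directly would first look up the queue's BAR2 offset and
 * qid with this helper and then add the offset to its own BAR2 mapping.
 * "uld_qid" and "bar2_va" are hypothetical caller-side values.
 *
 *	u64 off;
 *	unsigned int b2_qid;
 *	void __iomem *db_addr = NULL;
 *	if (!cxgb4_bar2_sge_qregs(netdev, uld_qid, CXGB4_BAR2_QTYPE_EGRESS,
 *				  0, &off, &b2_qid))
 *		db_addr = bar2_va + off;	// queue's doorbell/GTS block
 */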
2369
2370 static struct pci_driver cxgb4_driver;
2371
2372 static void check_neigh_update(struct neighbour *neigh)
2373 {
2374 const struct device *parent;
2375 const struct net_device *netdev = neigh->dev;
2376
2377 if (is_vlan_dev(netdev))
2378 netdev = vlan_dev_real_dev(netdev);
2379 parent = netdev->dev.parent;
2380 if (parent && parent->driver == &cxgb4_driver.driver)
2381 t4_l2t_update(dev_get_drvdata(parent), neigh);
2382 }
2383
2384 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2385 void *data)
2386 {
2387 switch (event) {
2388 case NETEVENT_NEIGH_UPDATE:
2389 check_neigh_update(data);
2390 break;
2391 case NETEVENT_REDIRECT:
2392 default:
2393 break;
2394 }
2395 return 0;
2396 }
2397
2398 static bool netevent_registered;
2399 static struct notifier_block cxgb4_netevent_nb = {
2400 .notifier_call = netevent_cb
2401 };
2402
2403 static void drain_db_fifo(struct adapter *adap, int usecs)
2404 {
2405 u32 v1, v2, lp_count, hp_count;
2406
2407 do {
2408 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2409 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2410 if (is_t4(adap->params.chip)) {
2411 lp_count = LP_COUNT_G(v1);
2412 hp_count = HP_COUNT_G(v1);
2413 } else {
2414 lp_count = LP_COUNT_T5_G(v1);
2415 hp_count = HP_COUNT_T5_G(v2);
2416 }
2417
2418 if (lp_count == 0 && hp_count == 0)
2419 break;
2420 set_current_state(TASK_UNINTERRUPTIBLE);
2421 schedule_timeout(usecs_to_jiffies(usecs));
2422 } while (1);
2423 }
2424
2425 static void disable_txq_db(struct sge_txq *q)
2426 {
2427 unsigned long flags;
2428
2429 spin_lock_irqsave(&q->db_lock, flags);
2430 q->db_disabled = 1;
2431 spin_unlock_irqrestore(&q->db_lock, flags);
2432 }
2433
2434 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
2435 {
2436 spin_lock_irq(&q->db_lock);
2437 if (q->db_pidx_inc) {
2438 /* Make sure all writes to the TX descriptors are committed
2439 * before telling the hardware about them.
2440 */
2441 wmb();
2442 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2443 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2444 q->db_pidx_inc = 0;
2445 }
2446 q->db_disabled = 0;
2447 spin_unlock_irq(&q->db_lock);
2448 }
2449
2450 static void disable_dbs(struct adapter *adap)
2451 {
2452 int i;
2453
2454 for_each_ethrxq(&adap->sge, i)
2455 disable_txq_db(&adap->sge.ethtxq[i].q);
2456 if (is_offload(adap)) {
2457 struct sge_uld_txq_info *txq_info =
2458 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2459
2460 if (txq_info) {
2461 for_each_ofldtxq(&adap->sge, i) {
2462 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2463
2464 disable_txq_db(&txq->q);
2465 }
2466 }
2467 }
2468 for_each_port(adap, i)
2469 disable_txq_db(&adap->sge.ctrlq[i].q);
2470 }
2471
2472 static void enable_dbs(struct adapter *adap)
2473 {
2474 int i;
2475
2476 for_each_ethrxq(&adap->sge, i)
2477 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2478 if (is_offload(adap)) {
2479 struct sge_uld_txq_info *txq_info =
2480 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2481
2482 if (txq_info) {
2483 for_each_ofldtxq(&adap->sge, i) {
2484 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2485
2486 enable_txq_db(adap, &txq->q);
2487 }
2488 }
2489 }
2490 for_each_port(adap, i)
2491 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2492 }
2493
2494 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2495 {
2496 enum cxgb4_uld type = CXGB4_ULD_RDMA;
2497
2498 if (adap->uld && adap->uld[type].handle)
2499 adap->uld[type].control(adap->uld[type].handle, cmd);
2500 }
2501
2502 static void process_db_full(struct work_struct *work)
2503 {
2504 struct adapter *adap;
2505
2506 adap = container_of(work, struct adapter, db_full_task);
2507
2508 drain_db_fifo(adap, dbfifo_drain_delay);
2509 enable_dbs(adap);
2510 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2511 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2512 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2513 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2514 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2515 else
2516 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2517 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
2518 }
2519
2520 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2521 {
2522 u16 hw_pidx, hw_cidx;
2523 int ret;
2524
2525 spin_lock_irq(&q->db_lock);
2526 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2527 if (ret)
2528 goto out;
2529 if (q->db_pidx != hw_pidx) {
2530 u16 delta;
2531 u32 val;
2532
2533 if (q->db_pidx >= hw_pidx)
2534 delta = q->db_pidx - hw_pidx;
2535 else
2536 delta = q->size - hw_pidx + q->db_pidx;
2537
2538 if (is_t4(adap->params.chip))
2539 val = PIDX_V(delta);
2540 else
2541 val = PIDX_T5_V(delta);
2542 wmb();
2543 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2544 QID_V(q->cntxt_id) | val);
2545 }
2546 out:
2547 q->db_disabled = 0;
2548 q->db_pidx_inc = 0;
2549 spin_unlock_irq(&q->db_lock);
2550 if (ret)
2551 CH_WARN(adap, "DB drop recovery failed.\n");
2552 }
2553
2554 static void recover_all_queues(struct adapter *adap)
2555 {
2556 int i;
2557
2558 for_each_ethrxq(&adap->sge, i)
2559 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2560 if (is_offload(adap)) {
2561 struct sge_uld_txq_info *txq_info =
2562 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2563 if (txq_info) {
2564 for_each_ofldtxq(&adap->sge, i) {
2565 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2566
2567 sync_txq_pidx(adap, &txq->q);
2568 }
2569 }
2570 }
2571 for_each_port(adap, i)
2572 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2573 }
2574
2575 static void process_db_drop(struct work_struct *work)
2576 {
2577 struct adapter *adap;
2578
2579 adap = container_of(work, struct adapter, db_drop_task);
2580
2581 if (is_t4(adap->params.chip)) {
2582 drain_db_fifo(adap, dbfifo_drain_delay);
2583 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2584 drain_db_fifo(adap, dbfifo_drain_delay);
2585 recover_all_queues(adap);
2586 drain_db_fifo(adap, dbfifo_drain_delay);
2587 enable_dbs(adap);
2588 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2589 } else if (is_t5(adap->params.chip)) {
2590 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2591 u16 qid = (dropped_db >> 15) & 0x1ffff;
2592 u16 pidx_inc = dropped_db & 0x1fff;
2593 u64 bar2_qoffset;
2594 unsigned int bar2_qid;
2595 int ret;
2596
2597 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2598 0, &bar2_qoffset, &bar2_qid);
2599 if (ret)
2600 dev_err(adap->pdev_dev, "doorbell drop recovery: "
2601 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
2602 else
2603 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2604 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2605
2606 /* Re-enable BAR2 write combining */
2607 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
2608 }
2609
2610 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2611 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2612 }
2613
2614 void t4_db_full(struct adapter *adap)
2615 {
2616 if (is_t4(adap->params.chip)) {
2617 disable_dbs(adap);
2618 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2619 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2620 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2621 queue_work(adap->workq, &adap->db_full_task);
2622 }
2623 }
2624
2625 void t4_db_dropped(struct adapter *adap)
2626 {
2627 if (is_t4(adap->params.chip)) {
2628 disable_dbs(adap);
2629 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2630 }
2631 queue_work(adap->workq, &adap->db_drop_task);
2632 }
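/* Note on the doorbell recovery paths above: on T4 the hardware can lose
 * doorbell writes when its doorbell FIFO overflows, so t4_db_full() and
 * t4_db_dropped() quiesce the TX doorbells, wait for the FIFO to drain in
 * process_db_full()/process_db_drop(), resynchronize every egress queue's
 * producer index and then re-enable doorbells. On T5 and later the
 * dropped doorbell is replayed directly through the queue's BAR2
 * register instead.
 */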
2633
2634 void t4_register_netevent_notifier(void)
2635 {
2636 if (!netevent_registered) {
2637 register_netevent_notifier(&cxgb4_netevent_nb);
2638 netevent_registered = true;
2639 }
2640 }
2641
2642 static void detach_ulds(struct adapter *adap)
2643 {
2644 unsigned int i;
2645
2646 if (!is_uld(adap))
2647 return;
2648
2649 mutex_lock(&uld_mutex);
2650 list_del(&adap->list_node);
2651
2652 for (i = 0; i < CXGB4_ULD_MAX; i++)
2653 if (adap->uld && adap->uld[i].handle)
2654 adap->uld[i].state_change(adap->uld[i].handle,
2655 CXGB4_STATE_DETACH);
2656
2657 if (netevent_registered && list_empty(&adapter_list)) {
2658 unregister_netevent_notifier(&cxgb4_netevent_nb);
2659 netevent_registered = false;
2660 }
2661 mutex_unlock(&uld_mutex);
2662 }
2663
2664 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2665 {
2666 unsigned int i;
2667
2668 mutex_lock(&uld_mutex);
2669 for (i = 0; i < CXGB4_ULD_MAX; i++)
2670 if (adap->uld && adap->uld[i].handle)
2671 adap->uld[i].state_change(adap->uld[i].handle,
2672 new_state);
2673 mutex_unlock(&uld_mutex);
2674 }
2675
2676 #if IS_ENABLED(CONFIG_IPV6)
2677 static int cxgb4_inet6addr_handler(struct notifier_block *this,
2678 unsigned long event, void *data)
2679 {
2680 struct inet6_ifaddr *ifa = data;
2681 struct net_device *event_dev = ifa->idev->dev;
2682 const struct device *parent = NULL;
2683 #if IS_ENABLED(CONFIG_BONDING)
2684 struct adapter *adap;
2685 #endif
2686 if (is_vlan_dev(event_dev))
2687 event_dev = vlan_dev_real_dev(event_dev);
2688 #if IS_ENABLED(CONFIG_BONDING)
2689 if (event_dev->flags & IFF_MASTER) {
2690 list_for_each_entry(adap, &adapter_list, list_node) {
2691 switch (event) {
2692 case NETDEV_UP:
2693 cxgb4_clip_get(adap->port[0],
2694 (const u32 *)ifa, 1);
2695 break;
2696 case NETDEV_DOWN:
2697 cxgb4_clip_release(adap->port[0],
2698 (const u32 *)ifa, 1);
2699 break;
2700 default:
2701 break;
2702 }
2703 }
2704 return NOTIFY_OK;
2705 }
2706 #endif
2707
2708 if (event_dev)
2709 parent = event_dev->dev.parent;
2710
2711 if (parent && parent->driver == &cxgb4_driver.driver) {
2712 switch (event) {
2713 case NETDEV_UP:
2714 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2715 break;
2716 case NETDEV_DOWN:
2717 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2718 break;
2719 default:
2720 break;
2721 }
2722 }
2723 return NOTIFY_OK;
2724 }
2725
2726 static bool inet6addr_registered;
2727 static struct notifier_block cxgb4_inet6addr_notifier = {
2728 .notifier_call = cxgb4_inet6addr_handler
2729 };
2730
2731 static void update_clip(const struct adapter *adap)
2732 {
2733 int i;
2734 struct net_device *dev;
2735 int ret;
2736
2737 rcu_read_lock();
2738
2739 for (i = 0; i < MAX_NPORTS; i++) {
2740 dev = adap->port[i];
2741 ret = 0;
2742
2743 if (dev)
2744 ret = cxgb4_update_root_dev_clip(dev);
2745
2746 if (ret < 0)
2747 break;
2748 }
2749 rcu_read_unlock();
2750 }
2751 #endif
2752
2753 /**
2754 * cxgb_up - enable the adapter
2755 * @adap: adapter being enabled
2756 *
2757 * Called when the first port is enabled, this function performs the
2758 * actions necessary to make an adapter operational, such as completing
2759 * the initialization of HW modules, and enabling interrupts.
2760 *
2761 * Must be called with the rtnl lock held.
2762 */
2763 static int cxgb_up(struct adapter *adap)
2764 {
2765 struct sge *s = &adap->sge;
2766 int err;
2767
2768 mutex_lock(&uld_mutex);
2769 err = setup_sge_queues(adap);
2770 if (err)
2771 goto rel_lock;
2772 err = setup_rss(adap);
2773 if (err)
2774 goto freeq;
2775
2776 if (adap->flags & CXGB4_USING_MSIX) {
2777 if (s->nd_msix_idx < 0) {
2778 err = -ENOMEM;
2779 goto irq_err;
2780 }
2781
2782 err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
2783 t4_nondata_intr, 0,
2784 adap->msix_info[s->nd_msix_idx].desc, adap);
2785 if (err)
2786 goto irq_err;
2787
2788 err = request_msix_queue_irqs(adap);
2789 if (err)
2790 goto irq_err_free_nd_msix;
2791 } else {
2792 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2793 (adap->flags & CXGB4_USING_MSI) ? 0
2794 : IRQF_SHARED,
2795 adap->port[0]->name, adap);
2796 if (err)
2797 goto irq_err;
2798 }
2799
2800 enable_rx(adap);
2801 t4_sge_start(adap);
2802 t4_intr_enable(adap);
2803 adap->flags |= CXGB4_FULL_INIT_DONE;
2804 mutex_unlock(&uld_mutex);
2805
2806 notify_ulds(adap, CXGB4_STATE_UP);
2807 #if IS_ENABLED(CONFIG_IPV6)
2808 update_clip(adap);
2809 #endif
2810 return err;
2811
2812 irq_err_free_nd_msix:
2813 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
2814 irq_err:
2815 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2816 freeq:
2817 t4_free_sge_resources(adap);
2818 rel_lock:
2819 mutex_unlock(&uld_mutex);
2820 return err;
2821 }
2822
2823 static void cxgb_down(struct adapter *adapter)
2824 {
2825 cancel_work_sync(&adapter->tid_release_task);
2826 cancel_work_sync(&adapter->db_full_task);
2827 cancel_work_sync(&adapter->db_drop_task);
2828 adapter->tid_release_task_busy = false;
2829 adapter->tid_release_head = NULL;
2830
2831 t4_sge_stop(adapter);
2832 t4_free_sge_resources(adapter);
2833
2834 adapter->flags &= ~CXGB4_FULL_INIT_DONE;
2835 }
2836
2837 /*
2838 * net_device operations
2839 */
2840 static int cxgb_open(struct net_device *dev)
2841 {
2842 struct port_info *pi = netdev_priv(dev);
2843 struct adapter *adapter = pi->adapter;
2844 int err;
2845
2846 netif_carrier_off(dev);
2847
2848 if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
2849 err = cxgb_up(adapter);
2850 if (err < 0)
2851 return err;
2852 }
2853
2854 /* It's possible that the basic port information could have
2855 * changed since we first read it.
2856 */
2857 err = t4_update_port_info(pi);
2858 if (err < 0)
2859 return err;
2860
2861 err = link_start(dev);
2862 if (err)
2863 return err;
2864
2865 if (pi->nmirrorqsets) {
2866 mutex_lock(&pi->vi_mirror_mutex);
2867 err = cxgb4_port_mirror_alloc_queues(dev);
2868 if (err)
2869 goto out_unlock;
2870
2871 err = cxgb4_port_mirror_start(dev);
2872 if (err)
2873 goto out_free_queues;
2874 mutex_unlock(&pi->vi_mirror_mutex);
2875 }
2876
2877 netif_tx_start_all_queues(dev);
2878 return 0;
2879
2880 out_free_queues:
2881 cxgb4_port_mirror_free_queues(dev);
2882
2883 out_unlock:
2884 mutex_unlock(&pi->vi_mirror_mutex);
2885 return err;
2886 }
2887
2888 static int cxgb_close(struct net_device *dev)
2889 {
2890 struct port_info *pi = netdev_priv(dev);
2891 struct adapter *adapter = pi->adapter;
2892 int ret;
2893
2894 netif_tx_stop_all_queues(dev);
2895 netif_carrier_off(dev);
2896 ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2897 false, false, false);
2898 #ifdef CONFIG_CHELSIO_T4_DCB
2899 cxgb4_dcb_reset(dev);
2900 dcb_tx_queue_prio_enable(dev, false);
2901 #endif
2902 if (ret)
2903 return ret;
2904
2905 if (pi->nmirrorqsets) {
2906 mutex_lock(&pi->vi_mirror_mutex);
2907 cxgb4_port_mirror_stop(dev);
2908 cxgb4_port_mirror_free_queues(dev);
2909 mutex_unlock(&pi->vi_mirror_mutex);
2910 }
2911
2912 return 0;
2913 }
2914
2915 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2916 __be32 sip, __be16 sport, __be16 vlan,
2917 unsigned int queue, unsigned char port, unsigned char mask)
2918 {
2919 int ret;
2920 struct filter_entry *f;
2921 struct adapter *adap;
2922 int i;
2923 u8 *val;
2924
2925 adap = netdev2adap(dev);
2926
2927 /* Adjust stid to correct filter index */
2928 stid -= adap->tids.sftid_base;
2929 stid += adap->tids.nftids;
2930
2931 /* Check to make sure the filter requested is writable ...
2932 */
2933 f = &adap->tids.ftid_tab[stid];
2934 ret = writable_filter(f);
2935 if (ret)
2936 return ret;
2937
2938 /* Clear out any old resources being used by the filter before
2939 * we start constructing the new filter.
2940 */
2941 if (f->valid)
2942 clear_filter(adap, f);
2943
2944 /* Clear out filter specifications */
2945 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2946 f->fs.val.lport = be16_to_cpu(sport);
2947 f->fs.mask.lport = ~0;
2948 val = (u8 *)&sip;
2949 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2950 for (i = 0; i < 4; i++) {
2951 f->fs.val.lip[i] = val[i];
2952 f->fs.mask.lip[i] = ~0;
2953 }
2954 if (adap->params.tp.vlan_pri_map & PORT_F) {
2955 f->fs.val.iport = port;
2956 f->fs.mask.iport = mask;
2957 }
2958 }
2959
2960 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2961 f->fs.val.proto = IPPROTO_TCP;
2962 f->fs.mask.proto = ~0;
2963 }
2964
2965 f->fs.dirsteer = 1;
2966 f->fs.iq = queue;
2967
2968 f->locked = 1;
2969 f->fs.rpttid = 1;
2970
2971 /* Save the actual tid. We need this to get the corresponding
2972 * filter entry structure in filter_rpl.
2973 */
2974 f->tid = stid + adap->tids.ftid_base;
2975 ret = set_filter_wr(adap, stid);
2976 if (ret) {
2977 clear_filter(adap, f);
2978 return ret;
2979 }
2980
2981 return 0;
2982 }
2983 EXPORT_SYMBOL(cxgb4_create_server_filter);
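/* Usage sketch (illustrative only): an offload driver could steer
 * incoming connection requests for a local IPv4 address and TCP port to
 * a specific ingress queue with a call like the one below. The stid,
 * address, port, queue and mask values are hypothetical.
 *
 *	err = cxgb4_create_server_filter(netdev, my_stid, my_sip,
 *					 htons(4420), 0, my_rxq_id,
 *					 my_port, ~0);
 */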
2984
2985 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2986 unsigned int queue, bool ipv6)
2987 {
2988 struct filter_entry *f;
2989 struct adapter *adap;
2990
2991 adap = netdev2adap(dev);
2992
2993 /* Adjust stid to correct filter index */
2994 stid -= adap->tids.sftid_base;
2995 stid += adap->tids.nftids;
2996
2997 f = &adap->tids.ftid_tab[stid];
2998
2999 f->locked = 0;
3000
3001 return delete_filter(adap, stid);
3002 }
3003 EXPORT_SYMBOL(cxgb4_remove_server_filter);
3004
3005 static void cxgb_get_stats(struct net_device *dev,
3006 struct rtnl_link_stats64 *ns)
3007 {
3008 struct port_stats stats;
3009 struct port_info *p = netdev_priv(dev);
3010 struct adapter *adapter = p->adapter;
3011
3012 /* Block retrieving statistics during EEH error recovery.
3013 * Otherwise, the recovery might fail and the PCI device will be
3014 * removed permanently.
3015 */
3016 spin_lock(&adapter->stats_lock);
3017 if (!netif_device_present(dev)) {
3018 spin_unlock(&adapter->stats_lock);
3019 return;
3020 }
3021 t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
3022 &p->stats_base);
3023 spin_unlock(&adapter->stats_lock);
3024
3025 ns->tx_bytes = stats.tx_octets;
3026 ns->tx_packets = stats.tx_frames;
3027 ns->rx_bytes = stats.rx_octets;
3028 ns->rx_packets = stats.rx_frames;
3029 ns->multicast = stats.rx_mcast_frames;
3030
3031 /* detailed rx_errors */
3032 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3033 stats.rx_runt;
3034 ns->rx_over_errors = 0;
3035 ns->rx_crc_errors = stats.rx_fcs_err;
3036 ns->rx_frame_errors = stats.rx_symbol_err;
3037 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
3038 stats.rx_ovflow2 + stats.rx_ovflow3 +
3039 stats.rx_trunc0 + stats.rx_trunc1 +
3040 stats.rx_trunc2 + stats.rx_trunc3;
3041 ns->rx_missed_errors = 0;
3042
3043 /* detailed tx_errors */
3044 ns->tx_aborted_errors = 0;
3045 ns->tx_carrier_errors = 0;
3046 ns->tx_fifo_errors = 0;
3047 ns->tx_heartbeat_errors = 0;
3048 ns->tx_window_errors = 0;
3049
3050 ns->tx_errors = stats.tx_error_frames;
3051 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3052 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3053 }
3054
3055 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3056 {
3057 unsigned int mbox;
3058 int ret = 0, prtad, devad;
3059 struct port_info *pi = netdev_priv(dev);
3060 struct adapter *adapter = pi->adapter;
3061 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3062
3063 switch (cmd) {
3064 case SIOCGMIIPHY:
3065 if (pi->mdio_addr < 0)
3066 return -EOPNOTSUPP;
3067 data->phy_id = pi->mdio_addr;
3068 break;
3069 case SIOCGMIIREG:
3070 case SIOCSMIIREG:
3071 if (mdio_phy_id_is_c45(data->phy_id)) {
3072 prtad = mdio_phy_id_prtad(data->phy_id);
3073 devad = mdio_phy_id_devad(data->phy_id);
3074 } else if (data->phy_id < 32) {
3075 prtad = data->phy_id;
3076 devad = 0;
3077 data->reg_num &= 0x1f;
3078 } else
3079 return -EINVAL;
3080
3081 mbox = pi->adapter->pf;
3082 if (cmd == SIOCGMIIREG)
3083 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3084 data->reg_num, &data->val_out);
3085 else
3086 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3087 data->reg_num, data->val_in);
3088 break;
3089 case SIOCGHWTSTAMP:
3090 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3091 sizeof(pi->tstamp_config)) ?
3092 -EFAULT : 0;
3093 case SIOCSHWTSTAMP:
3094 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
3095 sizeof(pi->tstamp_config)))
3096 return -EFAULT;
3097
3098 if (!is_t4(adapter->params.chip)) {
3099 switch (pi->tstamp_config.tx_type) {
3100 case HWTSTAMP_TX_OFF:
3101 case HWTSTAMP_TX_ON:
3102 break;
3103 default:
3104 return -ERANGE;
3105 }
3106
3107 switch (pi->tstamp_config.rx_filter) {
3108 case HWTSTAMP_FILTER_NONE:
3109 pi->rxtstamp = false;
3110 break;
3111 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3112 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3113 cxgb4_ptprx_timestamping(pi, pi->port_id,
3114 PTP_TS_L4);
3115 break;
3116 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3117 cxgb4_ptprx_timestamping(pi, pi->port_id,
3118 PTP_TS_L2_L4);
3119 break;
3120 case HWTSTAMP_FILTER_ALL:
3121 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3122 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3123 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3124 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3125 pi->rxtstamp = true;
3126 break;
3127 default:
3128 pi->tstamp_config.rx_filter =
3129 HWTSTAMP_FILTER_NONE;
3130 return -ERANGE;
3131 }
3132
3133 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
3134 (pi->tstamp_config.rx_filter ==
3135 HWTSTAMP_FILTER_NONE)) {
3136 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
3137 pi->ptp_enable = false;
3138 }
3139
3140 if (pi->tstamp_config.rx_filter !=
3141 HWTSTAMP_FILTER_NONE) {
3142 if (cxgb4_ptp_redirect_rx_packet(adapter,
3143 pi) >= 0)
3144 pi->ptp_enable = true;
3145 }
3146 } else {
3147 /* For T4 adapters */
3148 switch (pi->tstamp_config.rx_filter) {
3149 case HWTSTAMP_FILTER_NONE:
3150 pi->rxtstamp = false;
3151 break;
3152 case HWTSTAMP_FILTER_ALL:
3153 pi->rxtstamp = true;
3154 break;
3155 default:
3156 pi->tstamp_config.rx_filter =
3157 HWTSTAMP_FILTER_NONE;
3158 return -ERANGE;
3159 }
3160 }
3161 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3162 sizeof(pi->tstamp_config)) ?
3163 -EFAULT : 0;
3164 default:
3165 return -EOPNOTSUPP;
3166 }
3167 return ret;
3168 }
3169
3170 static void cxgb_set_rxmode(struct net_device *dev)
3171 {
3172 /* unfortunately we can't return errors to the stack */
3173 set_rxmode(dev, -1, false);
3174 }
3175
3176 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3177 {
3178 struct port_info *pi = netdev_priv(dev);
3179 int ret;
3180
3181 ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
3182 pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
3183 if (!ret)
3184 dev->mtu = new_mtu;
3185 return ret;
3186 }
3187
3188 #ifdef CONFIG_PCI_IOV
3189 static int cxgb4_mgmt_open(struct net_device *dev)
3190 {
3191 /* Turn carrier off since we don't have to transmit anything on
3192 * this interface.
3193 */
3194 netif_carrier_off(dev);
3195 return 0;
3196 }
3197
3198 /* Fill MAC address that will be assigned by the FW */
3199 static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
3200 {
3201 u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
3202 unsigned int i, vf, nvfs;
3203 u16 a, b;
3204 int err;
3205 u8 *na;
3206
3207 err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
3208 if (err)
3209 return;
3210
3211 na = adap->params.vpd.na;
3212 for (i = 0; i < ETH_ALEN; i++)
3213 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
3214 hex2val(na[2 * i + 1]));
3215
3216 a = (hw_addr[0] << 8) | hw_addr[1];
3217 b = (hw_addr[1] << 8) | hw_addr[2];
3218 a ^= b;
3219 a |= 0x0200; /* locally assigned Ethernet MAC address */
3220 a &= ~0x0100; /* not a multicast Ethernet MAC address */
3221 macaddr[0] = a >> 8;
3222 macaddr[1] = a & 0xff;
3223
3224 for (i = 2; i < 5; i++)
3225 macaddr[i] = hw_addr[i + 1];
3226
3227 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
3228 vf < nvfs; vf++) {
3229 macaddr[5] = adap->pf * nvfs + vf;
3230 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
3231 }
3232 }
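/* Worked example for the derivation above (values are made up): if the
 * VPD "na" field decodes to hw_addr 00:07:43:12:34:56, then
 * a = 0x0007 ^ 0x0743 = 0x0744; setting the locally-administered bit
 * (0x0200) and clearing the multicast bit (0x0100) gives 0x0644, so each
 * VF is assigned a MAC of the form 06:44:12:34:56:XX with
 * XX = pf * totalvfs + vf.
 */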
3233
3234 static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3235 {
3236 struct port_info *pi = netdev_priv(dev);
3237 struct adapter *adap = pi->adapter;
3238 int ret;
3239
3240 /* verify that the MAC address is valid */
3241 if (!is_valid_ether_addr(mac)) {
3242 dev_err(pi->adapter->pdev_dev,
3243 "Invalid Ethernet address %pM for VF %d\n",
3244 mac, vf);
3245 return -EINVAL;
3246 }
3247
3248 dev_info(pi->adapter->pdev_dev,
3249 "Setting MAC %pM on VF %d\n", mac, vf);
3250 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
3251 if (!ret)
3252 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
3253 return ret;
3254 }
3255
3256 static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
3257 int vf, struct ifla_vf_info *ivi)
3258 {
3259 struct port_info *pi = netdev_priv(dev);
3260 struct adapter *adap = pi->adapter;
3261 struct vf_info *vfinfo;
3262
3263 if (vf >= adap->num_vfs)
3264 return -EINVAL;
3265 vfinfo = &adap->vfinfo[vf];
3266
3267 ivi->vf = vf;
3268 ivi->max_tx_rate = vfinfo->tx_rate;
3269 ivi->min_tx_rate = 0;
3270 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
3271 ivi->vlan = vfinfo->vlan;
3272 ivi->linkstate = vfinfo->link_state;
3273 return 0;
3274 }
3275
3276 static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
3277 struct netdev_phys_item_id *ppid)
3278 {
3279 struct port_info *pi = netdev_priv(dev);
3280 unsigned int phy_port_id;
3281
3282 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
3283 ppid->id_len = sizeof(phy_port_id);
3284 memcpy(ppid->id, &phy_port_id, ppid->id_len);
3285 return 0;
3286 }
3287
3288 static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
3289 int min_tx_rate, int max_tx_rate)
3290 {
3291 struct port_info *pi = netdev_priv(dev);
3292 struct adapter *adap = pi->adapter;
3293 unsigned int link_ok, speed, mtu;
3294 u32 fw_pfvf, fw_class;
3295 int class_id = vf;
3296 int ret;
3297 u16 pktsize;
3298
3299 if (vf >= adap->num_vfs)
3300 return -EINVAL;
3301
3302 if (min_tx_rate) {
3303 dev_err(adap->pdev_dev,
3304 "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
3305 min_tx_rate, vf);
3306 return -EINVAL;
3307 }
3308
3309 if (max_tx_rate == 0) {
3310 /* unbind the VF from any Traffic Class */
3311 fw_pfvf =
3312 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3313 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3314 fw_class = 0xffffffff;
3315 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3316 &fw_pfvf, &fw_class);
3317 if (ret) {
3318 dev_err(adap->pdev_dev,
3319 "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
3320 ret, adap->pf, vf);
3321 return -EINVAL;
3322 }
3323 dev_info(adap->pdev_dev,
3324 "PF %d VF %d is unbound from TX Rate Limiting\n",
3325 adap->pf, vf);
3326 adap->vfinfo[vf].tx_rate = 0;
3327 return 0;
3328 }
3329
3330 ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
3331 if (ret != FW_SUCCESS) {
3332 dev_err(adap->pdev_dev,
3333 "Failed to get link information for VF %d\n", vf);
3334 return -EINVAL;
3335 }
3336
3337 if (!link_ok) {
3338 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
3339 return -EINVAL;
3340 }
3341
3342 if (max_tx_rate > speed) {
3343 dev_err(adap->pdev_dev,
3344 "Max tx rate %d for VF %d can't be > link-speed %u",
3345 max_tx_rate, vf, speed);
3346 return -EINVAL;
3347 }
3348
3349 pktsize = mtu;
3350 /* subtract the Ethernet header and the 4-byte FCS appended by firmware */
3351 pktsize = pktsize - sizeof(struct ethhdr) - 4;
3352 /* subtract IPv4 and TCP header sizes to get a typical MSS */
3353 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
3354 /* configure a Traffic Class for this rate limit */
3355 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
3356 SCHED_CLASS_LEVEL_CL_RL,
3357 SCHED_CLASS_MODE_CLASS,
3358 SCHED_CLASS_RATEUNIT_BITS,
3359 SCHED_CLASS_RATEMODE_ABS,
3360 pi->tx_chan, class_id, 0,
3361 max_tx_rate * 1000, 0, pktsize, 0);
3362 if (ret) {
3363 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
3364 ret);
3365 return -EINVAL;
3366 }
3367 dev_info(adap->pdev_dev,
3368 "Class %d with MSS %u configured with rate %u\n",
3369 class_id, pktsize, max_tx_rate);
3370
3371 /* bind the VF to the configured Traffic Class */
3372 fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3373 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3374 fw_class = class_id;
3375 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
3376 &fw_class);
3377 if (ret) {
3378 dev_err(adap->pdev_dev,
3379 "Err %d in binding PF %d VF %d to Traffic Class %d\n",
3380 ret, adap->pf, vf, class_id);
3381 return -EINVAL;
3382 }
3383 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
3384 adap->pf, vf, class_id);
3385 adap->vfinfo[vf].tx_rate = max_tx_rate;
3386 return 0;
3387 }
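/* Worked example for the pktsize computation above: with an MTU of 1500,
 * subtracting the 14-byte Ethernet header plus 4-byte FCS and then the
 * 20-byte IPv4 and 20-byte TCP headers leaves 1500 - 18 - 40 = 1442,
 * which is passed to t4_sched_params() as the scheduler's packet size
 * (effectively a typical MSS) alongside max_tx_rate converted to Kbps.
 */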
3388
3389 static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
3390 u16 vlan, u8 qos, __be16 vlan_proto)
3391 {
3392 struct port_info *pi = netdev_priv(dev);
3393 struct adapter *adap = pi->adapter;
3394 int ret;
3395
3396 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
3397 return -EINVAL;
3398
3399 if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
3400 return -EPROTONOSUPPORT;
3401
3402 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
3403 if (!ret) {
3404 adap->vfinfo[vf].vlan = vlan;
3405 return 0;
3406 }
3407
3408 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
3409 ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
3410 return ret;
3411 }
3412
3413 static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
3414 int link)
3415 {
3416 struct port_info *pi = netdev_priv(dev);
3417 struct adapter *adap = pi->adapter;
3418 u32 param, val;
3419 int ret = 0;
3420
3421 if (vf >= adap->num_vfs)
3422 return -EINVAL;
3423
3424 switch (link) {
3425 case IFLA_VF_LINK_STATE_AUTO:
3426 val = FW_VF_LINK_STATE_AUTO;
3427 break;
3428
3429 case IFLA_VF_LINK_STATE_ENABLE:
3430 val = FW_VF_LINK_STATE_ENABLE;
3431 break;
3432
3433 case IFLA_VF_LINK_STATE_DISABLE:
3434 val = FW_VF_LINK_STATE_DISABLE;
3435 break;
3436
3437 default:
3438 return -EINVAL;
3439 }
3440
3441 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3442 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
3443 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3444 &param, &val);
3445 if (ret) {
3446 dev_err(adap->pdev_dev,
3447 "Error %d in setting PF %d VF %d link state\n",
3448 ret, adap->pf, vf);
3449 return -EINVAL;
3450 }
3451
3452 adap->vfinfo[vf].link_state = link;
3453 return ret;
3454 }
3455 #endif
3456
3457 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3458 {
3459 int ret;
3460 struct sockaddr *addr = p;
3461 struct port_info *pi = netdev_priv(dev);
3462
3463 if (!is_valid_ether_addr(addr->sa_data))
3464 return -EADDRNOTAVAIL;
3465
3466 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
3467 addr->sa_data, true, &pi->smt_idx);
3468 if (ret < 0)
3469 return ret;
3470
3471 eth_hw_addr_set(dev, addr->sa_data);
3472 return 0;
3473 }
3474
3475 #ifdef CONFIG_NET_POLL_CONTROLLER
3476 static void cxgb_netpoll(struct net_device *dev)
3477 {
3478 struct port_info *pi = netdev_priv(dev);
3479 struct adapter *adap = pi->adapter;
3480
3481 if (adap->flags & CXGB4_USING_MSIX) {
3482 int i;
3483 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3484
3485 for (i = pi->nqsets; i; i--, rx++)
3486 t4_sge_intr_msix(0, &rx->rspq);
3487 } else
3488 t4_intr_handler(adap)(0, adap);
3489 }
3490 #endif
3491
3492 static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
3493 {
3494 struct port_info *pi = netdev_priv(dev);
3495 struct adapter *adap = pi->adapter;
3496 struct ch_sched_queue qe = { 0 };
3497 struct ch_sched_params p = { 0 };
3498 struct sched_class *e;
3499 u32 req_rate;
3500 int err = 0;
3501
3502 if (!can_sched(dev))
3503 return -ENOTSUPP;
3504
3505 if (index < 0 || index > pi->nqsets - 1)
3506 return -EINVAL;
3507
3508 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3509 dev_err(adap->pdev_dev,
3510 "Failed to rate limit on queue %d. Link Down?\n",
3511 index);
3512 return -EINVAL;
3513 }
3514
3515 qe.queue = index;
3516 e = cxgb4_sched_queue_lookup(dev, &qe);
3517 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
3518 dev_err(adap->pdev_dev,
3519 "Queue %u already bound to class %u of type: %u\n",
3520 index, e->idx, e->info.u.params.level);
3521 return -EBUSY;
3522 }
3523
3524 /* Convert from Mbps to Kbps */
3525 req_rate = rate * 1000;
3526
3527 /* Reject rates beyond the scheduler's maximum */
3528 if (req_rate > SCHED_MAX_RATE_KBPS) {
3529 dev_err(adap->pdev_dev,
3530 "Invalid rate %u Mbps, Max rate is %u Mbps\n",
3531 rate, SCHED_MAX_RATE_KBPS / 1000);
3532 return -ERANGE;
3533 }
3534
3535 /* First unbind the queue from any existing scheduling class */
3536 memset(&qe, 0, sizeof(qe));
3537 qe.queue = index;
3538 qe.class = SCHED_CLS_NONE;
3539
3540 err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
3541 if (err) {
3542 dev_err(adap->pdev_dev,
3543 "Unbinding Queue %d on port %d fail. Err: %d\n",
3544 index, pi->port_id, err);
3545 return err;
3546 }
3547
3548 /* Queue is now unbound; nothing more to do for a zero rate */
3549 if (!req_rate)
3550 return 0;
3551
3552 /* Allocate a rate-limiting scheduling class with the requested parameters */
3553 p.type = SCHED_CLASS_TYPE_PACKET;
3554 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
3555 p.u.params.mode = SCHED_CLASS_MODE_CLASS;
3556 p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
3557 p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
3558 p.u.params.channel = pi->tx_chan;
3559 p.u.params.class = SCHED_CLS_NONE;
3560 p.u.params.minrate = 0;
3561 p.u.params.maxrate = req_rate;
3562 p.u.params.weight = 0;
3563 p.u.params.pktsize = dev->mtu;
3564
3565 e = cxgb4_sched_class_alloc(dev, &p);
3566 if (!e)
3567 return -ENOMEM;
3568
3569 /* Bind the queue to the allocated scheduling class */
3570 memset(&qe, 0, sizeof(qe));
3571 qe.queue = index;
3572 qe.class = e->idx;
3573
3574 err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
3575 if (err)
3576 dev_err(adap->pdev_dev,
3577 "Queue rate limiting failed. Err: %d\n", err);
3578 return err;
3579 }
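/* Usage sketch (illustrative only): .ndo_set_tx_maxrate is normally
 * reached from user space through the per-queue sysfs attribute, e.g.
 *
 *	# echo 100 > /sys/class/net/<ifname>/queues/tx-0/tx_maxrate
 *
 * which, as the conversion above shows, requests a 100 Mbps
 * (100000 Kbps) class rate limit on TX queue 0 of that port.
 */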
3580
3581 static int cxgb_setup_tc_flower(struct net_device *dev,
3582 struct flow_cls_offload *cls_flower)
3583 {
3584 switch (cls_flower->command) {
3585 case FLOW_CLS_REPLACE:
3586 return cxgb4_tc_flower_replace(dev, cls_flower);
3587 case FLOW_CLS_DESTROY:
3588 return cxgb4_tc_flower_destroy(dev, cls_flower);
3589 case FLOW_CLS_STATS:
3590 return cxgb4_tc_flower_stats(dev, cls_flower);
3591 default:
3592 return -EOPNOTSUPP;
3593 }
3594 }
3595
3596 static int cxgb_setup_tc_cls_u32(struct net_device *dev,
3597 struct tc_cls_u32_offload *cls_u32)
3598 {
3599 switch (cls_u32->command) {
3600 case TC_CLSU32_NEW_KNODE:
3601 case TC_CLSU32_REPLACE_KNODE:
3602 return cxgb4_config_knode(dev, cls_u32);
3603 case TC_CLSU32_DELETE_KNODE:
3604 return cxgb4_delete_knode(dev, cls_u32);
3605 default:
3606 return -EOPNOTSUPP;
3607 }
3608 }
3609
3610 static int cxgb_setup_tc_matchall(struct net_device *dev,
3611 struct tc_cls_matchall_offload *cls_matchall,
3612 bool ingress)
3613 {
3614 struct adapter *adap = netdev2adap(dev);
3615
3616 if (!adap->tc_matchall)
3617 return -ENOMEM;
3618
3619 switch (cls_matchall->command) {
3620 case TC_CLSMATCHALL_REPLACE:
3621 return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
3622 case TC_CLSMATCHALL_DESTROY:
3623 return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
3624 case TC_CLSMATCHALL_STATS:
3625 if (ingress)
3626 return cxgb4_tc_matchall_stats(dev, cls_matchall);
3627 break;
3628 default:
3629 break;
3630 }
3631
3632 return -EOPNOTSUPP;
3633 }
3634
3635 static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
3636 void *type_data, void *cb_priv)
3637 {
3638 struct net_device *dev = cb_priv;
3639 struct port_info *pi = netdev2pinfo(dev);
3640 struct adapter *adap = netdev2adap(dev);
3641
3642 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3643 dev_err(adap->pdev_dev,
3644 "Failed to setup tc on port %d. Link Down?\n",
3645 pi->port_id);
3646 return -EINVAL;
3647 }
3648
3649 if (!tc_cls_can_offload_and_chain0(dev, type_data))
3650 return -EOPNOTSUPP;
3651
3652 switch (type) {
3653 case TC_SETUP_CLSU32:
3654 return cxgb_setup_tc_cls_u32(dev, type_data);
3655 case TC_SETUP_CLSFLOWER:
3656 return cxgb_setup_tc_flower(dev, type_data);
3657 case TC_SETUP_CLSMATCHALL:
3658 return cxgb_setup_tc_matchall(dev, type_data, true);
3659 default:
3660 return -EOPNOTSUPP;
3661 }
3662 }
3663
3664 static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
3665 void *type_data, void *cb_priv)
3666 {
3667 struct net_device *dev = cb_priv;
3668 struct port_info *pi = netdev2pinfo(dev);
3669 struct adapter *adap = netdev2adap(dev);
3670
3671 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3672 dev_err(adap->pdev_dev,
3673 "Failed to setup tc on port %d. Link Down?\n",
3674 pi->port_id);
3675 return -EINVAL;
3676 }
3677
3678 if (!tc_cls_can_offload_and_chain0(dev, type_data))
3679 return -EOPNOTSUPP;
3680
3681 switch (type) {
3682 case TC_SETUP_CLSMATCHALL:
3683 return cxgb_setup_tc_matchall(dev, type_data, false);
3684 default:
3685 break;
3686 }
3687
3688 return -EOPNOTSUPP;
3689 }
3690
3691 static int cxgb_setup_tc_mqprio(struct net_device *dev,
3692 struct tc_mqprio_qopt_offload *mqprio)
3693 {
3694 struct adapter *adap = netdev2adap(dev);
3695
3696 if (!is_ethofld(adap) || !adap->tc_mqprio)
3697 return -ENOMEM;
3698
3699 return cxgb4_setup_tc_mqprio(dev, mqprio);
3700 }
3701
3702 static LIST_HEAD(cxgb_block_cb_list);
3703
3704 static int cxgb_setup_tc_block(struct net_device *dev,
3705 struct flow_block_offload *f)
3706 {
3707 struct port_info *pi = netdev_priv(dev);
3708 flow_setup_cb_t *cb;
3709 bool ingress_only;
3710
3711 pi->tc_block_shared = f->block_shared;
3712 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
3713 cb = cxgb_setup_tc_block_egress_cb;
3714 ingress_only = false;
3715 } else {
3716 cb = cxgb_setup_tc_block_ingress_cb;
3717 ingress_only = true;
3718 }
3719
3720 return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
3721 cb, pi, dev, ingress_only);
3722 }
3723
3724 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3725 void *type_data)
3726 {
3727 switch (type) {
3728 case TC_SETUP_QDISC_MQPRIO:
3729 return cxgb_setup_tc_mqprio(dev, type_data);
3730 case TC_SETUP_BLOCK:
3731 return cxgb_setup_tc_block(dev, type_data);
3732 default:
3733 return -EOPNOTSUPP;
3734 }
3735 }
3736
3737 static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
3738 unsigned int table, unsigned int entry,
3739 struct udp_tunnel_info *ti)
3740 {
3741 struct port_info *pi = netdev_priv(netdev);
3742 struct adapter *adapter = pi->adapter;
3743 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3744 int ret = 0, i;
3745
3746 switch (ti->type) {
3747 case UDP_TUNNEL_TYPE_VXLAN:
3748 adapter->vxlan_port = 0;
3749 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3750 break;
3751 case UDP_TUNNEL_TYPE_GENEVE:
3752 adapter->geneve_port = 0;
3753 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3754 break;
3755 default:
3756 return -EINVAL;
3757 }
3758
3759 /* Free the raw "match all" MAC filters that were installed on each
3760 * port for tunnel RX offload, now that the tunnel port is gone.
3761 */
3762 if (!adapter->rawf_cnt)
3763 return 0;
3764 for_each_port(adapter, i) {
3765 pi = adap2pinfo(adapter, i);
3766 ret = t4_free_raw_mac_filt(adapter, pi->viid,
3767 match_all_mac, match_all_mac,
3768 adapter->rawf_start + pi->port_id,
3769 1, pi->port_id, false);
3770 if (ret < 0) {
3771 netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
3772 i);
3773 return ret;
3774 }
3775 }
3776
3777 return 0;
3778 }
3779
3780 static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
3781 unsigned int table, unsigned int entry,
3782 struct udp_tunnel_info *ti)
3783 {
3784 struct port_info *pi = netdev_priv(netdev);
3785 struct adapter *adapter = pi->adapter;
3786 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3787 int i, ret;
3788
3789 switch (ti->type) {
3790 case UDP_TUNNEL_TYPE_VXLAN:
3791 adapter->vxlan_port = ti->port;
3792 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3793 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3794 break;
3795 case UDP_TUNNEL_TYPE_GENEVE:
3796 adapter->geneve_port = ti->port;
3797 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3798 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3799 break;
3800 default:
3801 return -EINVAL;
3802 }
3803
3804 /* Install a raw "match all" MAC filter entry for the inner MAC on
3805 * each port, so that encapsulated traffic for the newly added tunnel
3806 * port is accepted and can be offloaded. A single wildcard entry per
3807 * port is used instead of exact inner-MAC filters.
3808 */
3809
3810 for_each_port(adapter, i) {
3811 pi = adap2pinfo(adapter, i);
3812
3813 ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3814 match_all_mac,
3815 match_all_mac,
3816 adapter->rawf_start + pi->port_id,
3817 1, pi->port_id, false);
3818 if (ret < 0) {
3819 netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3820 be16_to_cpu(ti->port));
3821 return ret;
3822 }
3823 }
3824
3825 return 0;
3826 }
3827
3828 static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
3829 .set_port = cxgb_udp_tunnel_set_port,
3830 .unset_port = cxgb_udp_tunnel_unset_port,
3831 .tables = {
3832 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
3833 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
3834 },
3835 };
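/* Note: the table above advertises a single entry per tunnel type, i.e.
 * the hardware MPS registers programmed in cxgb_udp_tunnel_set_port() can
 * track exactly one VXLAN UDP port and one GENEVE UDP port at a time; the
 * UDP tunnel NIC infrastructure enforces that limit for the driver.
 */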
3836
3837 static netdev_features_t cxgb_features_check(struct sk_buff *skb,
3838 struct net_device *dev,
3839 netdev_features_t features)
3840 {
3841 struct port_info *pi = netdev_priv(dev);
3842 struct adapter *adapter = pi->adapter;
3843
3844 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3845 return features;
3846
3847 /* Check if hw supports offload for this packet */
3848 if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3849 return features;
3850
3851 /* Offload is not supported for this encapsulated packet */
3852 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3853 }
3854
3855 static netdev_features_t cxgb_fix_features(struct net_device *dev,
3856 netdev_features_t features)
3857 {
3858 /* Disable GRO, if RX checksum offload is disabled */
3859 if (!(features & NETIF_F_RXCSUM))
3860 features &= ~NETIF_F_GRO;
3861
3862 return features;
3863 }
3864
3865 static const struct net_device_ops cxgb4_netdev_ops = {
3866 .ndo_open = cxgb_open,
3867 .ndo_stop = cxgb_close,
3868 .ndo_start_xmit = t4_start_xmit,
3869 .ndo_select_queue = cxgb_select_queue,
3870 .ndo_get_stats64 = cxgb_get_stats,
3871 .ndo_set_rx_mode = cxgb_set_rxmode,
3872 .ndo_set_mac_address = cxgb_set_mac_addr,
3873 .ndo_set_features = cxgb_set_features,
3874 .ndo_validate_addr = eth_validate_addr,
3875 .ndo_eth_ioctl = cxgb_ioctl,
3876 .ndo_change_mtu = cxgb_change_mtu,
3877 #ifdef CONFIG_NET_POLL_CONTROLLER
3878 .ndo_poll_controller = cxgb_netpoll,
3879 #endif
3880 #ifdef CONFIG_CHELSIO_T4_FCOE
3881 .ndo_fcoe_enable = cxgb_fcoe_enable,
3882 .ndo_fcoe_disable = cxgb_fcoe_disable,
3883 #endif
3884 .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
3885 .ndo_setup_tc = cxgb_setup_tc,
3886 .ndo_features_check = cxgb_features_check,
3887 .ndo_fix_features = cxgb_fix_features,
3888 };
3889
3890 #ifdef CONFIG_PCI_IOV
3891 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3892 .ndo_open = cxgb4_mgmt_open,
3893 .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
3894 .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
3895 .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
3896 .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
3897 .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
3898 .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
3899 };
3900
3901 static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
3902 struct ethtool_drvinfo *info)
3903 {
3904 struct adapter *adapter = netdev2adap(dev);
3905
3906 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3907 strlcpy(info->bus_info, pci_name(adapter->pdev),
3908 sizeof(info->bus_info));
3909 }
3910
3911 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
3912 .get_drvinfo = cxgb4_mgmt_get_drvinfo,
3913 };
3914 #endif
3915
3916 static void notify_fatal_err(struct work_struct *work)
3917 {
3918 struct adapter *adap;
3919
3920 adap = container_of(work, struct adapter, fatal_err_notify_task);
3921 notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
3922 }
3923
3924 void t4_fatal_err(struct adapter *adap)
3925 {
3926 int port;
3927
3928 if (pci_channel_offline(adap->pdev))
3929 return;
3930
3931 /* Disable the SGE since the ULDs are going to free resources
3932 * that the hardware could still be accessing.
3933 */
3934 t4_shutdown_adapter(adap);
3935 for_each_port(adap, port) {
3936 struct net_device *dev = adap->port[port];
3937
3938 /* If we get here in very early initialization the network
3939 * devices may not have been set up yet.
3940 */
3941 if (!dev)
3942 continue;
3943
3944 netif_tx_stop_all_queues(dev);
3945 netif_carrier_off(dev);
3946 }
3947 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3948 queue_work(adap->workq, &adap->fatal_err_notify_task);
3949 }
3950
3951 static void setup_memwin(struct adapter *adap)
3952 {
3953 u32 nic_win_base = t4_get_util_window(adap);
3954
3955 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3956 }
3957
3958 static void setup_memwin_rdma(struct adapter *adap)
3959 {
3960 if (adap->vres.ocq.size) {
3961 u32 start;
3962 unsigned int sz_kb;
3963
3964 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3965 start &= PCI_BASE_ADDRESS_MEM_MASK;
3966 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3967 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3968 t4_write_reg(adap,
3969 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3970 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3971 t4_write_reg(adap,
3972 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3973 adap->vres.ocq.start);
3974 t4_read_reg(adap,
3975 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3976 }
3977 }
3978
3979 /* HMA (Host Memory Access) definitions */
3980
3981 /* Maximum number of page addresses carried in a single FW_HMA_CMD */
3982 #define HMA_MAX_ADDR_IN_CMD 5
3983
3984 #define HMA_PAGE_SIZE PAGE_SIZE
3985
3986 #define HMA_MAX_NO_FW_ADDRESS (16 << 10)
3987
3988 #define HMA_PAGE_ORDER \
3989 ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ? \
3990 ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
3991
3992
3993 /* Minimum and maximum HMA sizes that the firmware configuration may
3994 * request, in units of MB. */
3995 #define HMA_MIN_TOTAL_SIZE 1
3996 #define HMA_MAX_TOTAL_SIZE \
3997 (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) * \
3998 HMA_MAX_NO_FW_ADDRESS) >> 20)
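/* Worked example for the HMA sizing macros above, assuming 4 KB pages:
 * HMA_PAGE_ORDER = ilog2(16384 / 4096) = 2, so each HMA allocation is an
 * order-2 (16 KB) page cluster, and HMA_MAX_TOTAL_SIZE =
 * (16 KB * 16384) >> 20 = 256 MB, i.e. the firmware can be handed at most
 * 16K addresses of 16 KB each.
 */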
3999
4000 static void adap_free_hma_mem(struct adapter *adapter)
4001 {
4002 struct scatterlist *iter;
4003 struct page *page;
4004 int i;
4005
4006 if (!adapter->hma.sgt)
4007 return;
4008
4009 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
4010 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
4011 adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
4012 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
4013 }
4014
4015 for_each_sg(adapter->hma.sgt->sgl, iter,
4016 adapter->hma.sgt->orig_nents, i) {
4017 page = sg_page(iter);
4018 if (page)
4019 __free_pages(page, HMA_PAGE_ORDER);
4020 }
4021
4022 kfree(adapter->hma.phy_addr);
4023 sg_free_table(adapter->hma.sgt);
4024 kfree(adapter->hma.sgt);
4025 adapter->hma.sgt = NULL;
4026 }
4027
4028 static int adap_config_hma(struct adapter *adapter)
4029 {
4030 struct scatterlist *sgl, *iter;
4031 struct sg_table *sgt;
4032 struct page *newpage;
4033 unsigned int i, j, k;
4034 u32 param, hma_size;
4035 unsigned int ncmds;
4036 size_t page_size;
4037 u32 page_order;
4038 int node, ret;
4039
4040 /* HMA is supported only on T6+ adapters; also avoid setting it
4041 * up in kdump kernels.
4042 */
4043 if (is_kdump_kernel() ||
4044 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
4045 return 0;
4046
4047 /* Query the HMA region size requested by the firmware */
4048 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4049 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
4050 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
4051 1, &param, &hma_size);
4052
4053 /* An error (or a zero size) means the card has its own memory or
4054 * HMA is not supported by the firmware; run without HMA. */
4055 if (ret || !hma_size)
4056 return 0;
4057
4058 if (hma_size < HMA_MIN_TOTAL_SIZE ||
4059 hma_size > HMA_MAX_TOTAL_SIZE) {
4060 dev_err(adapter->pdev_dev,
4061 "HMA size %uMB beyond bounds(%u-%lu)MB\n",
4062 hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
4063 return -EINVAL;
4064 }
4065
4066 page_size = HMA_PAGE_SIZE;
4067 page_order = HMA_PAGE_ORDER;
4068 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
4069 if (unlikely(!adapter->hma.sgt)) {
4070 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
4071 return -ENOMEM;
4072 }
4073 sgt = adapter->hma.sgt;
4074
4075
4076 sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
4077 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
4078 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
4079 kfree(adapter->hma.sgt);
4080 adapter->hma.sgt = NULL;
4081 return -ENOMEM;
4082 }
4083
4084 sgl = adapter->hma.sgt->sgl;
4085 node = dev_to_node(adapter->pdev_dev);
4086 for_each_sg(sgl, iter, sgt->orig_nents, i) {
4087 newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
4088 __GFP_ZERO, page_order);
4089 if (!newpage) {
4090 dev_err(adapter->pdev_dev,
4091 "Not enough memory for HMA page allocation\n");
4092 ret = -ENOMEM;
4093 goto free_hma;
4094 }
4095 sg_set_page(iter, newpage, page_size << page_order, 0);
4096 }
4097
4098 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
4099 DMA_BIDIRECTIONAL);
4100 if (!sgt->nents) {
4101 dev_err(adapter->pdev_dev,
4102 "Not enough memory for HMA DMA mapping");
4103 ret = -ENOMEM;
4104 goto free_hma;
4105 }
4106 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
4107
4108 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
4109 GFP_KERNEL);
4110 if (unlikely(!adapter->hma.phy_addr))
4111 goto free_hma;
4112
4113 for_each_sg(sgl, iter, sgt->nents, i) {
4114 newpage = sg_page(iter);
4115 adapter->hma.phy_addr[i] = sg_dma_address(iter);
4116 }
4117
4118 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
4119
4120 for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
4121 struct fw_hma_cmd hma_cmd;
4122 u8 naddr = HMA_MAX_ADDR_IN_CMD;
4123 u8 soc = 0, eoc = 0;
4124 u8 hma_mode = 1;
4125
4126 soc = (i == 0) ? 1 : 0;
4127 eoc = (i == ncmds - 1) ? 1 : 0;
4128
4129
4130
4131
4132 if (i == ncmds - 1) {
4133 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
4134 naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
4135 }
4136 memset(&hma_cmd, 0, sizeof(hma_cmd));
4137 hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
4138 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4139 hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
4140
4141 hma_cmd.mode_to_pcie_params =
4142 htonl(FW_HMA_CMD_MODE_V(hma_mode) |
4143 FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
4144
4145
4146 hma_cmd.naddr_size =
4147 htonl(FW_HMA_CMD_SIZE_V(hma_size) |
4148 FW_HMA_CMD_NADDR_V(naddr));
4149
4150
4151 hma_cmd.addr_size_pkd =
4152 htonl(FW_HMA_CMD_ADDR_SIZE_V
4153 ((page_size << page_order) >> 12));
4154
4155
4156 for (j = 0; j < naddr; j++) {
4157 hma_cmd.phy_address[j] =
4158 cpu_to_be64(adapter->hma.phy_addr[j + k]);
4159 }
4160 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
4161 sizeof(hma_cmd), &hma_cmd);
4162 if (ret) {
4163 dev_err(adapter->pdev_dev,
4164 "HMA FW command failed with err %d\n", ret);
4165 goto free_hma;
4166 }
4167 }
4168
4169 if (!ret)
4170 dev_info(adapter->pdev_dev,
4171 "Reserved %uMB host memory for HMA\n", hma_size);
4172 return ret;
4173
4174 free_hma:
4175 adap_free_hma_mem(adapter);
4176 return ret;
4177 }
4178
4179 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4180 {
4181 u32 v;
4182 int ret;
4183
4184 /* Now that we've successfully configured and initialized the adapter
4185 * we can ask the firmware what resources it has provisioned for us.
4186 */
4187 ret = t4_get_pfres(adap);
4188 if (ret) {
4189 dev_err(adap->pdev_dev,
4190 "Unable to retrieve resource provisioning information\n");
4191 return ret;
4192 }
4193
4194
4195 memset(c, 0, sizeof(*c));
4196 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4197 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4198 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4199 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
4200 if (ret < 0)
4201 return ret;
4202
4203 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4204 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4205 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
4206 if (ret < 0)
4207 return ret;
4208
4209 ret = t4_config_glbl_rss(adap, adap->pf,
4210 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4211 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
4212 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
4213 if (ret < 0)
4214 return ret;
4215
4216 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
4217 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
4218 FW_CMD_CAP_PF);
4219 if (ret < 0)
4220 return ret;
4221
4222 t4_sge_init(adap);
4223
4224
4225 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
4226 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
4227 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
4228 v = t4_read_reg(adap, TP_PIO_DATA_A);
4229 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
4230
4231 /* first 4 Tx modulation queues point to consecutive Tx channels */
4232 adap->params.tp.tx_modq_map = 0xE4;
4233 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
4234 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
4235
4236 /* associate each Tx modulation queue with consecutive Tx channels */
4237 v = 0x84218421;
4238 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4239 &v, 1, TP_TX_SCHED_HDR_A);
4240 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4241 &v, 1, TP_TX_SCHED_FIFO_A);
4242 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4243 &v, 1, TP_TX_SCHED_PCMD_A);
4244
4245 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16
4246 if (is_offload(adap)) {
4247 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
4248 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4249 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4250 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4251 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4252 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
4253 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4254 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4255 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4256 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4257 }
4258
4259
4260 return t4_early_init(adap, adap->pf);
4261 }
4262
4263
4264
4265
4266 #define MAX_ATIDS 8192U
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284 static int adap_init0_tweaks(struct adapter *adapter)
4285 {
4286 /* Fix up various host-dependent parameters like Page Size and Cache
4287 * Line Size. The firmware defaults assume a 4KB page size and a 64B
4288 * cache line size, so adjust them to the values this host actually
4289 * uses.
4290 */
4291 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4292
4293
4294
4295 /* Process module parameters which affect early initialization. */
4296 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4297 dev_err(&adapter->pdev->dev,
4298 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4299 rx_dma_offset);
4300 rx_dma_offset = 2;
4301 }
4302 t4_set_reg_field(adapter, SGE_CONTROL_A,
4303 PKTSHIFT_V(PKTSHIFT_M),
4304 PKTSHIFT_V(rx_dma_offset));
4305
4306
4307 /* Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums:
4308 * Linux adds the pseudo header itself.
4309 */
4310 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
4311 CSUM_HAS_PSEUDO_HDR_F, 0);
4312
4313 return 0;
4314 }
4315
4316 /* 10Gb/s-BT PHY support: chip-external 10Gb/s-BT PHYs contain their
4317 * own firmware which must be installed and initialized before the
4318 * adapter can use them. The helpers below locate and parse the PHY
4319 * firmware images. */
4320 static int phy_aq1202_version(const u8 *phy_fw_data,
4321 size_t phy_fw_size)
4322 {
4323 int offset;
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
4335 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
4336 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
4337
4338 offset = le24(phy_fw_data + 0x8) << 12;
4339 offset = le24(phy_fw_data + offset + 0xa);
4340 return be16(phy_fw_data + offset + 0x27e);
4341
4342 #undef be16
4343 #undef le16
4344 #undef le24
4345 }
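/* Note on the helpers above (reading of the code, not a spec reference):
 * the AQ1202 image stores a little-endian 24-bit pointer at offset 0x8 to
 * a 4 KB-aligned block, inside which another 24-bit pointer at +0xa
 * locates the region holding the big-endian 16-bit firmware version at
 * +0x27e; be16/le16/le24 are just local byte-assembly macros for walking
 * that layout.
 */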
4346
4347 static struct info_10gbt_phy_fw {
4348 unsigned int phy_fw_id;
4349 char *phy_fw_file;
4350 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
4351 int phy_flash;
4352 } phy_info_array[] = {
4353 {
4354 PHY_AQ1202_DEVICEID,
4355 PHY_AQ1202_FIRMWARE,
4356 phy_aq1202_version,
4357 1,
4358 },
4359 {
4360 PHY_BCM84834_DEVICEID,
4361 PHY_BCM84834_FIRMWARE,
4362 NULL,
4363 0,
4364 },
4365 { 0, NULL, NULL },
4366 };
4367
4368 static struct info_10gbt_phy_fw *find_phy_info(int devid)
4369 {
4370 int i;
4371
4372 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
4373 if (phy_info_array[i].phy_fw_id == devid)
4374 return &phy_info_array[i];
4375 }
4376 return NULL;
4377 }
4378
4379
4380 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs
4381 * to happen after the firmware RESET but before the adapter is
4382 * initialized, since some PHYs only keep a local RAM copy of their
4383 * firmware. */
4384 static int adap_init0_phy(struct adapter *adap)
4385 {
4386 const struct firmware *phyf;
4387 int ret;
4388 struct info_10gbt_phy_fw *phy_info;
4389
4390
4391 /* Use the PCI device ID to determine which PHY firmware to use */
4392 phy_info = find_phy_info(adap->pdev->device);
4393 if (!phy_info) {
4394 dev_warn(adap->pdev_dev,
4395 "No PHY Firmware file found for this PHY\n");
4396 return -EOPNOTSUPP;
4397 }
4398
4399 /* If we have a PHY firmware file under /lib/firmware/cxgb4/, then
4400 * use that. The adapter firmware provides a memory buffer where we
4401 * can load a PHY firmware file from the host to override the PHY
4402 * firmware in FLASH.
4403 */
4404 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
4405 adap->pdev_dev);
4406 if (ret < 0) {
4407 /* For adapters whose PHY has no FLASH of its own, failing to load
4408 * the PHY firmware from the host is fatal. For adapters with PHY
4409 * firmware FLASH storage it is only worth a warning, and we fall
4410 * back to the on-adapter copy below.
4411 */
4412
4413 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
4414 "/lib/firmware/%s, error %d\n",
4415 phy_info->phy_fw_file, -ret);
4416 if (phy_info->phy_flash) {
4417 int cur_phy_fw_ver = 0;
4418
4419 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4420 dev_warn(adap->pdev_dev, "continuing with, on-adapter "
4421 "FLASH copy, version %#x\n", cur_phy_fw_ver);
4422 ret = 0;
4423 }
4424
4425 return ret;
4426 }
4427
4428
4429 /* Load the PHY firmware onto the adapter */
4430 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
4431 (u8 *)phyf->data, phyf->size);
4432 if (ret < 0)
4433 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
4434 -ret);
4435 else if (ret > 0) {
4436 int new_phy_fw_ver = 0;
4437
4438 if (phy_info->phy_fw_version)
4439 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
4440 phyf->size);
4441 dev_info(adap->pdev_dev, "Successfully transferred PHY "
4442 "Firmware /lib/firmware/%s, version %#x\n",
4443 phy_info->phy_fw_file, new_phy_fw_ver);
4444 }
4445
4446 release_firmware(phyf);
4447
4448 return ret;
4449 }
4450
4451 /*
4452 * Attempt to initialize the adapter via a Firmware Configuration File.
4453 */
4454 static int adap_init0_config(struct adapter *adapter, int reset)
4455 {
4456 char *fw_config_file, fw_config_file_path[256];
4457 u32 finiver, finicsum, cfcsum, param, val;
4458 struct fw_caps_config_cmd caps_cmd;
4459 unsigned long mtype = 0, maddr = 0;
4460 const struct firmware *cf;
4461 char *config_name = NULL;
4462 int config_issued = 0;
4463 int ret;
4464
4465
4466
4467
4468 if (reset) {
4469 ret = t4_fw_reset(adapter, adapter->mbox,
4470 PIORSTMODE_F | PIORST_F);
4471 if (ret < 0)
4472 goto bye;
4473 }
4474
4475 /* If this is a 10Gb/s-BT adapter make sure the chip-external
4476 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step
4477 * needs to be performed after any global adapter RESET above since
4478 * some PHYs only have local RAM copies of the PHY firmware.
4479 */
4480 if (is_10gbt_device(adapter->pdev->device)) {
4481 ret = adap_init0_phy(adapter);
4482 if (ret < 0)
4483 goto bye;
4484 }
4485
4486 /* Pick the Firmware Configuration File name that matches this chip
4487 * generation; we first try to load it from /lib/firmware below and
4488 * fall back to the copy stored in the adapter's FLASH if that fails.
4489 */
4490 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4491 case CHELSIO_T4:
4492 fw_config_file = FW4_CFNAME;
4493 break;
4494 case CHELSIO_T5:
4495 fw_config_file = FW5_CFNAME;
4496 break;
4497 case CHELSIO_T6:
4498 fw_config_file = FW6_CFNAME;
4499 break;
4500 default:
4501 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4502 adapter->pdev->device);
4503 ret = -EINVAL;
4504 goto bye;
4505 }
4506
4507 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4508 if (ret < 0) {
4509 config_name = "On FLASH";
4510 mtype = FW_MEMTYPE_CF_FLASH;
4511 maddr = t4_flash_cfg_addr(adapter);
4512 } else {
4513 u32 params[7], val[7];
4514
4515 sprintf(fw_config_file_path,
4516 "/lib/firmware/%s", fw_config_file);
4517 config_name = fw_config_file_path;
4518
4519 if (cf->size >= FLASH_CFG_MAX_SIZE)
4520 ret = -ENOMEM;
4521 else {
4522 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4523 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4524 ret = t4_query_params(adapter, adapter->mbox,
4525 adapter->pf, 0, 1, params, val);
4526 if (ret == 0) {
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
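/* The firmware told us where to stage the configuration file (memory
 * type in PARAM_Y, 64KB-aligned address in PARAM_Z).  Copy it there as
 * 32-bit words, zero-padding any trailing partial word.
 */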
4537 size_t resid = cf->size & 0x3;
4538 size_t size = cf->size & ~0x3;
4539 __be32 *data = (__be32 *)cf->data;
4540
4541 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4542 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
4543
4544 spin_lock(&adapter->win0_lock);
4545 ret = t4_memory_rw(adapter, 0, mtype, maddr,
4546 size, data, T4_MEMORY_WRITE);
4547 if (ret == 0 && resid != 0) {
4548 union {
4549 __be32 word;
4550 char buf[4];
4551 } last;
4552 int i;
4553
4554 last.word = data[size >> 2];
4555 for (i = resid; i < 4; i++)
4556 last.buf[i] = 0;
4557 ret = t4_memory_rw(adapter, 0, mtype,
4558 maddr + size,
4559 4, &last.word,
4560 T4_MEMORY_WRITE);
4561 }
4562 spin_unlock(&adapter->win0_lock);
4563 }
4564 }
4565
4566 release_firmware(cf);
4567 if (ret)
4568 goto bye;
4569 }
4570
4571 val = 0;
4572
4573
4574
4575
4576 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4577 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
4578 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
4579 1, &param, &val);
4580
4581
4582
4583
4584 if (ret < 0) {
4585 dev_warn(adapter->pdev_dev,
4586 "Hash filter with ofld is not supported by FW\n");
4587 }
4588
4589
4590
4591
4592
4593
4594
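/* Issue a Capability Configuration command to the firmware, pointing it
 * at the configuration file we just staged (or the FLASH copy), so it
 * can parse the file and report back the resulting device capabilities.
 */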
4595 memset(&caps_cmd, 0, sizeof(caps_cmd));
4596 caps_cmd.op_to_write =
4597 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4598 FW_CMD_REQUEST_F |
4599 FW_CMD_READ_F);
4600 caps_cmd.cfvalid_to_len16 =
4601 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4602 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4603 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
4604 FW_LEN16(caps_cmd));
4605 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4606 &caps_cmd);
4607
4608
4609
4610
4611
4612
4613
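/* If the firmware can't find the configuration file we pointed it at,
 * retry without CFVALID so it falls back to its built-in default
 * configuration.
 */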
4614 if (ret == -ENOENT) {
4615 memset(&caps_cmd, 0, sizeof(caps_cmd));
4616 caps_cmd.op_to_write =
4617 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4618 FW_CMD_REQUEST_F |
4619 FW_CMD_READ_F);
4620 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4621 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4622 sizeof(caps_cmd), &caps_cmd);
4623 config_name = "Firmware Default";
4624 }
4625
4626 config_issued = 1;
4627 if (ret < 0)
4628 goto bye;
4629
4630 finiver = ntohl(caps_cmd.finiver);
4631 finicsum = ntohl(caps_cmd.finicsum);
4632 cfcsum = ntohl(caps_cmd.cfcsum);
4633 if (finicsum != cfcsum)
4634 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4635 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4636 finicsum, cfcsum);
4637
4638
4639
4640
4641 caps_cmd.op_to_write =
4642 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4643 FW_CMD_REQUEST_F |
4644 FW_CMD_WRITE_F);
4645 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4646 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4647 NULL);
4648 if (ret < 0)
4649 goto bye;
4650
4651
4652
4653
4654
4655 ret = adap_init0_tweaks(adapter);
4656 if (ret < 0)
4657 goto bye;
4658
4659
4660 ret = adap_config_hma(adapter);
4661 if (ret)
4662 dev_err(adapter->pdev_dev,
4663 "HMA configuration failed with error %d\n", ret);
4664
4665 if (is_t6(adapter->params.chip)) {
4666 adap_config_hpfilter(adapter);
4667 ret = setup_ppod_edram(adapter);
4668 if (!ret)
4669 dev_info(adapter->pdev_dev, "Successfully enabled "
4670 "ppod edram feature\n");
4671 }
4672
4673
4674
4675
4676
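/* Now that the configuration has been processed and our tweaks applied,
 * tell the firmware to complete its initialization.
 */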
4677 ret = t4_fw_initialize(adapter, adapter->mbox);
4678 if (ret < 0)
4679 goto bye;
4680
4681
4682
4683
4684 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4685 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4686 config_name, finiver, cfcsum);
4687 return 0;
4688
4689
4690
4691
4692
4693
4694 bye:
4695 if (config_issued && ret != -ENOENT)
4696 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4697 config_name, -ret);
4698 return ret;
4699 }
4700
4701 static struct fw_info fw_info_array[] = {
4702 {
4703 .chip = CHELSIO_T4,
4704 .fs_name = FW4_CFNAME,
4705 .fw_mod_name = FW4_FNAME,
4706 .fw_hdr = {
4707 .chip = FW_HDR_CHIP_T4,
4708 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4709 .intfver_nic = FW_INTFVER(T4, NIC),
4710 .intfver_vnic = FW_INTFVER(T4, VNIC),
4711 .intfver_ri = FW_INTFVER(T4, RI),
4712 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4713 .intfver_fcoe = FW_INTFVER(T4, FCOE),
4714 },
4715 }, {
4716 .chip = CHELSIO_T5,
4717 .fs_name = FW5_CFNAME,
4718 .fw_mod_name = FW5_FNAME,
4719 .fw_hdr = {
4720 .chip = FW_HDR_CHIP_T5,
4721 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4722 .intfver_nic = FW_INTFVER(T5, NIC),
4723 .intfver_vnic = FW_INTFVER(T5, VNIC),
4724 .intfver_ri = FW_INTFVER(T5, RI),
4725 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4726 .intfver_fcoe = FW_INTFVER(T5, FCOE),
4727 },
4728 }, {
4729 .chip = CHELSIO_T6,
4730 .fs_name = FW6_CFNAME,
4731 .fw_mod_name = FW6_FNAME,
4732 .fw_hdr = {
4733 .chip = FW_HDR_CHIP_T6,
4734 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4735 .intfver_nic = FW_INTFVER(T6, NIC),
4736 .intfver_vnic = FW_INTFVER(T6, VNIC),
4737 .intfver_ofld = FW_INTFVER(T6, OFLD),
4738 .intfver_ri = FW_INTFVER(T6, RI),
4739 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4740 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4741 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4742 .intfver_fcoe = FW_INTFVER(T6, FCOE),
4743 },
4744 }
4745
4746 };
4747
4748 static struct fw_info *find_fw_info(int chip)
4749 {
4750 int i;
4751
4752 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4753 if (fw_info_array[i].chip == chip)
4754 return &fw_info_array[i];
4755 }
4756 return NULL;
4757 }
4758
4759
4760
4761
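/* Phase 0 of adapter initialization: contact the firmware, bring it up
 * (upgrading it from /lib/firmware if we're the Master PF and it's stale),
 * process the configuration, and retrieve the parameters and resource
 * ranges the rest of the driver depends on.
 */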
4762 static int adap_init0(struct adapter *adap, int vpd_skip)
4763 {
4764 struct fw_caps_config_cmd caps_cmd;
4765 u32 params[7], val[7];
4766 enum dev_state state;
4767 u32 v, port_vec;
4768 int reset = 1;
4769 int ret;
4770
4771
4772
4773
4774 ret = t4_init_devlog_params(adap);
4775 if (ret < 0)
4776 return ret;
4777
4778
4779 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4780 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4781 if (ret < 0) {
4782 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4783 ret);
4784 return ret;
4785 }
4786 if (ret == adap->mbox)
4787 adap->flags |= CXGB4_MASTER_PF;
4788
4789
4790
4791
4792
4793
4794
4795
4796
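/* Read the adapter's version information and check whether its firmware
 * is one the driver can work with.  If we're the Master PF and the device
 * isn't already initialized, the code below may install or upgrade the
 * firmware from /lib/firmware.
 */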
4797 t4_get_version_info(adap);
4798 ret = t4_check_fw_version(adap);
4799
4800 if (ret)
4801 state = DEV_STATE_UNINIT;
4802 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
4803 struct fw_info *fw_info;
4804 struct fw_hdr *card_fw;
4805 const struct firmware *fw;
4806 const u8 *fw_data = NULL;
4807 unsigned int fw_size = 0;
4808
4809
4810
4811
4812 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4813 if (fw_info == NULL) {
4814 dev_err(adap->pdev_dev,
4815 "unable to get firmware info for chip %d.\n",
4816 CHELSIO_CHIP_VERSION(adap->params.chip));
4817 return -EINVAL;
4818 }
4819
4820
4821
4822
4823 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4824 if (!card_fw) {
4825 ret = -ENOMEM;
4826 goto bye;
4827 }
4828
4829
4830 ret = request_firmware(&fw, fw_info->fw_mod_name,
4831 adap->pdev_dev);
4832 if (ret < 0) {
4833 dev_err(adap->pdev_dev,
4834 "unable to load firmware image %s, error %d\n",
4835 fw_info->fw_mod_name, ret);
4836 } else {
4837 fw_data = fw->data;
4838 fw_size = fw->size;
4839 }
4840
4841
4842 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4843 state, &reset);
4844
4845
4846 release_firmware(fw);
4847 kvfree(card_fw);
4848
4849 if (ret < 0)
4850 goto bye;
4851 }
4852
4853
4854
4855
4856 if (state == DEV_STATE_INIT) {
4857 ret = adap_config_hma(adap);
4858 if (ret)
4859 dev_err(adap->pdev_dev,
4860 "HMA configuration failed with error %d\n",
4861 ret);
4862 dev_info(adap->pdev_dev, "Coming up as %s: "\
4863 "Adapter already initialized\n",
4864 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
4865 } else {
4866 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4867 "Initializing adapter\n");
4868
4869
4870
4871
4872 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4873 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4874 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4875 params, val);
4876
4877
4878
4879
4880 if (ret < 0) {
4881 dev_err(adap->pdev_dev, "firmware doesn't support "
4882 "Firmware Configuration Files\n");
4883 goto bye;
4884 }
4885
4886
4887
4888
4889
4890 ret = adap_init0_config(adap, reset);
4891 if (ret == -ENOENT) {
4892 dev_err(adap->pdev_dev, "no Configuration File "
4893 "present on adapter.\n");
4894 goto bye;
4895 }
4896 if (ret < 0) {
4897 dev_err(adap->pdev_dev, "could not initialize "
4898 "adapter, error %d\n", -ret);
4899 goto bye;
4900 }
4901 }
4902
4903
4904
4905
4906
4907 ret = t4_get_pfres(adap);
4908 if (ret) {
4909 dev_err(adap->pdev_dev,
4910 "Unable to retrieve resource provisioning information\n");
4911 goto bye;
4912 }
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922
4923
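/* Read the adapter's VPD parameters unless the caller asked us to skip
 * them (as the EEH reset path does).
 */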
4924 if (!vpd_skip) {
4925 ret = t4_get_vpd_params(adap, &adap->params.vpd);
4926 if (ret < 0)
4927 goto bye;
4928 }
4929
4930
4931
4932
4933
4934 v =
4935 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4936 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4937 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4938 if (ret < 0)
4939 goto bye;
4940
4941 adap->params.nports = hweight32(port_vec);
4942 adap->params.portvec = port_vec;
4943
4944
4945
4946
4947
4948 ret = t4_sge_init(adap);
4949 if (ret < 0)
4950 goto bye;
4951
4952
4953
4954
4955 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4956 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
4957 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4958 1, params, val);
4959
4960 if (!ret) {
4961 adap->sge.dbqtimer_tick = val[0];
4962 ret = t4_read_sge_dbqtimers(adap,
4963 ARRAY_SIZE(adap->sge.dbqtimer_val),
4964 adap->sge.dbqtimer_val);
4965 }
4966
4967 if (!ret)
4968 adap->flags |= CXGB4_SGE_DBQ_TIMER;
4969
4970 if (is_bypass_device(adap->pdev->device))
4971 adap->params.bypass = 1;
4972
4973
4974
4975
4976 params[0] = FW_PARAM_PFVF(EQ_START);
4977 params[1] = FW_PARAM_PFVF(L2T_START);
4978 params[2] = FW_PARAM_PFVF(L2T_END);
4979 params[3] = FW_PARAM_PFVF(FILTER_START);
4980 params[4] = FW_PARAM_PFVF(FILTER_END);
4981 params[5] = FW_PARAM_PFVF(IQFLINT_START);
4982 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4983 if (ret < 0)
4984 goto bye;
4985 adap->sge.egr_start = val[0];
4986 adap->l2t_start = val[1];
4987 adap->l2t_end = val[2];
4988 adap->tids.ftid_base = val[3];
4989 adap->tids.nftids = val[4] - val[3] + 1;
4990 adap->sge.ingr_start = val[5];
4991
4992 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4993 params[0] = FW_PARAM_PFVF(HPFILTER_START);
4994 params[1] = FW_PARAM_PFVF(HPFILTER_END);
4995 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4996 params, val);
4997 if (ret < 0)
4998 goto bye;
4999
5000 adap->tids.hpftid_base = val[0];
5001 adap->tids.nhpftids = val[1] - val[0] + 1;
5002
5003
5004
5005
5006 params[0] = FW_PARAM_PFVF(RAWF_START);
5007 params[1] = FW_PARAM_PFVF(RAWF_END);
5008 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5009 params, val);
5010 if (ret == 0) {
5011 adap->rawf_start = val[0];
5012 adap->rawf_cnt = val[1] - val[0] + 1;
5013 }
5014
5015 adap->tids.tid_base =
5016 t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
5017 }
5018
5019
5020
5021
5022
5023
5024
5025 params[0] = FW_PARAM_PFVF(EQ_END);
5026 params[1] = FW_PARAM_PFVF(IQFLINT_END);
5027 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5028 if (ret < 0)
5029 goto bye;
5030 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
5031 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
5032
5033 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
5034 sizeof(*adap->sge.egr_map), GFP_KERNEL);
5035 if (!adap->sge.egr_map) {
5036 ret = -ENOMEM;
5037 goto bye;
5038 }
5039
5040 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
5041 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
5042 if (!adap->sge.ingr_map) {
5043 ret = -ENOMEM;
5044 goto bye;
5045 }
5046
5047
5048
5049
5050 adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
5051 if (!adap->sge.starving_fl) {
5052 ret = -ENOMEM;
5053 goto bye;
5054 }
5055
5056 adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
5057 if (!adap->sge.txq_maperr) {
5058 ret = -ENOMEM;
5059 goto bye;
5060 }
5061
5062 #ifdef CONFIG_DEBUG_FS
5063 adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
5064 if (!adap->sge.blocked_fl) {
5065 ret = -ENOMEM;
5066 goto bye;
5067 }
5068 #endif
5069
5070 params[0] = FW_PARAM_PFVF(CLIP_START);
5071 params[1] = FW_PARAM_PFVF(CLIP_END);
5072 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5073 if (ret < 0)
5074 goto bye;
5075 adap->clipt_start = val[0];
5076 adap->clipt_end = val[1];
5077
5078
5079 params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
5080 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5081 if (ret < 0) {
5082
5083
5084
5085
5086 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
5087 } else {
5088 adap->params.nsched_cls = val[0];
5089 }
5090
5091
5092 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5093 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5094 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5095
5096
5097
5098 if ((val[0] != val[1]) && (ret >= 0)) {
5099 adap->flags |= CXGB4_FW_OFLD_CONN;
5100 adap->tids.aftid_base = val[0];
5101 adap->tids.aftid_end = val[1];
5102 }
5103
5104
5105
5106
5107
5108
5109 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5110 val[0] = 1;
5111 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5112
5113
5114
5115
5116
5117
5118
5119 if (is_t4(adap->params.chip)) {
5120 adap->params.ulptx_memwrite_dsgl = false;
5121 } else {
5122 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5123 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5124 1, params, val);
5125 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5126 }
5127
5128
5129 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
5130 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5131 1, params, val);
5132 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
5133
5134
5135 if (is_t4(adap->params.chip)) {
5136 adap->params.filter2_wr_support = false;
5137 } else {
5138 params[0] = FW_PARAM_DEV(FILTER2_WR);
5139 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5140 1, params, val);
5141 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
5142 }
5143
5144
5145
5146
5147
5148 params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
5149 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5150 1, params, val);
5151 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
5152
5153
5154
5155
5156
5157 memset(&caps_cmd, 0, sizeof(caps_cmd));
5158 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5159 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5160 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5161 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5162 &caps_cmd);
5163 if (ret < 0)
5164 goto bye;
5165
5166
5167
5168
5169
5170 if (caps_cmd.ofldcaps)
5171 adap->params.offload = 1;
5172
5173 if (caps_cmd.ofldcaps ||
5174 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
5175 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
5176
5177 params[0] = FW_PARAM_DEV(NTID);
5178 params[1] = FW_PARAM_PFVF(SERVER_START);
5179 params[2] = FW_PARAM_PFVF(SERVER_END);
5180 params[3] = FW_PARAM_PFVF(TDDP_START);
5181 params[4] = FW_PARAM_PFVF(TDDP_END);
5182 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5183 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5184 params, val);
5185 if (ret < 0)
5186 goto bye;
5187 adap->tids.ntids = val[0];
5188 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5189 adap->tids.stid_base = val[1];
5190 adap->tids.nstids = val[2] - val[1] + 1;
5191
5192
5193
5194
5195
5196
5197
5198
5199
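/* If the firmware manages offloaded connections (and this isn't a
 * bypass adapter), reserve the upper two thirds of the filter region
 * for server filter TIDs and keep the lower third for normal filters.
 */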
5200 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
5201 adap->tids.sftid_base = adap->tids.ftid_base +
5202 DIV_ROUND_UP(adap->tids.nftids, 3);
5203 adap->tids.nsftids = adap->tids.nftids -
5204 DIV_ROUND_UP(adap->tids.nftids, 3);
5205 adap->tids.nftids = adap->tids.sftid_base -
5206 adap->tids.ftid_base;
5207 }
5208 adap->vres.ddp.start = val[3];
5209 adap->vres.ddp.size = val[4] - val[3] + 1;
5210 adap->params.ofldq_wr_cred = val[5];
5211
5212 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
5213 init_hash_filter(adap);
5214 } else {
5215 adap->num_ofld_uld += 1;
5216 }
5217
5218 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
5219 params[0] = FW_PARAM_PFVF(ETHOFLD_START);
5220 params[1] = FW_PARAM_PFVF(ETHOFLD_END);
5221 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5222 params, val);
5223 if (!ret) {
5224 adap->tids.eotid_base = val[0];
5225 adap->tids.neotids = min_t(u32, MAX_ATIDS,
5226 val[1] - val[0] + 1);
5227 adap->params.ethofld = 1;
5228 }
5229 }
5230 }
5231 if (caps_cmd.rdmacaps) {
5232 params[0] = FW_PARAM_PFVF(STAG_START);
5233 params[1] = FW_PARAM_PFVF(STAG_END);
5234 params[2] = FW_PARAM_PFVF(RQ_START);
5235 params[3] = FW_PARAM_PFVF(RQ_END);
5236 params[4] = FW_PARAM_PFVF(PBL_START);
5237 params[5] = FW_PARAM_PFVF(PBL_END);
5238 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5239 params, val);
5240 if (ret < 0)
5241 goto bye;
5242 adap->vres.stag.start = val[0];
5243 adap->vres.stag.size = val[1] - val[0] + 1;
5244 adap->vres.rq.start = val[2];
5245 adap->vres.rq.size = val[3] - val[2] + 1;
5246 adap->vres.pbl.start = val[4];
5247 adap->vres.pbl.size = val[5] - val[4] + 1;
5248
5249 params[0] = FW_PARAM_PFVF(SRQ_START);
5250 params[1] = FW_PARAM_PFVF(SRQ_END);
5251 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5252 params, val);
5253 if (!ret) {
5254 adap->vres.srq.start = val[0];
5255 adap->vres.srq.size = val[1] - val[0] + 1;
5256 }
5257 if (adap->vres.srq.size) {
5258 adap->srq = t4_init_srq(adap->vres.srq.size);
5259 if (!adap->srq)
5260 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
5261 }
5262
5263 params[0] = FW_PARAM_PFVF(SQRQ_START);
5264 params[1] = FW_PARAM_PFVF(SQRQ_END);
5265 params[2] = FW_PARAM_PFVF(CQ_START);
5266 params[3] = FW_PARAM_PFVF(CQ_END);
5267 params[4] = FW_PARAM_PFVF(OCQ_START);
5268 params[5] = FW_PARAM_PFVF(OCQ_END);
5269 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
5270 val);
5271 if (ret < 0)
5272 goto bye;
5273 adap->vres.qp.start = val[0];
5274 adap->vres.qp.size = val[1] - val[0] + 1;
5275 adap->vres.cq.start = val[2];
5276 adap->vres.cq.size = val[3] - val[2] + 1;
5277 adap->vres.ocq.start = val[4];
5278 adap->vres.ocq.size = val[5] - val[4] + 1;
5279
5280 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5281 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5282 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5283 val);
5284 if (ret < 0) {
5285 adap->params.max_ordird_qp = 8;
5286 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5287 ret = 0;
5288 } else {
5289 adap->params.max_ordird_qp = val[0];
5290 adap->params.max_ird_adapter = val[1];
5291 }
5292 dev_info(adap->pdev_dev,
5293 "max_ordird_qp %d max_ird_adapter %d\n",
5294 adap->params.max_ordird_qp,
5295 adap->params.max_ird_adapter);
5296
5297
5298 params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
5299 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5300 val);
5301 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
5302
5303
5304 params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
5305 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5306 val);
5307 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
5308 adap->num_ofld_uld += 2;
5309 }
5310 if (caps_cmd.iscsicaps) {
5311 params[0] = FW_PARAM_PFVF(ISCSI_START);
5312 params[1] = FW_PARAM_PFVF(ISCSI_END);
5313 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5314 params, val);
5315 if (ret < 0)
5316 goto bye;
5317 adap->vres.iscsi.start = val[0];
5318 adap->vres.iscsi.size = val[1] - val[0] + 1;
5319 if (is_t6(adap->params.chip)) {
5320 params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
5321 params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
5322 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5323 params, val);
5324 if (!ret) {
5325 adap->vres.ppod_edram.start = val[0];
5326 adap->vres.ppod_edram.size =
5327 val[1] - val[0] + 1;
5328
5329 dev_info(adap->pdev_dev,
5330 "ppod edram start 0x%x end 0x%x size 0x%x\n",
5331 val[0], val[1],
5332 adap->vres.ppod_edram.size);
5333 }
5334 }
5335
5336 adap->num_ofld_uld += 2;
5337 }
5338 if (caps_cmd.cryptocaps) {
5339 if (ntohs(caps_cmd.cryptocaps) &
5340 FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
5341 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
5342 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5343 2, params, val);
5344 if (ret < 0) {
5345 if (ret != -EINVAL)
5346 goto bye;
5347 } else {
5348 adap->vres.ncrypto_fc = val[0];
5349 }
5350 adap->num_ofld_uld += 1;
5351 }
5352 if (ntohs(caps_cmd.cryptocaps) &
5353 FW_CAPS_CONFIG_TLS_INLINE) {
5354 params[0] = FW_PARAM_PFVF(TLS_START);
5355 params[1] = FW_PARAM_PFVF(TLS_END);
5356 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5357 2, params, val);
5358 if (ret < 0)
5359 goto bye;
5360 adap->vres.key.start = val[0];
5361 adap->vres.key.size = val[1] - val[0] + 1;
5362 adap->num_uld += 1;
5363 }
5364 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
5365 }
5366
5367
5368
5369
5370
5371
5372 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5373 if (state != DEV_STATE_INIT) {
5374 int i;
5375
5376
5377
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
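/* The default MTU table contains the 1492 MTU common on PPPoE links;
 * replace it with 1488 so the resulting TCP MSS (1448 after the 40-byte
 * TCP/IP headers) stays a multiple of 8, then load the table along with
 * the a_wnd/b_wnd congestion parameters into the chip.
 */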
5393 for (i = 0; i < NMTUS; i++)
5394 if (adap->params.mtus[i] == 1492) {
5395 adap->params.mtus[i] = 1488;
5396 break;
5397 }
5398
5399 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5400 adap->params.b_wnd);
5401 }
5402 t4_init_sge_params(adap);
5403 adap->flags |= CXGB4_FW_OK;
5404 t4_init_tp_params(adap, true);
5405 return 0;
5406
5407
5408
5409
5410
5411
5412 bye:
5413 adap_free_hma_mem(adap);
5414 kfree(adap->sge.egr_map);
5415 kfree(adap->sge.ingr_map);
5416 bitmap_free(adap->sge.starving_fl);
5417 bitmap_free(adap->sge.txq_maperr);
5418 #ifdef CONFIG_DEBUG_FS
5419 bitmap_free(adap->sge.blocked_fl);
5420 #endif
5421 if (ret != -ETIMEDOUT && ret != -EIO)
5422 t4_fw_bye(adap, adap->mbox);
5423 return ret;
5424 }
5425
5426
5427
5428 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5429 pci_channel_state_t state)
5430 {
5431 int i;
5432 struct adapter *adap = pci_get_drvdata(pdev);
5433
5434 if (!adap)
5435 goto out;
5436
5437 rtnl_lock();
5438 adap->flags &= ~CXGB4_FW_OK;
5439 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5440 spin_lock(&adap->stats_lock);
5441 for_each_port(adap, i) {
5442 struct net_device *dev = adap->port[i];
5443 if (dev) {
5444 netif_device_detach(dev);
5445 netif_carrier_off(dev);
5446 }
5447 }
5448 spin_unlock(&adap->stats_lock);
5449 disable_interrupts(adap);
5450 if (adap->flags & CXGB4_FULL_INIT_DONE)
5451 cxgb_down(adap);
5452 rtnl_unlock();
5453 if ((adap->flags & CXGB4_DEV_ENABLED)) {
5454 pci_disable_device(pdev);
5455 adap->flags &= ~CXGB4_DEV_ENABLED;
5456 }
5457 out: return state == pci_channel_io_perm_failure ?
5458 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5459 }
5460
5461 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5462 {
5463 int i, ret;
5464 struct fw_caps_config_cmd c;
5465 struct adapter *adap = pci_get_drvdata(pdev);
5466
5467 if (!adap) {
5468 pci_restore_state(pdev);
5469 pci_save_state(pdev);
5470 return PCI_ERS_RESULT_RECOVERED;
5471 }
5472
5473 if (!(adap->flags & CXGB4_DEV_ENABLED)) {
5474 if (pci_enable_device(pdev)) {
5475 dev_err(&pdev->dev, "Cannot reenable PCI "
5476 "device after reset\n");
5477 return PCI_ERS_RESULT_DISCONNECT;
5478 }
5479 adap->flags |= CXGB4_DEV_ENABLED;
5480 }
5481
5482 pci_set_master(pdev);
5483 pci_restore_state(pdev);
5484 pci_save_state(pdev);
5485
5486 if (t4_wait_dev_ready(adap->regs) < 0)
5487 return PCI_ERS_RESULT_DISCONNECT;
5488 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
5489 return PCI_ERS_RESULT_DISCONNECT;
5490 adap->flags |= CXGB4_FW_OK;
5491 if (adap_init1(adap, &c))
5492 return PCI_ERS_RESULT_DISCONNECT;
5493
5494 for_each_port(adap, i) {
5495 struct port_info *pi = adap2pinfo(adap, i);
5496 u8 vivld = 0, vin = 0;
5497
5498 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
5499 NULL, NULL, &vivld, &vin);
5500 if (ret < 0)
5501 return PCI_ERS_RESULT_DISCONNECT;
5502 pi->viid = ret;
5503 pi->xact_addr_filt = -1;
5504
5505
5506
5507 if (adap->params.viid_smt_extn_support) {
5508 pi->vivld = vivld;
5509 pi->vin = vin;
5510 } else {
5511
5512 pi->vivld = FW_VIID_VIVLD_G(pi->viid);
5513 pi->vin = FW_VIID_VIN_G(pi->viid);
5514 }
5515 }
5516
5517 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5518 adap->params.b_wnd);
5519 setup_memwin(adap);
5520 if (cxgb_up(adap))
5521 return PCI_ERS_RESULT_DISCONNECT;
5522 return PCI_ERS_RESULT_RECOVERED;
5523 }
5524
5525 static void eeh_resume(struct pci_dev *pdev)
5526 {
5527 int i;
5528 struct adapter *adap = pci_get_drvdata(pdev);
5529
5530 if (!adap)
5531 return;
5532
5533 rtnl_lock();
5534 for_each_port(adap, i) {
5535 struct net_device *dev = adap->port[i];
5536 if (dev) {
5537 if (netif_running(dev)) {
5538 link_start(dev);
5539 cxgb_set_rxmode(dev);
5540 }
5541 netif_device_attach(dev);
5542 }
5543 }
5544 rtnl_unlock();
5545 }
5546
5547 static void eeh_reset_prepare(struct pci_dev *pdev)
5548 {
5549 struct adapter *adapter = pci_get_drvdata(pdev);
5550 int i;
5551
5552 if (adapter->pf != 4)
5553 return;
5554
5555 adapter->flags &= ~CXGB4_FW_OK;
5556
5557 notify_ulds(adapter, CXGB4_STATE_DOWN);
5558
5559 for_each_port(adapter, i)
5560 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5561 cxgb_close(adapter->port[i]);
5562
5563 disable_interrupts(adapter);
5564 cxgb4_free_mps_ref_entries(adapter);
5565
5566 adap_free_hma_mem(adapter);
5567
5568 if (adapter->flags & CXGB4_FULL_INIT_DONE)
5569 cxgb_down(adapter);
5570 }
5571
5572 static void eeh_reset_done(struct pci_dev *pdev)
5573 {
5574 struct adapter *adapter = pci_get_drvdata(pdev);
5575 int err, i;
5576
5577 if (adapter->pf != 4)
5578 return;
5579
5580 err = t4_wait_dev_ready(adapter->regs);
5581 if (err < 0) {
5582 dev_err(adapter->pdev_dev,
5583 "Device not ready, err %d", err);
5584 return;
5585 }
5586
5587 setup_memwin(adapter);
5588
5589 err = adap_init0(adapter, 1);
5590 if (err) {
5591 dev_err(adapter->pdev_dev,
5592 "Adapter init failed, err %d", err);
5593 return;
5594 }
5595
5596 setup_memwin_rdma(adapter);
5597
5598 if (adapter->flags & CXGB4_FW_OK) {
5599 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
5600 if (err) {
5601 dev_err(adapter->pdev_dev,
5602 "Port init failed, err %d", err);
5603 return;
5604 }
5605 }
5606
5607 err = cfg_queues(adapter);
5608 if (err) {
5609 dev_err(adapter->pdev_dev,
5610 "Config queues failed, err %d", err);
5611 return;
5612 }
5613
5614 cxgb4_init_mps_ref_entries(adapter);
5615
5616 err = setup_fw_sge_queues(adapter);
5617 if (err) {
5618 dev_err(adapter->pdev_dev,
5619 "FW sge queue allocation failed, err %d", err);
5620 return;
5621 }
5622
5623 for_each_port(adapter, i)
5624 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5625 cxgb_open(adapter->port[i]);
5626 }
5627
5628 static const struct pci_error_handlers cxgb4_eeh = {
5629 .error_detected = eeh_err_detected,
5630 .slot_reset = eeh_slot_reset,
5631 .resume = eeh_resume,
5632 .reset_prepare = eeh_reset_prepare,
5633 .reset_done = eeh_reset_done,
5634 };
5635
5636
5637
5638
5639 static inline bool is_x_10g_port(const struct link_config *lc)
5640 {
5641 unsigned int speeds, high_speeds;
5642
5643 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
5644 high_speeds = speeds &
5645 ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
5646
5647 return high_speeds != 0;
5648 }
5649
5650
5651
5652
5653
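/* cfg_queues - decide how many receive/transmit queue sets each port and
 * each ULD gets, based on the resources the firmware provisioned for this
 * PF and the number of online CPUs.
 */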
5654 static int cfg_queues(struct adapter *adap)
5655 {
5656 u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
5657 u32 ncpus = num_online_cpus();
5658 u32 niqflint, neq, num_ulds;
5659 struct sge *s = &adap->sge;
5660 u32 i, n10g = 0, qidx = 0;
5661 u32 q10g = 0, q1g;
5662
5663
5664 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
5665 adap->params.offload = 0;
5666 adap->params.crypto = 0;
5667 adap->params.ethofld = 0;
5668 }
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
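/* Work out how many queue sets the provisioned resources can support:
 * one ingress queue is reserved for the asynchronous firmware event
 * queue, plus one more for the forwarded-interrupt queue when we're not
 * using MSI-X; each queue set also needs two egress queues (a TX queue
 * and a free list), hence neq / 2.
 */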
5682 niqflint = adap->params.pfres.niqflint - 1;
5683 if (!(adap->flags & CXGB4_USING_MSIX))
5684 niqflint--;
5685 neq = adap->params.pfres.neq / 2;
5686 avail_qsets = min(niqflint, neq);
5687
5688 if (avail_qsets < adap->params.nports) {
5689 dev_err(adap->pdev_dev, "avail_qsets=%d < nports=%d\n",
5690 avail_qsets, adap->params.nports);
5691 return -ENOMEM;
5692 }
5693
5694
5695 for_each_port(adap, i)
5696 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5697
5698 avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
5699
5700
5701
5702
5703 if (n10g)
5704 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
5705
5706 #ifdef CONFIG_CHELSIO_T4_DCB
5707
5708
5709
5710
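/* With Data Center Bridging enabled we need 8 Ethernet TX queues per
 * port, one for each traffic priority, to avoid head-of-line blocking.
 */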
5711 q1g = 8;
5712 if (adap->params.nports * 8 > avail_eth_qsets) {
5713 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
5714 avail_eth_qsets, adap->params.nports * 8);
5715 return -ENOMEM;
5716 }
5717
5718 if (adap->params.nports * ncpus < avail_eth_qsets)
5719 q10g = max(8U, ncpus);
5720 else
5721 q10g = max(8U, q10g);
5722
5723 while ((q10g * n10g) >
5724 (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
5725 q10g--;
5726
5727 #else
5728 q1g = 1;
5729 q10g = min(q10g, ncpus);
5730 #endif
5731 if (is_kdump_kernel()) {
5732 q10g = 1;
5733 q1g = 1;
5734 }
5735
5736 for_each_port(adap, i) {
5737 struct port_info *pi = adap2pinfo(adap, i);
5738
5739 pi->first_qset = qidx;
5740 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
5741 qidx += pi->nqsets;
5742 }
5743
5744 s->ethqsets = qidx;
5745 s->max_ethqsets = qidx;
5746 avail_qsets -= qidx;
5747
5748 if (is_uld(adap)) {
5749
5750
5751
5752
5753 num_ulds = adap->num_uld + adap->num_ofld_uld;
5754 i = min_t(u32, MAX_OFLD_QSETS, ncpus);
5755 avail_uld_qsets = roundup(i, adap->params.nports);
5756 if (avail_qsets < num_ulds * adap->params.nports) {
5757 adap->params.offload = 0;
5758 adap->params.crypto = 0;
5759 s->ofldqsets = 0;
5760 } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
5761 s->ofldqsets = adap->params.nports;
5762 } else {
5763 s->ofldqsets = avail_uld_qsets;
5764 }
5765
5766 avail_qsets -= num_ulds * s->ofldqsets;
5767 }
5768
5769
5770
5771
5772 if (is_ethofld(adap)) {
5773 if (avail_qsets < s->max_ethqsets) {
5774 adap->params.ethofld = 0;
5775 s->eoqsets = 0;
5776 } else {
5777 s->eoqsets = s->max_ethqsets;
5778 }
5779 avail_qsets -= s->eoqsets;
5780 }
5781
5782
5783
5784
5785
5786
5787 if (avail_qsets >= s->max_ethqsets)
5788 s->mirrorqsets = s->max_ethqsets;
5789 else if (avail_qsets >= adap->params.nports)
5790 s->mirrorqsets = adap->params.nports;
5791 else
5792 s->mirrorqsets = 0;
5793 avail_qsets -= s->mirrorqsets;
5794
5795 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5796 struct sge_eth_rxq *r = &s->ethrxq[i];
5797
5798 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5799 r->fl.size = 72;
5800 }
5801
5802 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5803 s->ethtxq[i].q.size = 1024;
5804
5805 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5806 s->ctrlq[i].q.size = 512;
5807
5808 if (!is_t4(adap->params.chip))
5809 s->ptptxq.q.size = 8;
5810
5811 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5812 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
5813
5814 return 0;
5815 }
5816
5817
5818
5819
5820
5821 static void reduce_ethqs(struct adapter *adap, int n)
5822 {
5823 int i;
5824 struct port_info *pi;
5825
5826 while (n < adap->sge.ethqsets)
5827 for_each_port(adap, i) {
5828 pi = adap2pinfo(adap, i);
5829 if (pi->nqsets > 1) {
5830 pi->nqsets--;
5831 adap->sge.ethqsets--;
5832 if (adap->sge.ethqsets <= n)
5833 break;
5834 }
5835 }
5836
5837 n = 0;
5838 for_each_port(adap, i) {
5839 pi = adap2pinfo(adap, i);
5840 pi->first_qset = n;
5841 n += pi->nqsets;
5842 }
5843 }
5844
5845 static int alloc_msix_info(struct adapter *adap, u32 num_vec)
5846 {
5847 struct msix_info *msix_info;
5848
5849 msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
5850 if (!msix_info)
5851 return -ENOMEM;
5852
5853 adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
5854 if (!adap->msix_bmap.msix_bmap) {
5855 kfree(msix_info);
5856 return -ENOMEM;
5857 }
5858
5859 spin_lock_init(&adap->msix_bmap.lock);
5860 adap->msix_bmap.mapsize = num_vec;
5861
5862 adap->msix_info = msix_info;
5863 return 0;
5864 }
5865
5866 static void free_msix_info(struct adapter *adap)
5867 {
5868 bitmap_free(adap->msix_bmap.msix_bmap);
5869 kfree(adap->msix_info);
5870 }
5871
5872 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
5873 {
5874 struct msix_bmap *bmap = &adap->msix_bmap;
5875 unsigned int msix_idx;
5876 unsigned long flags;
5877
5878 spin_lock_irqsave(&bmap->lock, flags);
5879 msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5880 if (msix_idx < bmap->mapsize) {
5881 __set_bit(msix_idx, bmap->msix_bmap);
5882 } else {
5883 spin_unlock_irqrestore(&bmap->lock, flags);
5884 return -ENOSPC;
5885 }
5886
5887 spin_unlock_irqrestore(&bmap->lock, flags);
5888 return msix_idx;
5889 }
5890
5891 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
5892 unsigned int msix_idx)
5893 {
5894 struct msix_bmap *bmap = &adap->msix_bmap;
5895 unsigned long flags;
5896
5897 spin_lock_irqsave(&bmap->lock, flags);
5898 __clear_bit(msix_idx, bmap->msix_bmap);
5899 spin_unlock_irqrestore(&bmap->lock, flags);
5900 }
5901
5902
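/* Two extra MSI-X vectors beyond the per-queue ones: one for the firmware
 * event queue and one for non-data interrupts.
 */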
5903 #define EXTRA_VECS 2
5904
5905 static int enable_msix(struct adapter *adap)
5906 {
5907 u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
5908 u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
5909 u8 num_uld = 0, nchan = adap->params.nports;
5910 u32 i, want, need, num_vec;
5911 struct sge *s = &adap->sge;
5912 struct msix_entry *entries;
5913 struct port_info *pi;
5914 int allocated, ret;
5915
5916 want = s->max_ethqsets;
5917 #ifdef CONFIG_CHELSIO_T4_DCB
5918
5919
5920
5921 need = 8 * nchan;
5922 #else
5923 need = nchan;
5924 #endif
5925 eth_need = need;
5926 if (is_uld(adap)) {
5927 num_uld = adap->num_ofld_uld + adap->num_uld;
5928 want += num_uld * s->ofldqsets;
5929 uld_need = num_uld * nchan;
5930 need += uld_need;
5931 }
5932
5933 if (is_ethofld(adap)) {
5934 want += s->eoqsets;
5935 ethofld_need = eth_need;
5936 need += ethofld_need;
5937 }
5938
5939 if (s->mirrorqsets) {
5940 want += s->mirrorqsets;
5941 mirror_need = nchan;
5942 need += mirror_need;
5943 }
5944
5945 want += EXTRA_VECS;
5946 need += EXTRA_VECS;
5947
5948 entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
5949 if (!entries)
5950 return -ENOMEM;
5951
5952 for (i = 0; i < want; i++)
5953 entries[i].entry = i;
5954
5955 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5956 if (allocated < 0) {
5957
5958
5959
5960 want = s->max_ethqsets + EXTRA_VECS;
5961 need = eth_need + EXTRA_VECS;
5962 allocated = pci_enable_msix_range(adap->pdev, entries,
5963 need, want);
5964 if (allocated < 0) {
5965 dev_info(adap->pdev_dev,
5966 "Disabling MSI-X due to insufficient MSI-X vectors\n");
5967 ret = allocated;
5968 goto out_free;
5969 }
5970
5971 dev_info(adap->pdev_dev,
5972 "Disabling offload due to insufficient MSI-X vectors\n");
5973 adap->params.offload = 0;
5974 adap->params.crypto = 0;
5975 adap->params.ethofld = 0;
5976 s->ofldqsets = 0;
5977 s->eoqsets = 0;
5978 s->mirrorqsets = 0;
5979 uld_need = 0;
5980 ethofld_need = 0;
5981 mirror_need = 0;
5982 }
5983
5984 num_vec = allocated;
5985 if (num_vec < want) {
5986
5987
5988
5989
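/* We received fewer vectors than we wanted: start every consumer at its
 * minimum and then distribute whatever is left round-robin across the
 * ports, Ethernet (and ETHOFLD) queues first, then ULD and mirror queues.
 */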
5990 ethqsets = eth_need;
5991 if (is_uld(adap))
5992 ofldqsets = nchan;
5993 if (is_ethofld(adap))
5994 eoqsets = ethofld_need;
5995 if (s->mirrorqsets)
5996 mirrorqsets = mirror_need;
5997
5998 num_vec -= need;
5999 while (num_vec) {
6000 if (num_vec < eth_need + ethofld_need ||
6001 ethqsets > s->max_ethqsets)
6002 break;
6003
6004 for_each_port(adap, i) {
6005 pi = adap2pinfo(adap, i);
6006 if (pi->nqsets < 2)
6007 continue;
6008
6009 ethqsets++;
6010 num_vec--;
6011 if (ethofld_need) {
6012 eoqsets++;
6013 num_vec--;
6014 }
6015 }
6016 }
6017
6018 if (is_uld(adap)) {
6019 while (num_vec) {
6020 if (num_vec < uld_need ||
6021 ofldqsets > s->ofldqsets)
6022 break;
6023
6024 ofldqsets++;
6025 num_vec -= uld_need;
6026 }
6027 }
6028
6029 if (s->mirrorqsets) {
6030 while (num_vec) {
6031 if (num_vec < mirror_need ||
6032 mirrorqsets > s->mirrorqsets)
6033 break;
6034
6035 mirrorqsets++;
6036 num_vec -= mirror_need;
6037 }
6038 }
6039 } else {
6040 ethqsets = s->max_ethqsets;
6041 if (is_uld(adap))
6042 ofldqsets = s->ofldqsets;
6043 if (is_ethofld(adap))
6044 eoqsets = s->eoqsets;
6045 if (s->mirrorqsets)
6046 mirrorqsets = s->mirrorqsets;
6047 }
6048
6049 if (ethqsets < s->max_ethqsets) {
6050 s->max_ethqsets = ethqsets;
6051 reduce_ethqs(adap, ethqsets);
6052 }
6053
6054 if (is_uld(adap)) {
6055 s->ofldqsets = ofldqsets;
6056 s->nqs_per_uld = s->ofldqsets;
6057 }
6058
6059 if (is_ethofld(adap))
6060 s->eoqsets = eoqsets;
6061
6062 if (s->mirrorqsets) {
6063 s->mirrorqsets = mirrorqsets;
6064 for_each_port(adap, i) {
6065 pi = adap2pinfo(adap, i);
6066 pi->nmirrorqsets = s->mirrorqsets / nchan;
6067 mutex_init(&pi->vi_mirror_mutex);
6068 }
6069 }
6070
6071
6072 ret = alloc_msix_info(adap, allocated);
6073 if (ret)
6074 goto out_disable_msix;
6075
6076 for (i = 0; i < allocated; i++) {
6077 adap->msix_info[i].vec = entries[i].vector;
6078 adap->msix_info[i].idx = i;
6079 }
6080
6081 dev_info(adap->pdev_dev,
6082 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
6083 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
6084 s->mirrorqsets);
6085
6086 kfree(entries);
6087 return 0;
6088
6089 out_disable_msix:
6090 pci_disable_msix(adap->pdev);
6091
6092 out_free:
6093 kfree(entries);
6094 return ret;
6095 }
6096
6097 #undef EXTRA_VECS
6098
6099 static int init_rss(struct adapter *adap)
6100 {
6101 unsigned int i;
6102 int err;
6103
6104 err = t4_init_rss_mode(adap, adap->mbox);
6105 if (err)
6106 return err;
6107
6108 for_each_port(adap, i) {
6109 struct port_info *pi = adap2pinfo(adap, i);
6110
6111 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6112 if (!pi->rss)
6113 return -ENOMEM;
6114 }
6115 return 0;
6116 }
6117
6118
6119 static void print_adapter_info(struct adapter *adapter)
6120 {
6121
6122 t4_dump_version_info(adapter);
6123
6124
6125 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
6126 is_offload(adapter) ? "R" : "",
6127 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
6128 (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
6129 is_offload(adapter) ? "Offload" : "non-Offload");
6130 }
6131
6132 static void print_port_info(const struct net_device *dev)
6133 {
6134 char buf[80];
6135 char *bufp = buf;
6136 const struct port_info *pi = netdev_priv(dev);
6137 const struct adapter *adap = pi->adapter;
6138
6139 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
6140 bufp += sprintf(bufp, "100M/");
6141 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
6142 bufp += sprintf(bufp, "1G/");
6143 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
6144 bufp += sprintf(bufp, "10G/");
6145 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
6146 bufp += sprintf(bufp, "25G/");
6147 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
6148 bufp += sprintf(bufp, "40G/");
6149 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
6150 bufp += sprintf(bufp, "50G/");
6151 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
6152 bufp += sprintf(bufp, "100G/");
6153 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
6154 bufp += sprintf(bufp, "200G/");
6155 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
6156 bufp += sprintf(bufp, "400G/");
6157 if (bufp != buf)
6158 --bufp;
6159 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6160
6161 netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf);
6162 }
6163
6164
6165
6166
6167
6168
6169
6170
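/* Release the resources allocated during probe: lookup tables and offload
 * state, TC offload structures, queue maps, MSI/MSI-X, the per-port net
 * devices and their virtual interfaces, and finally the firmware
 * connection itself.
 */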
6171 static void free_some_resources(struct adapter *adapter)
6172 {
6173 unsigned int i;
6174
6175 kvfree(adapter->smt);
6176 kvfree(adapter->l2t);
6177 kvfree(adapter->srq);
6178 t4_cleanup_sched(adapter);
6179 kvfree(adapter->tids.tid_tab);
6180 cxgb4_cleanup_tc_matchall(adapter);
6181 cxgb4_cleanup_tc_mqprio(adapter);
6182 cxgb4_cleanup_tc_flower(adapter);
6183 cxgb4_cleanup_tc_u32(adapter);
6184 cxgb4_cleanup_ethtool_filters(adapter);
6185 kfree(adapter->sge.egr_map);
6186 kfree(adapter->sge.ingr_map);
6187 bitmap_free(adapter->sge.starving_fl);
6188 bitmap_free(adapter->sge.txq_maperr);
6189 #ifdef CONFIG_DEBUG_FS
6190 bitmap_free(adapter->sge.blocked_fl);
6191 #endif
6192 disable_msi(adapter);
6193
6194 for_each_port(adapter, i)
6195 if (adapter->port[i]) {
6196 struct port_info *pi = adap2pinfo(adapter, i);
6197
6198 if (pi->viid != 0)
6199 t4_free_vi(adapter, adapter->mbox, adapter->pf,
6200 0, pi->viid);
6201 kfree(adap2pinfo(adapter, i)->rss);
6202 free_netdev(adapter->port[i]);
6203 }
6204 if (adapter->flags & CXGB4_FW_OK)
6205 t4_fw_bye(adapter, adapter->pf);
6206 }
6207
6208 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
6209 NETIF_F_GSO_UDP_L4)
6210 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6211 NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6212 #define SEGMENT_SIZE 128
6213
6214 static int t4_get_chip_type(struct adapter *adap, int ver)
6215 {
6216 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
6217
6218 switch (ver) {
6219 case CHELSIO_T4:
6220 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
6221 case CHELSIO_T5:
6222 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
6223 case CHELSIO_T6:
6224 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
6225 default:
6226 break;
6227 }
6228 return -EINVAL;
6229 }
6230
6231 #ifdef CONFIG_PCI_IOV
6232 static void cxgb4_mgmt_setup(struct net_device *dev)
6233 {
6234 dev->type = ARPHRD_NONE;
6235 dev->mtu = 0;
6236 dev->hard_header_len = 0;
6237 dev->addr_len = 0;
6238 dev->tx_queue_len = 0;
6239 dev->flags |= IFF_NOARP;
6240 dev->priv_flags |= IFF_NO_QUEUE;
6241
6242
6243 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
6244 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
6245 }
6246
6247 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
6248 {
6249 struct adapter *adap = pci_get_drvdata(pdev);
6250 int err = 0;
6251 int current_vfs = pci_num_vf(pdev);
6252 u32 pcie_fw;
6253
6254 pcie_fw = readl(adap->regs + PCIE_FW_A);
6255
6256 if (!(pcie_fw & PCIE_FW_INIT_F)) {
6257 dev_warn(&pdev->dev, "Device not initialized\n");
6258 return -EOPNOTSUPP;
6259 }
6260
6261
6262
6263
6264 if (current_vfs && pci_vfs_assigned(pdev)) {
6265 dev_err(&pdev->dev,
6266 "Cannot modify SR-IOV while VFs are assigned\n");
6267 return current_vfs;
6268 }
6269
6270
6271
6272
6273 if (num_vfs != 0 && current_vfs != 0)
6274 return -EBUSY;
6275
6276
6277 if (num_vfs == current_vfs)
6278 return num_vfs;
6279
6280
6281 if (!num_vfs) {
6282 pci_disable_sriov(pdev);
6283
6284 unregister_netdev(adap->port[0]);
6285 free_netdev(adap->port[0]);
6286 adap->port[0] = NULL;
6287
6288
6289 adap->num_vfs = 0;
6290 kfree(adap->vfinfo);
6291 adap->vfinfo = NULL;
6292 return 0;
6293 }
6294
6295 if (!current_vfs) {
6296 struct fw_pfvf_cmd port_cmd, port_rpl;
6297 struct net_device *netdev;
6298 unsigned int pmask, port;
6299 struct pci_dev *pbridge;
6300 struct port_info *pi;
6301 char name[IFNAMSIZ];
6302 u32 devcap2;
6303 u16 flags;
6304
6305
6306
6307
6308
6309
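/* To instantiate VFs the parent bridge must support ARI (Alternative
 * Routing-ID Interpretation), since the VFs appear at function numbers
 * beyond 7; check its PCIe capability version and Device Capabilities 2
 * register before going any further.
 */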
6310 pbridge = pdev->bus->self;
6311 pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
6312 pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
6313
6314 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
6315 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
6316
6317
6318
6319
6320 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
6321 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
6322 PCI_FUNC(pbridge->devfn));
6323 return -ENOTSUPP;
6324 }
6325 memset(&port_cmd, 0, sizeof(port_cmd));
6326 port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
6327 FW_CMD_REQUEST_F |
6328 FW_CMD_READ_F |
6329 FW_PFVF_CMD_PFN_V(adap->pf) |
6330 FW_PFVF_CMD_VFN_V(0));
6331 port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
6332 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
6333 &port_rpl);
6334 if (err)
6335 return err;
6336 pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
6337 port = ffs(pmask) - 1;
6338
6339 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
6340 adap->pf);
6341 netdev = alloc_netdev(sizeof(struct port_info),
6342 name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
6343 if (!netdev)
6344 return -ENOMEM;
6345
6346 pi = netdev_priv(netdev);
6347 pi->adapter = adap;
6348 pi->lport = port;
6349 pi->tx_chan = port;
6350 SET_NETDEV_DEV(netdev, &pdev->dev);
6351
6352 adap->port[0] = netdev;
6353 pi->port_id = 0;
6354
6355 err = register_netdev(adap->port[0]);
6356 if (err) {
6357 pr_info("Unable to register VF mgmt netdev %s\n", name);
6358 free_netdev(adap->port[0]);
6359 adap->port[0] = NULL;
6360 return err;
6361 }
6362
6363 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
6364 sizeof(struct vf_info), GFP_KERNEL);
6365 if (!adap->vfinfo) {
6366 unregister_netdev(adap->port[0]);
6367 free_netdev(adap->port[0]);
6368 adap->port[0] = NULL;
6369 return -ENOMEM;
6370 }
6371 cxgb4_mgmt_fill_vf_station_mac_addr(adap);
6372 }
6373
6374 err = pci_enable_sriov(pdev, num_vfs);
6375 if (err) {
6376 pr_info("Unable to instantiate %d VFs\n", num_vfs);
6377 if (!current_vfs) {
6378 unregister_netdev(adap->port[0]);
6379 free_netdev(adap->port[0]);
6380 adap->port[0] = NULL;
6381 kfree(adap->vfinfo);
6382 adap->vfinfo = NULL;
6383 }
6384 return err;
6385 }
6386
6387 adap->num_vfs = num_vfs;
6388 return num_vfs;
6389 }
6390 #endif
6391
6392 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6393
6394 static int chcr_offload_state(struct adapter *adap,
6395 enum cxgb4_netdev_tls_ops op_val)
6396 {
6397 switch (op_val) {
6398 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6399 case CXGB4_TLSDEV_OPS:
6400 if (!adap->uld[CXGB4_ULD_KTLS].handle) {
6401 dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
6402 return -EOPNOTSUPP;
6403 }
6404 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
6405 dev_dbg(adap->pdev_dev,
6406 "ch_ktls driver has no registered tlsdev_ops\n");
6407 return -EOPNOTSUPP;
6408 }
6409 break;
6410 #endif
6411 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6412 case CXGB4_XFRMDEV_OPS:
6413 if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
6414 dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n");
6415 return -EOPNOTSUPP;
6416 }
6417 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
6418 dev_dbg(adap->pdev_dev,
6419 "chipsec driver has no registered xfrmdev_ops\n");
6420 return -EOPNOTSUPP;
6421 }
6422 break;
6423 #endif
6424 default:
6425 dev_dbg(adap->pdev_dev,
6426 "driver has no support for offload %d\n", op_val);
6427 return -EOPNOTSUPP;
6428 }
6429
6430 return 0;
6431 }
6432
6433 #endif
6434
6435 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6436
6437 static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
6438 enum tls_offload_ctx_dir direction,
6439 struct tls_crypto_info *crypto_info,
6440 u32 tcp_sn)
6441 {
6442 struct adapter *adap = netdev2adap(netdev);
6443 int ret;
6444
6445 mutex_lock(&uld_mutex);
6446 ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
6447 if (ret)
6448 goto out_unlock;
6449
6450 ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
6451 if (ret)
6452 goto out_unlock;
6453
6454 ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
6455 direction,
6456 crypto_info,
6457 tcp_sn);
6458
6459 if (ret)
6460 cxgb4_set_ktls_feature(adap,
6461 FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6462 out_unlock:
6463 mutex_unlock(&uld_mutex);
6464 return ret;
6465 }
6466
6467 static void cxgb4_ktls_dev_del(struct net_device *netdev,
6468 struct tls_context *tls_ctx,
6469 enum tls_offload_ctx_dir direction)
6470 {
6471 struct adapter *adap = netdev2adap(netdev);
6472
6473 mutex_lock(&uld_mutex);
6474 if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
6475 goto out_unlock;
6476
6477 adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
6478 direction);
6479
6480 out_unlock:
6481 cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6482 mutex_unlock(&uld_mutex);
6483 }
6484
6485 static const struct tlsdev_ops cxgb4_ktls_ops = {
6486 .tls_dev_add = cxgb4_ktls_dev_add,
6487 .tls_dev_del = cxgb4_ktls_dev_del,
6488 };
6489 #endif
6490
6491 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6492
6493 static int cxgb4_xfrm_add_state(struct xfrm_state *x)
6494 {
6495 struct adapter *adap = netdev2adap(x->xso.dev);
6496 int ret;
6497
6498 if (!mutex_trylock(&uld_mutex)) {
6499 dev_dbg(adap->pdev_dev,
6500 "crypto uld critical resource is under use\n");
6501 return -EBUSY;
6502 }
6503 ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
6504 if (ret)
6505 goto out_unlock;
6506
6507 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
6508
6509 out_unlock:
6510 mutex_unlock(&uld_mutex);
6511
6512 return ret;
6513 }
6514
6515 static void cxgb4_xfrm_del_state(struct xfrm_state *x)
6516 {
6517 struct adapter *adap = netdev2adap(x->xso.dev);
6518
6519 if (!mutex_trylock(&uld_mutex)) {
6520 dev_dbg(adap->pdev_dev,
6521 "crypto uld critical resource is under use\n");
6522 return;
6523 }
6524 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6525 goto out_unlock;
6526
6527 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
6528
6529 out_unlock:
6530 mutex_unlock(&uld_mutex);
6531 }
6532
6533 static void cxgb4_xfrm_free_state(struct xfrm_state *x)
6534 {
6535 struct adapter *adap = netdev2adap(x->xso.dev);
6536
6537 if (!mutex_trylock(&uld_mutex)) {
6538 dev_dbg(adap->pdev_dev,
6539 "crypto uld critical resource is under use\n");
6540 return;
6541 }
6542 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6543 goto out_unlock;
6544
6545 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
6546
6547 out_unlock:
6548 mutex_unlock(&uld_mutex);
6549 }
6550
6551 static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
6552 {
6553 struct adapter *adap = netdev2adap(x->xso.dev);
6554 bool ret = false;
6555
6556 if (!mutex_trylock(&uld_mutex)) {
6557 dev_dbg(adap->pdev_dev,
6558 "crypto uld critical resource is under use\n");
6559 return ret;
6560 }
6561 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6562 goto out_unlock;
6563
6564 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
6565
6566 out_unlock:
6567 mutex_unlock(&uld_mutex);
6568 return ret;
6569 }
6570
6571 static void cxgb4_advance_esn_state(struct xfrm_state *x)
6572 {
6573 struct adapter *adap = netdev2adap(x->xso.dev);
6574
6575 if (!mutex_trylock(&uld_mutex)) {
6576 dev_dbg(adap->pdev_dev,
6577 "crypto uld critical resource is under use\n");
6578 return;
6579 }
6580 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6581 goto out_unlock;
6582
6583 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
6584
6585 out_unlock:
6586 mutex_unlock(&uld_mutex);
6587 }
6588
6589 static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
6590 .xdo_dev_state_add = cxgb4_xfrm_add_state,
6591 .xdo_dev_state_delete = cxgb4_xfrm_del_state,
6592 .xdo_dev_state_free = cxgb4_xfrm_free_state,
6593 .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
6594 .xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
6595 };
6596
6597 #endif
6598
6599 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6600 {
6601 struct net_device *netdev;
6602 struct adapter *adapter;
6603 static int adap_idx = 1;
6604 int s_qpp, qpp, num_seg;
6605 struct port_info *pi;
6606 enum chip_type chip;
6607 void __iomem *regs;
6608 int func, chip_ver;
6609 u16 device_id;
6610 int i, err;
6611 u32 whoami;
6612
6613 err = pci_request_regions(pdev, KBUILD_MODNAME);
6614 if (err) {
6615
6616 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6617 return err;
6618 }
6619
6620 err = pci_enable_device(pdev);
6621 if (err) {
6622 dev_err(&pdev->dev, "cannot enable PCI device\n");
6623 goto out_release_regions;
6624 }
6625
6626 regs = pci_ioremap_bar(pdev, 0);
6627 if (!regs) {
6628 dev_err(&pdev->dev, "cannot map device registers\n");
6629 err = -ENOMEM;
6630 goto out_disable_device;
6631 }
6632
6633 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6634 if (!adapter) {
6635 err = -ENOMEM;
6636 goto out_unmap_bar0;
6637 }
6638
6639 adapter->regs = regs;
6640 err = t4_wait_dev_ready(regs);
6641 if (err < 0)
6642 goto out_free_adapter;
6643
6644
6645 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
6646 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
6647 chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
6648 if ((int)chip < 0) {
6649 dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
6650 err = chip;
6651 goto out_free_adapter;
6652 }
6653 chip_ver = CHELSIO_CHIP_VERSION(chip);
6654 func = chip_ver <= CHELSIO_T5 ?
6655 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
6656
6657 adapter->pdev = pdev;
6658 adapter->pdev_dev = &pdev->dev;
6659 adapter->name = pci_name(pdev);
6660 adapter->mbox = func;
6661 adapter->pf = func;
6662 adapter->params.chip = chip;
6663 adapter->adap_idx = adap_idx;
6664 adapter->msg_enable = DFLT_MSG_ENABLE;
6665 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
6666 (sizeof(struct mbox_cmd) *
6667 T4_OS_LOG_MBOX_CMDS),
6668 GFP_KERNEL);
6669 if (!adapter->mbox_log) {
6670 err = -ENOMEM;
6671 goto out_free_adapter;
6672 }
6673 spin_lock_init(&adapter->mbox_lock);
6674 INIT_LIST_HEAD(&adapter->mlist.list);
6675 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
6676 pci_set_drvdata(pdev, adapter);
6677
6678 if (func != ent->driver_data) {
6679 pci_disable_device(pdev);
6680 pci_save_state(pdev);
6681 return 0;
6682 }
6683
6684 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6685 if (err) {
6686 dev_err(&pdev->dev, "no usable DMA configuration\n");
6687 goto out_free_adapter;
6688 }
6689
6690 pci_enable_pcie_error_reporting(pdev);
6691 pci_set_master(pdev);
6692 pci_save_state(pdev);
6693 adap_idx++;
6694 adapter->workq = create_singlethread_workqueue("cxgb4");
6695 if (!adapter->workq) {
6696 err = -ENOMEM;
6697 goto out_free_adapter;
6698 }
6699
6700
6701 adapter->flags |= CXGB4_DEV_ENABLED;
6702 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6703
6704
6705
6706
6707
6708
6709
6710
6711
6712
6713
6714
6715
6716
6717
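/* Note whether the Root Complex has PCIe Relaxed Ordering enabled; if it
 * doesn't, set a flag so the driver avoids using the Relaxed Ordering
 * attribute on its DMA traffic.
 */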
6718 if (!pcie_relaxed_ordering_enabled(pdev))
6719 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
6720
6721 spin_lock_init(&adapter->stats_lock);
6722 spin_lock_init(&adapter->tid_release_lock);
6723 spin_lock_init(&adapter->win0_lock);
6724
6725 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6726 INIT_WORK(&adapter->db_full_task, process_db_full);
6727 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6728 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
6729
6730 err = t4_prep_adapter(adapter);
6731 if (err)
6732 goto out_free_adapter;
6733
6734 if (is_kdump_kernel()) {
6735
6736 err = cxgb4_cudbg_vmcore_add_dump(adapter);
6737 if (err) {
6738 dev_warn(adapter->pdev_dev,
6739 "Fail collecting vmcore device dump, err: %d. Continuing\n",
6740 err);
6741 err = 0;
6742 }
6743 }
6744
6745 if (!is_t4(adapter->params.chip)) {
6746 s_qpp = (QUEUESPERPAGEPF0_S +
6747 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6748 adapter->pf);
6749 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6750 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6751 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6752
6753
6754
6755
6756
6757
6758 if (qpp > num_seg) {
6759 dev_err(&pdev->dev,
6760 "Incorrect number of egress queues per page\n");
6761 err = -EINVAL;
6762 goto out_free_adapter;
6763 }
6764 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6765 pci_resource_len(pdev, 2));
6766 if (!adapter->bar2) {
6767 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6768 err = -ENOMEM;
6769 goto out_free_adapter;
6770 }
6771 }
6772
6773 setup_memwin(adapter);
6774 err = adap_init0(adapter, 0);
6775 if (err)
6776 goto out_unmap_bar;
6777
6778 setup_memwin_rdma(adapter);
6779
6780
6781 if (!is_t4(adapter->params.chip))
6782 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
6783 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
6784 T6_STATMODE_V(0)));
6785
6786 /* Initialize the hash MAC address list */
6787 INIT_LIST_HEAD(&adapter->mac_hlist);
6788
6789 for_each_port(adapter, i) {
6790 /* Allocate the net device with room for extra TX queues.  For
6791  * MQPRIO offload we need additional queues for the ETHOFLD TIDs
6792  * on each port; keep that extra count equal to MAX_ATIDS for now.
6793  * Once we connect to the firmware and query the EOTID parameters
6794  * we will learn the actual number of EOTIDs supported.
6795  */
6796 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6797 MAX_ETH_QSETS + MAX_ATIDS);
6798 if (!netdev) {
6799 err = -ENOMEM;
6800 goto out_free_dev;
6801 }
6802
6803 SET_NETDEV_DEV(netdev, &pdev->dev);
6804
6805 adapter->port[i] = netdev;
6806 pi = netdev_priv(netdev);
6807 pi->adapter = adapter;
6808 pi->xact_addr_filt = -1;
6809 pi->port_id = i;
6810 netdev->irq = pdev->irq;
6811
6812 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6813 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6814 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
6815 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
6816 NETIF_F_HW_TC | NETIF_F_NTUPLE | NETIF_F_HIGHDMA;
6817
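/* T6 and later adapters additionally support UDP tunnel segmentation
 * and checksum offload on inner headers, as well as TLS record offload.
 */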
6818 if (chip_ver > CHELSIO_T5) {
6819 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6820 NETIF_F_IPV6_CSUM |
6821 NETIF_F_RXCSUM |
6822 NETIF_F_GSO_UDP_TUNNEL |
6823 NETIF_F_GSO_UDP_TUNNEL_CSUM |
6824 NETIF_F_TSO | NETIF_F_TSO6;
6825
6826 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
6827 NETIF_F_GSO_UDP_TUNNEL_CSUM |
6828 NETIF_F_HW_TLS_RECORD;
6829
6830 if (adapter->rawf_cnt)
6831 netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
6832 }
6833
6834 netdev->features |= netdev->hw_features;
6835 netdev->vlan_features = netdev->features & VLAN_FEAT;
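/* Advertise TX inline TLS (kTLS) offload when the firmware reports the
 * TLS hardware crypto capability.
 */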
6836 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6837 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
6838 netdev->hw_features |= NETIF_F_HW_TLS_TX;
6839 netdev->tlsdev_ops = &cxgb4_ktls_ops;
6840 /* Initialize the kTLS context refcount */
6841 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
6842 }
6843 #endif
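/* Likewise advertise inline IPsec (ESP) offload when the firmware
 * reports support for it.
 */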
6844 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6845 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
6846 netdev->hw_enc_features |= NETIF_F_HW_ESP;
6847 netdev->features |= NETIF_F_HW_ESP;
6848 netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
6849 }
6850 #endif
6851
6852 netdev->priv_flags |= IFF_UNICAST_FLT;
6853
6854 /* MTU range: 81 - 9600 */
6855 netdev->min_mtu = 81;
6856 netdev->max_mtu = MAX_MTU;
6857
6858 netdev->netdev_ops = &cxgb4_netdev_ops;
6859 #ifdef CONFIG_CHELSIO_T4_DCB
6860 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6861 cxgb4_dcb_state_init(netdev);
6862 cxgb4_dcb_version_init(netdev);
6863 #endif
6864 cxgb4_set_ethtool_ops(netdev);
6865 }
6866
6867 cxgb4_init_ethtool_dump(adapter);
6868
6869 pci_set_drvdata(pdev, adapter);
6870
6871 if (adapter->flags & CXGB4_FW_OK) {
6872 err = t4_port_init(adapter, func, func, 0);
6873 if (err)
6874 goto out_free_dev;
6875 } else if (adapter->params.nports == 1) {
6876 /* If we don't have a connection to the firmware -- possibly
6877  * because of an error -- grab the raw VPD parameters so we can
6878  * set a proper MAC address on the debug network interface that
6879  * we've created.
6880  */
6881 u8 hw_addr[ETH_ALEN];
6882 u8 *na = adapter->params.vpd.na;
6883
6884 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
6885 if (!err) {
6886 for (i = 0; i < ETH_ALEN; i++)
6887 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
6888 hex2val(na[2 * i + 1]));
6889 t4_set_hw_addr(adapter, 0, hw_addr);
6890 }
6891 }
6892
6893 if (!(adapter->flags & CXGB4_FW_OK))
6894 goto fw_attach_fail;
6895
6896 /* Configure queues and allocate tables now; they can be needed as
6897  * soon as the first register_netdev completes.
6898  */
6899 err = cfg_queues(adapter);
6900 if (err)
6901 goto out_free_dev;
6902
6903 adapter->smt = t4_init_smt();
6904 if (!adapter->smt) {
6905 /* We tolerate a lack of SMT, giving up some functionality */
6906 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
6907 }
6908
6909 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
6910 if (!adapter->l2t) {
6911 /* We tolerate a lack of L2T, giving up some functionality */
6912 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6913 adapter->params.offload = 0;
6914 }
6915
6916 #if IS_ENABLED(CONFIG_IPV6)
6917 if (chip_ver <= CHELSIO_T5 &&
6918 (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
6919 /* CLIP functionality is not present in hardware, hence disable
6920  * all offload features.
6921  */
6922 dev_warn(&pdev->dev,
6923 "CLIP not enabled in hardware, continuing\n");
6924 adapter->params.offload = 0;
6925 } else {
6926 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6927 adapter->clipt_end);
6928 if (!adapter->clipt) {
6929 /* We tolerate a lack of CLIP table, giving up some
6930  * functionality.
6931  */
6932 dev_warn(&pdev->dev,
6933 "could not allocate Clip table, continuing\n");
6934 adapter->params.offload = 0;
6935 }
6936 }
6937 #endif
6938
6939 for_each_port(adapter, i) {
6940 pi = adap2pinfo(adapter, i);
6941 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
6942 if (!pi->sched_tbl)
6943 dev_warn(&pdev->dev,
6944 "could not activate scheduling on port %d\n",
6945 i);
6946 }
6947
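/* If the LE hash region is enabled, read back its size and base so the
 * TID table below can account for hash-filter/offload TIDs.
 */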
6948 if (is_offload(adapter) || is_hashfilter(adapter)) {
6949 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
6950 u32 v;
6951
6952 v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
6953 if (chip_ver <= CHELSIO_T5) {
6954 adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
6955 v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
6956 adapter->tids.hash_base = v / 4;
6957 } else {
6958 adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
6959 v = t4_read_reg(adapter,
6960 T6_LE_DB_HASH_TID_BASE_A);
6961 adapter->tids.hash_base = v;
6962 }
6963 }
6964 }
6965
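/* Allocate the TID table; without it, offload (and the TC hardware
 * offloads initialised below) is disabled.
 */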
6966 if (tid_init(&adapter->tids) < 0) {
6967 dev_warn(&pdev->dev, "could not allocate TID table, "
6968 "continuing\n");
6969 adapter->params.offload = 0;
6970 } else {
6971 adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
6972 if (!adapter->tc_u32)
6973 dev_warn(&pdev->dev,
6974 "could not offload tc u32, continuing\n");
6975
6976 if (cxgb4_init_tc_flower(adapter))
6977 dev_warn(&pdev->dev,
6978 "could not offload tc flower, continuing\n");
6979
6980 if (cxgb4_init_tc_mqprio(adapter))
6981 dev_warn(&pdev->dev,
6982 "could not offload tc mqprio, continuing\n");
6983
6984 if (cxgb4_init_tc_matchall(adapter))
6985 dev_warn(&pdev->dev,
6986 "could not offload tc matchall, continuing\n");
6987 if (cxgb4_init_ethtool_filters(adapter))
6988 dev_warn(&pdev->dev,
6989 "could not initialize ethtool filters, continuing\n");
6990 }
6991
6992 /* See what interrupts we'll be using */
6993 if (msi > 1 && enable_msix(adapter) == 0)
6994 adapter->flags |= CXGB4_USING_MSIX;
6995 else if (msi > 0 && pci_enable_msi(pdev) == 0) {
6996 adapter->flags |= CXGB4_USING_MSI;
6997 if (msi > 1)
6998 free_msix_info(adapter);
6999 }
7000
7001 /* Report the PCI Express link bandwidth available to the adapter */
7002 pcie_print_link_status(pdev);
7003
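/* Initialise tracking of MPS TCAM (MAC filter) entry references */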
7004 cxgb4_init_mps_ref_entries(adapter);
7005
7006 err = init_rss(adapter);
7007 if (err)
7008 goto out_free_dev;
7009
7010 err = setup_non_data_intr(adapter);
7011 if (err) {
7012 dev_err(adapter->pdev_dev,
7013 "Non Data interrupt allocation failed, err: %d\n", err);
7014 goto out_free_dev;
7015 }
7016
7017 err = setup_fw_sge_queues(adapter);
7018 if (err) {
7019 dev_err(adapter->pdev_dev,
7020 "FW sge queue allocation failed, err %d", err);
7021 goto out_free_dev;
7022 }
7023
7024 fw_attach_fail:
7025 /*
7026  * The card is now ready to go.  If any errors occur during device
7027  * registration we do not fail the whole card but rather proceed only
7028  * with the ports we manage to register successfully.  However we must
7029  * register at least one net device.
7030  */
7031 for_each_port(adapter, i) {
7032 pi = adap2pinfo(adapter, i);
7033 adapter->port[i]->dev_port = pi->lport;
7034 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
7035 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
7036
7037 netif_carrier_off(adapter->port[i]);
7038
7039 err = register_netdev(adapter->port[i]);
7040 if (err)
7041 break;
7042 adapter->chan_map[pi->tx_chan] = i;
7043 print_port_info(adapter->port[i]);
7044 }
7045 if (i == 0) {
7046 dev_err(&pdev->dev, "could not register any net devices\n");
7047 goto out_free_dev;
7048 }
7049 if (err) {
7050 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
7051 err = 0;
7052 }
7053
7054 if (cxgb4_debugfs_root) {
7055 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
7056 cxgb4_debugfs_root);
7057 setup_debugfs(adapter);
7058 }
7059
7060 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7061 pdev->needs_freset = 1;
7062
7063 if (is_uld(adapter))
7064 cxgb4_uld_enable(adapter);
7065
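/* T5 and later adapters provide a hardware PTP clock */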
7066 if (!is_t4(adapter->params.chip))
7067 cxgb4_ptp_init(adapter);
7068
7069 if (IS_REACHABLE(CONFIG_THERMAL) &&
7070 !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
7071 cxgb4_thermal_init(adapter);
7072
7073 print_adapter_info(adapter);
7074 return 0;
7075
7076 out_free_dev:
7077 t4_free_sge_resources(adapter);
7078 free_some_resources(adapter);
7079 if (adapter->flags & CXGB4_USING_MSIX)
7080 free_msix_info(adapter);
7081 if (adapter->num_uld || adapter->num_ofld_uld)
7082 t4_uld_mem_free(adapter);
7083 out_unmap_bar:
7084 if (!is_t4(adapter->params.chip))
7085 iounmap(adapter->bar2);
7086 out_free_adapter:
7087 if (adapter->workq)
7088 destroy_workqueue(adapter->workq);
7089
7090 kfree(adapter->mbox_log);
7091 kfree(adapter);
7092 out_unmap_bar0:
7093 iounmap(regs);
7094 out_disable_device:
7095 pci_disable_pcie_error_reporting(pdev);
7096 pci_disable_device(pdev);
7097 out_release_regions:
7098 pci_release_regions(pdev);
7099 return err;
7100 }
7101
7102 static void remove_one(struct pci_dev *pdev)
7103 {
7104 struct adapter *adapter = pci_get_drvdata(pdev);
7105 struct hash_mac_addr *entry, *tmp;
7106
7107 if (!adapter) {
7108 pci_release_regions(pdev);
7109 return;
7110 }
7111
7112 /* If we allocated filters, free up the state associated with
7113  * any valid filters that are still in place.
7114  */
7115 clear_all_filters(adapter);
7116
7117 adapter->flags |= CXGB4_SHUTTING_DOWN;
7118
7119 if (adapter->pf == 4) {
7120 int i;
7121
7122 /* Tear down the per-adapter Work Queue first since it can
7123  * contain references to our adapter data structure.
7124  */
7125 destroy_workqueue(adapter->workq);
7126
7127 detach_ulds(adapter);
7128
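/* Unregister all the net devices before tearing down the rest of
 * the adapter state.
 */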
7129 for_each_port(adapter, i)
7130 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
7131 unregister_netdev(adapter->port[i]);
7132
7133 t4_uld_clean_up(adapter);
7134
7135 adap_free_hma_mem(adapter);
7136
7137 disable_interrupts(adapter);
7138
7139 cxgb4_free_mps_ref_entries(adapter);
7140
7141 debugfs_remove_recursive(adapter->debugfs_root);
7142
7143 if (!is_t4(adapter->params.chip))
7144 cxgb4_ptp_stop(adapter);
7145 if (IS_REACHABLE(CONFIG_THERMAL))
7146 cxgb4_thermal_remove(adapter);
7147
7148 if (adapter->flags & CXGB4_FULL_INIT_DONE)
7149 cxgb_down(adapter);
7150
7151 if (adapter->flags & CXGB4_USING_MSIX)
7152 free_msix_info(adapter);
7153 if (adapter->num_uld || adapter->num_ofld_uld)
7154 t4_uld_mem_free(adapter);
7155 free_some_resources(adapter);
7156 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
7157 list) {
7158 list_del(&entry->list);
7159 kfree(entry);
7160 }
7161
7162 #if IS_ENABLED(CONFIG_IPV6)
7163 t4_cleanup_clip_tbl(adapter);
7164 #endif
7165 if (!is_t4(adapter->params.chip))
7166 iounmap(adapter->bar2);
7167 }
7168 #ifdef CONFIG_PCI_IOV
7169 else {
7170 cxgb4_iov_configure(adapter->pdev, 0);
7171 }
7172 #endif
7173 iounmap(adapter->regs);
7174 pci_disable_pcie_error_reporting(pdev);
7175 if ((adapter->flags & CXGB4_DEV_ENABLED)) {
7176 pci_disable_device(pdev);
7177 adapter->flags &= ~CXGB4_DEV_ENABLED;
7178 }
7179 pci_release_regions(pdev);
7180 kfree(adapter->mbox_log);
7181 synchronize_rcu();
7182 kfree(adapter);
7183 }
7184
7185 /* "Shutdown" quiesces the device, stopping Ingress Packet and
7186  * Interrupt delivery.  This is essentially a stripped down version of
7187  * the PCI remove() function where we do the minimal amount of work
7188  * necessary to shut down the device.
7189  */
7190 static void shutdown_one(struct pci_dev *pdev)
7191 {
7192 struct adapter *adapter = pci_get_drvdata(pdev);
7193
7194 /* As with remove_one() above, we only want to do cleanup on PCI
7195  * devices that went all the way through init_one(); if we never got
7196  * that far, all we need to do is release the PCI regions.
7197  */
7198 if (!adapter) {
7199 pci_release_regions(pdev);
7200 return;
7201 }
7202
7203 adapter->flags |= CXGB4_SHUTTING_DOWN;
7204
7205 if (adapter->pf == 4) {
7206 int i;
7207
7208 for_each_port(adapter, i)
7209 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
7210 cxgb_close(adapter->port[i]);
7211
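/* Stop any mqprio offload under RTNL before detaching the ULDs */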
7212 rtnl_lock();
7213 cxgb4_mqprio_stop_offload(adapter);
7214 rtnl_unlock();
7215
7216 if (is_uld(adapter)) {
7217 detach_ulds(adapter);
7218 t4_uld_clean_up(adapter);
7219 }
7220
7221 disable_interrupts(adapter);
7222 disable_msi(adapter);
7223
7224 t4_sge_stop(adapter);
7225 if (adapter->flags & CXGB4_FW_OK)
7226 t4_fw_bye(adapter, adapter->mbox);
7227 }
7228 }
7229
7230 static struct pci_driver cxgb4_driver = {
7231 .name = KBUILD_MODNAME,
7232 .id_table = cxgb4_pci_tbl,
7233 .probe = init_one,
7234 .remove = remove_one,
7235 .shutdown = shutdown_one,
7236 #ifdef CONFIG_PCI_IOV
7237 .sriov_configure = cxgb4_iov_configure,
7238 #endif
7239 .err_handler = &cxgb4_eeh,
7240 };
7241
7242 static int __init cxgb4_init_module(void)
7243 {
7244 int ret;
7245
7246 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
7247
7248 ret = pci_register_driver(&cxgb4_driver);
7249 if (ret < 0)
7250 goto err_pci;
7251
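/* Register an inet6addr notifier (once) so the driver can keep the
 * hardware CLIP table in sync with IPv6 address changes.
 */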
7252 #if IS_ENABLED(CONFIG_IPV6)
7253 if (!inet6addr_registered) {
7254 ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
7255 if (ret)
7256 pci_unregister_driver(&cxgb4_driver);
7257 else
7258 inet6addr_registered = true;
7259 }
7260 #endif
7261
7262 if (ret == 0)
7263 return ret;
7264
7265 err_pci:
7266 debugfs_remove(cxgb4_debugfs_root);
7267
7268 return ret;
7269 }
7270
7271 static void __exit cxgb4_cleanup_module(void)
7272 {
7273 #if IS_ENABLED(CONFIG_IPV6)
7274 if (inet6addr_registered) {
7275 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
7276 inet6addr_registered = false;
7277 }
7278 #endif
7279 pci_unregister_driver(&cxgb4_driver);
7280 debugfs_remove(cxgb4_debugfs_root);
7281 }
7282
7283 module_init(cxgb4_init_module);
7284 module_exit(cxgb4_cleanup_module);