0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038 #include "common.h"
0039 #include <linux/module.h>
0040 #include <linux/pci.h>
0041 #include <linux/netdevice.h>
0042 #include <linux/etherdevice.h>
0043 #include <linux/if_vlan.h>
0044 #include <linux/mii.h>
0045 #include <linux/sockios.h>
0046 #include <linux/dma-mapping.h>
0047 #include <linux/uaccess.h>
0048
0049 #include "cpl5_cmd.h"
0050 #include "regs.h"
0051 #include "gmac.h"
0052 #include "cphy.h"
0053 #include "sge.h"
0054 #include "tp.h"
0055 #include "espi.h"
0056 #include "elmer0.h"
0057
0058 #include <linux/workqueue.h>
0059
0060 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
0061 {
0062 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
0063 }
0064
0065 static inline void cancel_mac_stats_update(struct adapter *ap)
0066 {
0067 cancel_delayed_work(&ap->stats_update_task);
0068 }
0069
0070 #define MAX_CMDQ_ENTRIES 16384
0071 #define MAX_CMDQ1_ENTRIES 1024
0072 #define MAX_RX_BUFFERS 16384
0073 #define MAX_RX_JUMBO_BUFFERS 16384
0074 #define MAX_TX_BUFFERS_HIGH 16384U
0075 #define MAX_TX_BUFFERS_LOW 1536U
0076 #define MAX_TX_BUFFERS 1460U
0077 #define MIN_FL_ENTRIES 32
0078
0079 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
0080 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
0081 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
0082
0083
0084
0085
0086
0087 #define EEPROM_SIZE 32
0088
0089 MODULE_DESCRIPTION(DRV_DESCRIPTION);
0090 MODULE_AUTHOR("Chelsio Communications");
0091 MODULE_LICENSE("GPL");
0092
0093 static int dflt_msg_enable = DFLT_MSG_ENABLE;
0094
0095 module_param(dflt_msg_enable, int, 0);
0096 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
0097
0098 #define HCLOCK 0x0
0099 #define LCLOCK 0x1
0100
0101
0102 static int t1_clock(struct adapter *adapter, int mode);
0103 static int t1powersave = 1;
0104
0105 module_param(t1powersave, int, 0);
0106 MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
0107
0108 static int disable_msi = 0;
0109 module_param(disable_msi, int, 0);
0110 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
0111
0112
0113
0114
0115 static void t1_set_rxmode(struct net_device *dev)
0116 {
0117 struct adapter *adapter = dev->ml_priv;
0118 struct cmac *mac = adapter->port[dev->if_port].mac;
0119 struct t1_rx_mode rm;
0120
0121 rm.dev = dev;
0122 mac->ops->set_rx_mode(mac, &rm);
0123 }
0124
0125 static void link_report(struct port_info *p)
0126 {
0127 if (!netif_carrier_ok(p->dev))
0128 netdev_info(p->dev, "link down\n");
0129 else {
0130 const char *s = "10Mbps";
0131
0132 switch (p->link_config.speed) {
0133 case SPEED_10000: s = "10Gbps"; break;
0134 case SPEED_1000: s = "1000Mbps"; break;
0135 case SPEED_100: s = "100Mbps"; break;
0136 }
0137
0138 netdev_info(p->dev, "link up, %s, %s-duplex\n",
0139 s, p->link_config.duplex == DUPLEX_FULL
0140 ? "full" : "half");
0141 }
0142 }
0143
0144 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
0145 int speed, int duplex, int pause)
0146 {
0147 struct port_info *p = &adapter->port[port_id];
0148
0149 if (link_stat != netif_carrier_ok(p->dev)) {
0150 if (link_stat)
0151 netif_carrier_on(p->dev);
0152 else
0153 netif_carrier_off(p->dev);
0154 link_report(p);
0155
0156
0157 if ((speed > 0) && (adapter->params.nports > 1)) {
0158 unsigned int sched_speed = 10;
0159 switch (speed) {
0160 case SPEED_1000:
0161 sched_speed = 1000;
0162 break;
0163 case SPEED_100:
0164 sched_speed = 100;
0165 break;
0166 case SPEED_10:
0167 sched_speed = 10;
0168 break;
0169 }
0170 t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
0171 }
0172 }
0173 }
0174
/*
 * Bring up a port's link: reset the MAC, program the station address,
 * restore the RX filter mode, start PHY link negotiation and finally
 * enable both TX and RX directions. The ordering matters — the MAC must
 * be reset before it is reprogrammed.
 */
static void link_start(struct port_info *p)
{
        struct cmac *mac = p->mac;

        mac->ops->reset(mac);
        if (mac->ops->macaddress_set)
                mac->ops->macaddress_set(mac, p->dev->dev_addr);
        t1_set_rxmode(p->dev);
        t1_link_start(p->phy, mac, &p->link_config);
        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
0186
/*
 * Enable TP checksum offload. TCP checksum offload is always enabled;
 * IP header checksum generation is only turned on when the port is
 * TSO-capable (needed for hardware-built headers).
 */
static void enable_hw_csum(struct adapter *adapter)
{
        if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
                t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
        t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
0193
0194
0195
0196
0197
/*
 * Bring up shared adapter state: one-time hardware module init, interrupt
 * allocation (MSI if available, else shared legacy IRQ), SGE start and
 * interrupt enable. Called from cxgb_open() when the first port opens.
 *
 * Returns 0 on success or a negative errno.
 */
static int cxgb_up(struct adapter *adapter)
{
        int err = 0;

        /* Hardware modules are initialized only once per adapter. */
        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = t1_init_hw_modules(adapter);
                if (err)
                        goto out_err;

                enable_hw_csum(adapter);
                adapter->flags |= FULL_INIT_DONE;
        }

        t1_interrupts_clear(adapter);

        /* Prefer MSI; fall back to a shared legacy interrupt line. */
        adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
        err = request_threaded_irq(adapter->pdev->irq, t1_interrupt,
                                   t1_interrupt_thread,
                                   adapter->params.has_msi ? 0 : IRQF_SHARED,
                                   adapter->name, adapter);
        if (err) {
                if (adapter->params.has_msi)
                        pci_disable_msi(adapter->pdev);

                goto out_err;
        }

        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
out_err:
        return err;
}
0230
0231
0232
0233
/*
 * Release shared adapter resources when the last port closes: stop the
 * SGE and mask interrupts before freeing the IRQ (and the MSI vector if
 * one was allocated in cxgb_up()).
 */
static void cxgb_down(struct adapter *adapter)
{
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
        if (adapter->params.has_msi)
                pci_disable_msi(adapter->pdev);
}
0242
/*
 * ndo_open handler. The first port to open brings up the shared adapter
 * state (cxgb_up) and starts the periodic MAC statistics refresh; every
 * open starts its own link and TX queue.
 */
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->ml_priv;
        int other_ports = adapter->open_device_map & PORT_MASK;

        napi_enable(&adapter->napi);
        /* First open: initialize adapter-wide resources. */
        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
                napi_disable(&adapter->napi);
                return err;
        }

        __set_bit(dev->if_port, &adapter->open_device_map);
        link_start(&adapter->port[dev->if_port]);
        netif_start_queue(dev);
        /* Only the first opened port kicks off the stats work. */
        if (!other_ports && adapter->params.stats_update_period)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);

        t1_vlan_mode(adapter, dev->features);
        return 0;
}
0265
/*
 * ndo_stop handler. Quiesces the port, and when the last open port goes
 * away also stops the stats work and tears down shared adapter state.
 */
static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct cmac *mac = p->mac;

        netif_stop_queue(dev);
        napi_disable(&adapter->napi);
        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
        netif_carrier_off(dev);

        clear_bit(dev->if_port, &adapter->open_device_map);
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /*
                 * Make the cleared port bit visible, then spin through
                 * work_lock so a concurrently running mac_stats_task (which
                 * checks open_device_map under the same lock) finishes its
                 * reschedule decision before we cancel the delayed work.
                 */
                smp_mb__after_atomic();
                spin_lock(&adapter->work_lock);
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
        }

        /* Last port closed: release shared adapter resources. */
        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}
0291
0292 static struct net_device_stats *t1_get_stats(struct net_device *dev)
0293 {
0294 struct adapter *adapter = dev->ml_priv;
0295 struct port_info *p = &adapter->port[dev->if_port];
0296 struct net_device_stats *ns = &dev->stats;
0297 const struct cmac_statistics *pstats;
0298
0299
0300 pstats = p->mac->ops->statistics_update(p->mac,
0301 MAC_STATS_UPDATE_FULL);
0302
0303 ns->tx_packets = pstats->TxUnicastFramesOK +
0304 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
0305
0306 ns->rx_packets = pstats->RxUnicastFramesOK +
0307 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
0308
0309 ns->tx_bytes = pstats->TxOctetsOK;
0310 ns->rx_bytes = pstats->RxOctetsOK;
0311
0312 ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
0313 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
0314 ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
0315 pstats->RxFCSErrors + pstats->RxAlignErrors +
0316 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
0317 pstats->RxSymbolErrors + pstats->RxRuntErrors;
0318
0319 ns->multicast = pstats->RxMulticastFramesOK;
0320 ns->collisions = pstats->TxTotalCollisions;
0321
0322
0323 ns->rx_length_errors = pstats->RxFrameTooLongErrors +
0324 pstats->RxJabberErrors;
0325 ns->rx_over_errors = 0;
0326 ns->rx_crc_errors = pstats->RxFCSErrors;
0327 ns->rx_frame_errors = pstats->RxAlignErrors;
0328 ns->rx_fifo_errors = 0;
0329 ns->rx_missed_errors = 0;
0330
0331
0332 ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
0333 ns->tx_carrier_errors = 0;
0334 ns->tx_fifo_errors = pstats->TxUnderrun;
0335 ns->tx_heartbeat_errors = 0;
0336 ns->tx_window_errors = pstats->TxLateCollisions;
0337 return ns;
0338 }
0339
0340 static u32 get_msglevel(struct net_device *dev)
0341 {
0342 struct adapter *adapter = dev->ml_priv;
0343
0344 return adapter->msg_enable;
0345 }
0346
0347 static void set_msglevel(struct net_device *dev, u32 val)
0348 {
0349 struct adapter *adapter = dev->ml_priv;
0350
0351 adapter->msg_enable = val;
0352 }
0353
/*
 * Names for the ethtool -S statistics. The order here must match exactly
 * the order in which get_stats() writes the corresponding values.
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
        /* MAC TX counters */
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames",
        "TxFramesWithDeferredXmissions",
        "TxLateCollisions",
        "TxTotalCollisions",
        "TxFramesAbortedDueToXSCollisions",
        "TxUnderrun",
        "TxLengthErrors",
        "TxInternalMACXmitError",
        "TxFramesWithExcessiveDeferral",
        "TxFCSErrors",
        "TxJumboFramesOk",
        "TxJumboOctetsOk",

        /* MAC RX counters */
        "RxOctetsOK",
        "RxOctetsBad",
        "RxUnicastFramesOK",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames",
        "RxFCSErrors",
        "RxAlignErrors",
        "RxSymbolErrors",
        "RxDataErrors",
        "RxSequenceErrors",
        "RxRuntErrors",
        "RxJabberErrors",
        "RxInternalMACRcvError",
        "RxInRangeLengthErrors",
        "RxOutOfRangeLengthField",
        "RxFrameTooLongErrors",
        "RxJumboFramesOk",
        "RxJumboOctetsOk",

        /* per-port offload/VLAN counters from the SGE */
        "RxCsumGood",
        "TxCsumOffload",
        "TxTso",
        "RxVlan",
        "TxVlan",
        "TxNeedHeadroom",

        /* SGE interrupt/error counters */
        "rx drops",
        "pure_rsps",
        "unhandled irqs",
        "respQ_empty",
        "respQ_overflow",
        "freelistQ_empty",
        "pkt_too_big",
        "pkt_mismatch",
        "cmdQ_full0",
        "cmdQ_full1",

        /* ESPI counters — only filled in when adapter->espi exists */
        "espi_DIP2ParityErr",
        "espi_DIP4Err",
        "espi_RxDrops",
        "espi_TxDrops",
        "espi_RxOvfl",
        "espi_ParityErr"
};
0420
0421 #define T2_REGMAP_SIZE (3 * 1024)
0422
0423 static int get_regs_len(struct net_device *dev)
0424 {
0425 return T2_REGMAP_SIZE;
0426 }
0427
0428 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
0429 {
0430 struct adapter *adapter = dev->ml_priv;
0431
0432 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
0433 strlcpy(info->bus_info, pci_name(adapter->pdev),
0434 sizeof(info->bus_info));
0435 }
0436
0437 static int get_sset_count(struct net_device *dev, int sset)
0438 {
0439 switch (sset) {
0440 case ETH_SS_STATS:
0441 return ARRAY_SIZE(stats_strings);
0442 default:
0443 return -EOPNOTSUPP;
0444 }
0445 }
0446
0447 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
0448 {
0449 if (stringset == ETH_SS_STATS)
0450 memcpy(data, stats_strings, sizeof(stats_strings));
0451 }
0452
/*
 * ethtool -S handler. Values are emitted in exactly the order of
 * stats_strings[] above — keep the two in sync.
 *
 * NOTE(review): when adapter->espi is NULL the final six slots are left
 * unwritten even though get_sset_count() counts them; presumably every
 * supported board has an ESPI module — confirm.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        const struct cmac_statistics *s;
        const struct sge_intr_counts *t;
        struct sge_port_stats ss;

        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
        t = t1_sge_get_intr_counts(adapter->sge);
        t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

        /* MAC TX counters */
        *data++ = s->TxOctetsOK;
        *data++ = s->TxOctetsBad;
        *data++ = s->TxUnicastFramesOK;
        *data++ = s->TxMulticastFramesOK;
        *data++ = s->TxBroadcastFramesOK;
        *data++ = s->TxPauseFrames;
        *data++ = s->TxFramesWithDeferredXmissions;
        *data++ = s->TxLateCollisions;
        *data++ = s->TxTotalCollisions;
        *data++ = s->TxFramesAbortedDueToXSCollisions;
        *data++ = s->TxUnderrun;
        *data++ = s->TxLengthErrors;
        *data++ = s->TxInternalMACXmitError;
        *data++ = s->TxFramesWithExcessiveDeferral;
        *data++ = s->TxFCSErrors;
        *data++ = s->TxJumboFramesOK;
        *data++ = s->TxJumboOctetsOK;

        /* MAC RX counters */
        *data++ = s->RxOctetsOK;
        *data++ = s->RxOctetsBad;
        *data++ = s->RxUnicastFramesOK;
        *data++ = s->RxMulticastFramesOK;
        *data++ = s->RxBroadcastFramesOK;
        *data++ = s->RxPauseFrames;
        *data++ = s->RxFCSErrors;
        *data++ = s->RxAlignErrors;
        *data++ = s->RxSymbolErrors;
        *data++ = s->RxDataErrors;
        *data++ = s->RxSequenceErrors;
        *data++ = s->RxRuntErrors;
        *data++ = s->RxJabberErrors;
        *data++ = s->RxInternalMACRcvError;
        *data++ = s->RxInRangeLengthErrors;
        *data++ = s->RxOutOfRangeLengthField;
        *data++ = s->RxFrameTooLongErrors;
        *data++ = s->RxJumboFramesOK;
        *data++ = s->RxJumboOctetsOK;

        /* per-port SGE offload/VLAN counters */
        *data++ = ss.rx_cso_good;
        *data++ = ss.tx_cso;
        *data++ = ss.tx_tso;
        *data++ = ss.vlan_xtract;
        *data++ = ss.vlan_insert;
        *data++ = ss.tx_need_hdrroom;

        /* SGE interrupt/error counters */
        *data++ = t->rx_drops;
        *data++ = t->pure_rsps;
        *data++ = t->unhandled_irqs;
        *data++ = t->respQ_empty;
        *data++ = t->respQ_overflow;
        *data++ = t->freelistQ_empty;
        *data++ = t->pkt_too_big;
        *data++ = t->pkt_mismatch;
        *data++ = t->cmdQ_full[0];
        *data++ = t->cmdQ_full[1];

        /* ESPI counters (only when the module is present) */
        if (adapter->espi) {
                const struct espi_intr_counts *e;

                e = t1_espi_get_intr_counts(adapter->espi);
                *data++ = e->DIP2_parity_err;
                *data++ = e->DIP4_err;
                *data++ = e->rx_drops;
                *data++ = e->tx_drops;
                *data++ = e->rx_ovflw;
                *data++ = e->parity_err;
        }
}
0534
0535 static inline void reg_block_dump(struct adapter *ap, void *buf,
0536 unsigned int start, unsigned int end)
0537 {
0538 u32 *p = buf + start;
0539
0540 for ( ; start <= end; start += sizeof(u32))
0541 *p++ = readl(ap->regs + start);
0542 }
0543
/*
 * ethtool -d handler: dump the interesting register blocks into @buf.
 * Unvisited gaps between blocks stay zero from the memset.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->ml_priv;

        /* Dump format version; bump if the block list below changes. */
        regs->version = 2;

        memset(buf, 0, T2_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);         /* SGE */
        reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);    /* MC3/MC4 */
        reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);         /* TPI */
        reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT); /* TP */
        reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE); /* RAT */
        reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE); /* CSPI */
        reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT); /* ESPI */
        reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);  /* ULP */
        reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);       /* PL */
        reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD); /* MC5 */
}
0566
0567 static int get_link_ksettings(struct net_device *dev,
0568 struct ethtool_link_ksettings *cmd)
0569 {
0570 struct adapter *adapter = dev->ml_priv;
0571 struct port_info *p = &adapter->port[dev->if_port];
0572 u32 supported, advertising;
0573
0574 supported = p->link_config.supported;
0575 advertising = p->link_config.advertising;
0576
0577 if (netif_carrier_ok(dev)) {
0578 cmd->base.speed = p->link_config.speed;
0579 cmd->base.duplex = p->link_config.duplex;
0580 } else {
0581 cmd->base.speed = SPEED_UNKNOWN;
0582 cmd->base.duplex = DUPLEX_UNKNOWN;
0583 }
0584
0585 cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
0586 cmd->base.phy_address = p->phy->mdio.prtad;
0587 cmd->base.autoneg = p->link_config.autoneg;
0588
0589 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
0590 supported);
0591 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
0592 advertising);
0593
0594 return 0;
0595 }
0596
0597 static int speed_duplex_to_caps(int speed, int duplex)
0598 {
0599 int cap = 0;
0600
0601 switch (speed) {
0602 case SPEED_10:
0603 if (duplex == DUPLEX_FULL)
0604 cap = SUPPORTED_10baseT_Full;
0605 else
0606 cap = SUPPORTED_10baseT_Half;
0607 break;
0608 case SPEED_100:
0609 if (duplex == DUPLEX_FULL)
0610 cap = SUPPORTED_100baseT_Full;
0611 else
0612 cap = SUPPORTED_100baseT_Half;
0613 break;
0614 case SPEED_1000:
0615 if (duplex == DUPLEX_FULL)
0616 cap = SUPPORTED_1000baseT_Full;
0617 else
0618 cap = SUPPORTED_1000baseT_Half;
0619 break;
0620 case SPEED_10000:
0621 if (duplex == DUPLEX_FULL)
0622 cap = SUPPORTED_10000baseT_Full;
0623 }
0624 return cap;
0625 }
0626
0627 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
0628 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
0629 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
0630 ADVERTISED_10000baseT_Full)
0631
/*
 * ethtool set_link_ksettings handler. With autoneg disabled a single
 * fixed speed/duplex is requested; with autoneg enabled the advertised
 * mask is sanitized against what the port supports. Takes effect
 * immediately (via t1_link_start) if the interface is running.
 */
static int set_link_ksettings(struct net_device *dev,
                              const struct ethtool_link_ksettings *cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;
        u32 advertising;

        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);

        /* Ports without autoneg cannot change their settings at all. */
        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;

        if (cmd->base.autoneg == AUTONEG_DISABLE) {
                u32 speed = cmd->base.speed;
                int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

                /*
                 * NOTE(review): 1000Mbps is explicitly rejected as a forced
                 * speed — presumably gigabit requires autonegotiation on
                 * this hardware; confirm.
                 */
                if (!(lc->supported & cap) || (speed == SPEED_1000))
                        return -EINVAL;
                lc->requested_speed = speed;
                lc->requested_duplex = cmd->base.duplex;
                lc->advertising = 0;
        } else {
                advertising &= ADVERTISED_MASK;
                /* More than one mode requested: advertise all we support. */
                if (advertising & (advertising - 1))
                        advertising = lc->supported;
                advertising &= lc->supported;
                if (!advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->base.autoneg;
        if (netif_running(dev))
                t1_link_start(p->phy, p->mac, lc);
        return 0;
}
0671
0672 static void get_pauseparam(struct net_device *dev,
0673 struct ethtool_pauseparam *epause)
0674 {
0675 struct adapter *adapter = dev->ml_priv;
0676 struct port_info *p = &adapter->port[dev->if_port];
0677
0678 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
0679 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
0680 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
0681 }
0682
/*
 * ethtool set_pauseparam handler. Pause autonegotiation can only be
 * requested on ports that support autoneg. With link autoneg enabled the
 * new setting is applied by renegotiating; otherwise it is programmed
 * straight into the MAC.
 */
static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                /* Renegotiate so the link partner sees the new setting. */
                if (netif_running(dev))
                        t1_link_start(p->phy, p->mac, lc);
        } else {
                /* Fixed link configuration: apply directly to the MAC. */
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
                                                         lc->fc);
        }
        return 0;
}
0712
0713 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
0714 struct kernel_ethtool_ringparam *kernel_e,
0715 struct netlink_ext_ack *extack)
0716 {
0717 struct adapter *adapter = dev->ml_priv;
0718 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
0719
0720 e->rx_max_pending = MAX_RX_BUFFERS;
0721 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
0722 e->tx_max_pending = MAX_CMDQ_ENTRIES;
0723
0724 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
0725 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
0726 e->tx_pending = adapter->params.sge.cmdQ_size[0];
0727 }
0728
0729 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
0730 struct kernel_ethtool_ringparam *kernel_e,
0731 struct netlink_ext_ack *extack)
0732 {
0733 struct adapter *adapter = dev->ml_priv;
0734 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
0735
0736 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
0737 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
0738 e->tx_pending > MAX_CMDQ_ENTRIES ||
0739 e->rx_pending < MIN_FL_ENTRIES ||
0740 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
0741 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
0742 return -EINVAL;
0743
0744 if (adapter->flags & FULL_INIT_DONE)
0745 return -EBUSY;
0746
0747 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
0748 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
0749 adapter->params.sge.cmdQ_size[0] = e->tx_pending;
0750 adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
0751 MAX_CMDQ1_ENTRIES : e->tx_pending;
0752 return 0;
0753 }
0754
0755 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
0756 struct kernel_ethtool_coalesce *kernel_coal,
0757 struct netlink_ext_ack *extack)
0758 {
0759 struct adapter *adapter = dev->ml_priv;
0760
0761 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
0762 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
0763 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
0764 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
0765 return 0;
0766 }
0767
0768 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
0769 struct kernel_ethtool_coalesce *kernel_coal,
0770 struct netlink_ext_ack *extack)
0771 {
0772 struct adapter *adapter = dev->ml_priv;
0773
0774 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
0775 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
0776 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
0777 return 0;
0778 }
0779
0780 static int get_eeprom_len(struct net_device *dev)
0781 {
0782 struct adapter *adapter = dev->ml_priv;
0783
0784 return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
0785 }
0786
0787 #define EEPROM_MAGIC(ap) \
0788 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
0789
/*
 * ethtool EEPROM read. Device accesses are done in aligned 32-bit units
 * into a local bounce buffer, then the caller's exact byte range is
 * copied out.
 *
 * NOTE(review): assumes the ethtool core has already clamped e->offset
 * and e->len to get_eeprom_len(); otherwise buf[] could be overrun —
 * confirm.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i;
        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
        struct adapter *adapter = dev->ml_priv;

        e->magic = EEPROM_MAGIC(adapter);
        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
                t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
        memcpy(data, buf + e->offset, e->len);
        return 0;
}
0803
/*
 * ethtool entry points. Coalescing support is limited to the RX
 * parameters listed in supported_coalesce_params.
 */
static const struct ethtool_ops t1_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
                                     ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .get_sset_count = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_link_ksettings = get_link_ksettings,
        .set_link_ksettings = set_link_ksettings,
};
0828
0829 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
0830 {
0831 struct adapter *adapter = dev->ml_priv;
0832 struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
0833
0834 return mdio_mii_ioctl(mdio, if_mii(req), cmd);
0835 }
0836
0837 static int t1_change_mtu(struct net_device *dev, int new_mtu)
0838 {
0839 int ret;
0840 struct adapter *adapter = dev->ml_priv;
0841 struct cmac *mac = adapter->port[dev->if_port].mac;
0842
0843 if (!mac->ops->set_mtu)
0844 return -EOPNOTSUPP;
0845 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
0846 return ret;
0847 dev->mtu = new_mtu;
0848 return 0;
0849 }
0850
0851 static int t1_set_mac_addr(struct net_device *dev, void *p)
0852 {
0853 struct adapter *adapter = dev->ml_priv;
0854 struct cmac *mac = adapter->port[dev->if_port].mac;
0855 struct sockaddr *addr = p;
0856
0857 if (!mac->ops->macaddress_set)
0858 return -EOPNOTSUPP;
0859
0860 eth_hw_addr_set(dev, addr->sa_data);
0861 mac->ops->macaddress_set(mac, dev->dev_addr);
0862 return 0;
0863 }
0864
/*
 * ndo_fix_features: keep the TX VLAN offload flag in lockstep with the
 * RX flag (only NETIF_F_HW_VLAN_CTAG_RX is user-toggleable in
 * hw_features; TX follows it here).
 */
static netdev_features_t t1_fix_features(struct net_device *dev,
                                         netdev_features_t features)
{
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                features |= NETIF_F_HW_VLAN_CTAG_TX;
        else
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;

        return features;
}
0879
0880 static int t1_set_features(struct net_device *dev, netdev_features_t features)
0881 {
0882 netdev_features_t changed = dev->features ^ features;
0883 struct adapter *adapter = dev->ml_priv;
0884
0885 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
0886 t1_vlan_mode(adapter, features);
0887
0888 return 0;
0889 }
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: invoke the hard interrupt handler directly with
 * local interrupts disabled so it cannot race against the real IRQ.
 */
static void t1_netpoll(struct net_device *dev)
{
        unsigned long flags;
        struct adapter *adapter = dev->ml_priv;

        local_irq_save(flags);
        t1_interrupt(adapter->pdev->irq, adapter);
        local_irq_restore(flags);
}
#endif
0901
0902
0903
0904
0905
/*
 * Periodic accumulation of the MAC statistics of all running ports.
 * Runs from the stats_update_task delayed work scheduled by
 * schedule_mac_stats_update().
 */
static void mac_stats_task(struct work_struct *work)
{
        int i;
        struct adapter *adapter =
                container_of(work, struct adapter, stats_update_task.work);

        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];

                if (netif_running(p->dev))
                        p->mac->ops->statistics_update(p->mac,
                                                       MAC_STATS_UPDATE_FAST);
        }

        /*
         * Reschedule only while at least one port is still open.
         * work_lock serializes this check against cxgb_close(), which
         * spins through the same lock before cancelling the work.
         */
        spin_lock(&adapter->work_lock);
        if (adapter->open_device_map & PORT_MASK)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        spin_unlock(&adapter->work_lock);
}
0927
/* net_device callbacks shared by all ports of the adapter. */
static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open = cxgb_open,
        .ndo_stop = cxgb_close,
        .ndo_start_xmit = t1_start_xmit,
        .ndo_get_stats = t1_get_stats,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_rx_mode = t1_set_rxmode,
        .ndo_eth_ioctl = t1_ioctl,
        .ndo_change_mtu = t1_change_mtu,
        .ndo_set_mac_address = t1_set_mac_addr,
        .ndo_fix_features = t1_fix_features,
        .ndo_set_features = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = t1_netpoll,
#endif
};
0944
/*
 * PCI probe: enable the device, map BAR0, allocate one net_device per
 * port and register them. The adapter structure lives in the private
 * area of the first port's net_device.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        unsigned long mmio_start, mmio_len;
        const struct board_info *bi;
        struct adapter *adapter = NULL;
        struct port_info *pi;
        int i, err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                pr_err("%s: cannot find PCI device memory base address\n",
                       pci_name(pdev));
                err = -ENODEV;
                goto out_disable_pdev;
        }

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);

        for (i = 0; i < bi->port_number; ++i) {
                struct net_device *netdev;

                /* Only the first netdev carries the adapter private area. */
                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                if (!adapter) {
                        /* One-time adapter-wide setup on the first port. */
                        adapter = netdev_priv(netdev);
                        adapter->pdev = pdev;
                        adapter->port[0].dev = netdev;

                        adapter->regs = ioremap(mmio_start, mmio_len);
                        if (!adapter->regs) {
                                pr_err("%s: cannot map device registers\n",
                                       pci_name(pdev));
                                err = -ENOMEM;
                                goto out_free_dev;
                        }

                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                                err = -ENODEV;
                                goto out_free_dev;
                        }

                        adapter->name = pci_name(pdev);
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;

                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);
                        spin_lock_init(&adapter->mac_lock);

                        INIT_DELAYED_WORK(&adapter->stats_update_task,
                                          mac_stats_task);

                        pci_set_drvdata(pdev, netdev);
                }

                /* Per-port netdev setup. */
                pi = &adapter->port[i];
                pi->dev = netdev;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->if_port = i;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->ml_priv = adapter;
                netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA;

                if (vlan_tso_capable(adapter)) {
                        netdev->features |=
                                NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_CTAG_RX;
                        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

                        /* No TSO on 4-port T2 adapters. */
                        if (!(is_T2(adapter)) || bi->port_number != 4) {
                                netdev->hw_features |= NETIF_F_TSO;
                                netdev->features |= NETIF_F_TSO;
                        }
                }

                netdev->netdev_ops = &cxgb_netdev_ops;
                /* Reserve headroom for the CPL header prepended on xmit. */
                netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
                        sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

                netdev->ethtool_ops = &t1_ethtool_ops;

                /* Per-board MTU ceiling. */
                switch (bi->board) {
                case CHBT_BOARD_CHT110:
                case CHBT_BOARD_N110:
                case CHBT_BOARD_N210:
                case CHBT_BOARD_CHT210:
                        netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
                                          (ETH_HLEN + ETH_FCS_LEN);
                        break;
                case CHBT_BOARD_CHN204:
                        netdev->max_mtu = VSC7326_MAX_MTU;
                        break;
                default:
                        netdev->max_mtu = ETH_DATA_LEN;
                        break;
                }
        }

        if (t1_init_sw_modules(adapter, bi) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * Register the ports individually. Failure to register one port is
         * not fatal; we proceed with whichever ports registered, but at
         * least one must succeed.
         */
        for (i = 0; i < bi->port_number; ++i) {
                err = register_netdev(adapter->port[i].dev);
                if (err)
                        pr_warn("%s: cannot register net device %s, skipping\n",
                                pci_name(pdev), adapter->port[i].dev->name);
                else {
                        /*
                         * Adopt the name of the first successfully
                         * registered port for adapter-wide messages.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                pr_err("%s: could not register any net devices\n",
                       pci_name(pdev));
                err = -EINVAL;
                goto out_release_adapter_res;
        }

        pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
                adapter->name, bi->desc, adapter->params.chip_revision,
                adapter->params.pci.is_pcix ? "PCIX" : "PCI",
                adapter->params.pci.speed, adapter->params.pci.width);

        /* T1B can run at a reduced clock to save power (module param). */
        if (t1powersave)
                adapter->t1powersave = LCLOCK;
        else
                adapter->t1powersave = HCLOCK;
        if (t1_is_T1B(adapter))
                t1_clock(adapter, t1powersave);

        return 0;

out_release_adapter_res:
        t1_free_sw_modules(adapter);
out_free_dev:
        if (adapter) {
                if (adapter->regs)
                        iounmap(adapter->regs);
                /* Free netdevs in reverse order; only some may exist. */
                for (i = bi->port_number - 1; i >= 0; --i)
                        if (adapter->port[i].dev)
                                free_netdev(adapter->port[i].dev);
        }
        pci_release_regions(pdev);
out_disable_pdev:
        pci_disable_device(pdev);
        return err;
}
1143
1144 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1145 {
1146 int data;
1147 int i;
1148 u32 val;
1149
1150 enum {
1151 S_CLOCK = 1 << 3,
1152 S_DATA = 1 << 4
1153 };
1154
1155 for (i = (nbits - 1); i > -1; i--) {
1156
1157 udelay(50);
1158
1159 data = ((bitdata >> i) & 0x1);
1160 __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1161
1162 if (data)
1163 val |= S_DATA;
1164 else
1165 val &= ~S_DATA;
1166
1167 udelay(50);
1168
1169
1170 val &= ~S_CLOCK;
1171 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1172
1173 udelay(50);
1174
1175
1176 val |= S_CLOCK;
1177 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1178
1179 }
1180 }
1181
/*
 * Reprogram the T1B clock generator through the ELMER0 GPIO pins,
 * switching between full-speed (HCLOCK) and power-save (LCLOCK)
 * operation.  New M divider values for the core and memory PLLs are
 * bit-banged out serially and latched with the S_LOAD_* strobes.
 *
 * @mode: bit 0 selects HCLOCK (0) or LCLOCK (1); if bit 1 is set the
 *        request is silently ignored (returns 0).
 *
 * Returns 0 on success, -ENODEV on non-T1B silicon, -EALREADY if the
 * requested mode is already active.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
u32 val;
int M_CORE_VAL;
int M_MEM_VAL;

enum {
M_CORE_BITS = 9,
T_CORE_VAL = 0,
T_CORE_BITS = 2,
N_CORE_VAL = 0,
N_CORE_BITS = 2,
M_MEM_BITS = 9,
T_MEM_VAL = 0,
T_MEM_BITS = 2,
N_MEM_VAL = 0,
N_MEM_BITS = 2,
NP_LOAD = 1 << 17,
S_LOAD_MEM = 1 << 5,
S_LOAD_CORE = 1 << 6,
S_CLOCK = 1 << 3
};

/* Only the T1B part has this clock-switching facility. */
if (!t1_is_T1B(adapter))
return -ENODEV;

/* Bit 1 set means "leave the clocks alone". */
if (mode & 2)
return 0;

/* Nothing to do if the requested mode is already in effect. */
if ((adapter->t1powersave & 1) == (mode & 1))
return -EALREADY;

/* Select the M divider values for the requested speed. */
if ((mode & 1) == HCLOCK) {
M_CORE_VAL = 0x14;
M_MEM_VAL = 0x18;
adapter->t1powersave = HCLOCK;	/* swing up to full speed */
} else {
M_CORE_VAL = 0xe;
M_MEM_VAL = 0x10;
adapter->t1powersave = LCLOCK;	/* swing down to power save */
}

/* Serialize against other users of the TPI/ELMER0 interface. */
spin_lock(&adapter->tpi_lock);

/* Enable the parallel/serial load pin and bring load/clock low to
 * start programming the core PLL. */
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= NP_LOAD;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~S_LOAD_CORE;
val &= ~S_CLOCK;
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);

/* Shift out the T, N and M core PLL parameters, MSB first. */
bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
udelay(50);

/* Pulse S_LOAD_CORE high then low — presumably latches the shifted
 * core values into the clock chip (TODO: confirm against the Elmer0
 * datasheet). */
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= S_LOAD_CORE;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~S_LOAD_CORE;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);

/* Same dance for the memory PLL: re-assert NP_LOAD, drop the memory
 * load strobe and clock... */
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= NP_LOAD;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~S_LOAD_MEM;
val &= ~S_CLOCK;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);

/* ...shift out the T, N and M memory PLL parameters... */
bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
udelay(50);

/* ...and pulse S_LOAD_MEM to latch them. */
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= S_LOAD_MEM;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(50);
__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~S_LOAD_MEM;
udelay(50);
__t1_tpi_write(adapter, A_ELMER0_GPO, val);

spin_unlock(&adapter->tpi_lock);

return 0;
}
1291
1292 static inline void t1_sw_reset(struct pci_dev *pdev)
1293 {
1294 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1295 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1296 }
1297
/*
 * PCI .remove callback: tear down one adapter.  Unregisters every net
 * device that probe successfully registered (tracked in
 * registered_device_map), releases the software modules and the mapped
 * register window, frees all allocated port net devices, releases the
 * PCI resources, and finally resets the chip.
 */
static void remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct adapter *adapter = dev->ml_priv;
int i;

/* Only unregister devices that register_netdev() accepted in probe. */
for_each_port(adapter, i) {
if (test_bit(i, &adapter->registered_device_map))
unregister_netdev(adapter->port[i].dev);
}

t1_free_sw_modules(adapter);
iounmap(adapter->regs);

/* Relies on for_each_port leaving i equal to the port count, so this
 * walks ports count-1 down to 0 — confirm against the macro definition. */
while (--i >= 0) {
if (adapter->port[i].dev)
free_netdev(adapter->port[i].dev);
}

pci_release_regions(pdev);
pci_disable_device(pdev);
/* Reset the chip last, after all driver state is gone. */
t1_sw_reset(pdev);
}
1321
/* PCI driver glue: binds init_one/remove_one to the Chelsio T1 device
 * IDs listed in t1_pci_tbl. */
static struct pci_driver cxgb_pci_driver = {
.name = DRV_NAME,
.id_table = t1_pci_tbl,
.probe = init_one,
.remove = remove_one,
};

/* Expands to module init/exit functions that register and unregister
 * the PCI driver. */
module_pci_driver(cxgb_pci_driver);