0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032 #include <linux/etherdevice.h>
0033 #include "common.h"
0034 #include "regs.h"
0035 #include "sge_defs.h"
0036 #include "firmware_exports.h"
0037
0038 static void t3_port_intr_clear(struct adapter *adapter, int idx);
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
0057 int polarity, int attempts, int delay, u32 *valp)
0058 {
0059 while (1) {
0060 u32 val = t3_read_reg(adapter, reg);
0061
0062 if (!!(val & mask) == polarity) {
0063 if (valp)
0064 *valp = val;
0065 return 0;
0066 }
0067 if (--attempts == 0)
0068 return -EAGAIN;
0069 if (delay)
0070 udelay(delay);
0071 }
0072 }
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
0086 int n, unsigned int offset)
0087 {
0088 while (n--) {
0089 t3_write_reg(adapter, p->reg_addr + offset, p->val);
0090 p++;
0091 }
0092 }
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
0105 u32 val)
0106 {
0107 u32 v = t3_read_reg(adapter, addr) & ~mask;
0108
0109 t3_write_reg(adapter, addr, v | val);
0110 t3_read_reg(adapter, addr);
0111 }
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
0126 unsigned int data_reg, u32 *vals,
0127 unsigned int nregs, unsigned int start_idx)
0128 {
0129 while (nregs--) {
0130 t3_write_reg(adap, addr_reg, start_idx);
0131 *vals++ = t3_read_reg(adap, data_reg);
0132 start_idx++;
0133 }
0134 }
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.  Returns 0 on success, -EINVAL for an out-of-range request,
 *	or -EIO if a backdoor read fails to complete.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/* per-width shift/step tables used to assemble narrow reads into a
	 * 64-bit word; indexed by mc7->width */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);	/* convert to a byte address */
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* kick off one backdoor read and poll for !BUSY */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* full-width interface: DATA0/DATA1 hold the
				 * low/high halves of the 64-bit word */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				/* narrower interface: place this partial read
				 * into its slot of the 64-bit result */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
0193
0194
0195
0196
0197 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
0198 {
0199 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
0200 u32 val = F_PREEN | V_CLKDIV(clkdiv);
0201
0202 t3_write_reg(adap, A_MI1_CFG, val);
0203 }
0204
0205 #define MDIO_ATTEMPTS 20
0206
0207
0208
0209
/*
 * MI1 read for clause 22 PHYs: program the address, issue the read
 * operation, poll for completion and return the data (or a negative
 * error).  Serialized by the adapter's mdio_lock.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	/* select clause 22 framing (ST = 1) */
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));	/* read op */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
0228
/*
 * MI1 write for clause 22 PHYs: program the address and data, issue the
 * write operation and poll for completion.  Serialized by mdio_lock.
 */
static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	/* select clause 22 framing (ST = 1) */
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* write op */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
0246
/* MDIO operations for plain clause 22 PHYs */
static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};
0252
0253
0254
0255
0256
/*
 * Issue the address cycle of a clause 45 MDIO transaction: select
 * clause 45 framing (ST = 0), latch the PHY/MMD address, write the
 * register address as data, and wait for the address op to finish.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}
0269
0270
0271
0272
/*
 * MI1 read for clause 45 PHYs: an address cycle via mi1_wr_addr()
 * followed by the read operation.  Serialized by mdio_lock.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));	/* read op */
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
0292
/*
 * MI1 write for clause 45 PHYs: an address cycle via mi1_wr_addr()
 * followed by the write operation.  Serialized by mdio_lock.
 */
static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* write op */
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
0311
/* MDIO operations for clause 45 PHYs (with clause 22 emulation) */
static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Read-modify-write a PHY register: clear the bits in @clear and OR in
 *	the bits in @set.  Returns 0 on success or a negative error from the
 *	underlying MDIO read/write.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int v;
	int err = t3_mdio_read(phy, mmd, reg, &v);

	if (err)
		return err;
	return t3_mdio_write(phy, mmd, reg, (v & ~clear) | set);
}
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
0354 {
0355 int err;
0356 unsigned int ctl;
0357
0358 err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
0359 MDIO_CTRL1_RESET);
0360 if (err || !wait)
0361 return err;
0362
0363 do {
0364 err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
0365 if (err)
0366 return err;
0367 ctl &= MDIO_CTRL1_RESET;
0368 if (ctl)
0369 msleep(1);
0370 } while (ctl && --wait);
0371
0372 return ctl ? -1 : 0;
0373 }
0374
0375
0376
0377
0378
0379
0380
0381
0382
0383 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
0384 {
0385 int err;
0386 unsigned int val = 0;
0387
0388 err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
0389 if (err)
0390 return err;
0391
0392 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
0393 if (advert & ADVERTISED_1000baseT_Half)
0394 val |= ADVERTISE_1000HALF;
0395 if (advert & ADVERTISED_1000baseT_Full)
0396 val |= ADVERTISE_1000FULL;
0397
0398 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
0399 if (err)
0400 return err;
0401
0402 val = 1;
0403 if (advert & ADVERTISED_10baseT_Half)
0404 val |= ADVERTISE_10HALF;
0405 if (advert & ADVERTISED_10baseT_Full)
0406 val |= ADVERTISE_10FULL;
0407 if (advert & ADVERTISED_100baseT_Half)
0408 val |= ADVERTISE_100HALF;
0409 if (advert & ADVERTISED_100baseT_Full)
0410 val |= ADVERTISE_100FULL;
0411 if (advert & ADVERTISED_Pause)
0412 val |= ADVERTISE_PAUSE_CAP;
0413 if (advert & ADVERTISED_Asym_Pause)
0414 val |= ADVERTISE_PAUSE_ASYM;
0415 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
0416 }
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
0427 {
0428 unsigned int val = 0;
0429
0430 if (advert & ADVERTISED_1000baseT_Half)
0431 val |= ADVERTISE_1000XHALF;
0432 if (advert & ADVERTISED_1000baseT_Full)
0433 val |= ADVERTISE_1000XFULL;
0434 if (advert & ADVERTISED_Pause)
0435 val |= ADVERTISE_1000XPAUSE;
0436 if (advert & ADVERTISED_Asym_Pause)
0437 val |= ADVERTISE_1000XPSE_ASYM;
0438 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
0439 }
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
0451 {
0452 int err;
0453 unsigned int ctl;
0454
0455 err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
0456 if (err)
0457 return err;
0458
0459 if (speed >= 0) {
0460 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
0461 if (speed == SPEED_100)
0462 ctl |= BMCR_SPEED100;
0463 else if (speed == SPEED_1000)
0464 ctl |= BMCR_SPEED1000;
0465 }
0466 if (duplex >= 0) {
0467 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
0468 if (duplex == DUPLEX_FULL)
0469 ctl |= BMCR_FULLDPLX;
0470 }
0471 if (ctl & BMCR_SPEED1000)
0472 ctl |= BMCR_ANENABLE;
0473 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
0474 }
0475
/* Enable the link-status alarm interrupt in the PHY's LASI control reg. */
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}
0481
/* Mask all LASI interrupts at the PHY. */
int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
0486
/* Clear pending LASI interrupts by reading the status register; the read
 * value itself is discarded (presumably the register clears on read —
 * NOTE(review): confirm against the PHY datasheet). */
int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}
0493
0494 int t3_phy_lasi_intr_handler(struct cphy *phy)
0495 {
0496 unsigned int status;
0497 int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
0498 &status);
0499
0500 if (err)
0501 return err;
0502 return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
0503 }
0504
/*
 * Static description of each supported adapter type, indexed by the board
 * id returned via t3_get_adapter_info(): GPIO output-enable/value masks,
 * PHY interrupt GPIO selectors, extra link capabilities, the MDIO ops to
 * use, and the board name.  NOTE(review): field meanings inferred from
 * usage here — confirm against struct adapter_info in common.h.
 */
static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},	/* placeholder: unused board id */
	{},	/* placeholder: unused board id */
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};
0538
0539
0540
0541
0542
0543 const struct adapter_info *t3_get_adapter_info(unsigned int id)
0544 {
0545 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
0546 }
0547
/* Per-port-type PHY initialization hook. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};
0552
/* PHY prep routines indexed by port type; NULL entries are unsupported. */
static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL},
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};
0566
/* One VPD-R keyword entry: 2-byte keyword, 1-byte length, fixed-size data. */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data layout as parsed by get_vpd_params():
 * an ID string tag followed by the read-only (VPD-R) keyword entries.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base (hex string) */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* memory clock */
	VPD_ENTRY(uclk, 6);	/* uP clock */
	VPD_ENTRY(mdc, 6);	/* MDIO clock */
	VPD_ENTRY(mt, 2);	/* memory timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY type for port 0 */
	VPD_ENTRY(port1, 2);	/* PHY type for port 1 */
	VPD_ENTRY(port2, 2);	/* not read by this driver */
	VPD_ENTRY(port3, 2);	/* not read by this driver */
	VPD_ENTRY(rv, 1);	/* checksum */
	u32 pad;	/* trailing padding */
};
0598
0599 #define EEPROM_STAT_ADDR 0x4000
0600 #define VPD_BASE 0xc00
0601
0602
0603
0604
0605
0606
0607
0608
0609 int t3_seeprom_wp(struct adapter *adapter, int enable)
0610 {
0611 u32 data = enable ? 0xc : 0;
0612 int ret;
0613
0614
0615 ret = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR, sizeof(u32),
0616 &data);
0617
0618 return ret < 0 ? ret : 0;
0619 }
0620
0621 static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
0622 {
0623 char tok[256];
0624
0625 memcpy(tok, s, len);
0626 tok[len] = 0;
0627 return kstrtouint(strim(tok), base, val);
0628 }
0629
0630 static int vpdstrtou16(char *s, u8 len, unsigned int base, u16 *val)
0631 {
0632 char tok[256];
0633
0634 memcpy(tok, s, len);
0635 tok[len] = 0;
0636 return kstrtou16(strim(tok), base, val);
0637 }
0638
0639
0640
0641
0642
0643
0644
0645
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM: clocks, memory timing,
 *	serial number, port types, XAUI configuration, and the base MAC
 *	address.  Returns 0 on success or a negative error.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	struct t3_vpd vpd;
	u8 base_val = 0;
	int addr, ret;

	/*
	 * Probe the first byte at VPD_BASE: if it is an ID-string tag the
	 * VPD lives at VPD_BASE, otherwise fall back to offset 0.
	 */
	ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
	if (ret < 0)
		return ret;
	addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : 0;

	ret = pci_read_vpd(adapter->pdev, addr, sizeof(vpd), &vpd);
	if (ret < 0)
		return ret;

	/* parse the decimal clock/timing fields */
	ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
	if (ret)
		return ret;
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* rev-0 boards with no port info in VPD: derive the port types from
	 * whether the adapter uses XAUI */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
		ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
				  &p->xauicfg[0]);
		if (ret)
			return ret;
		ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
				  &p->xauicfg[1]);
		if (ret)
			return ret;
	}

	/* decode the hex-string MAC address */
	ret = hex2bin(p->eth_base, vpd.na_data, 6);
	if (ret < 0)
		return -EINVAL;
	return 0;
}
0704
0705
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8	/* at least version and csum */
};
0723
0724
0725
0726
0727
0728
0729
0730
0731
0732
0733
0734
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read, 1..4
 *	@cont: whether another operation will be chained after this one
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}
0750
0751
0752
0753
0754
0755
0756
0757
0758
0759
0760
0761
/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write, 1..4
 *	@cont: whether another operation will be chained after this one
 *	@val: the value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
0774
0775
0776
0777
0778
0779
0780
0781
0782
0783 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
0784 {
0785 int ret;
0786 u32 status;
0787
0788 while (1) {
0789 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
0790 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
0791 return ret;
0792 if (!(status & 1))
0793 return 0;
0794 if (--attempts == 0)
0795 return -EAGAIN;
0796 if (delay)
0797 msleep(delay);
0798 }
0799 }
0800
0801
0802
0803
0804
0805
0806
0807
0808
0809
0810
0811
0812
0813
/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read (must be 4-byte aligned)
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(via htonl), otherwise as 32-bit words in the platform's endianness.
 */
static int t3_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	/* compose the fast-read command: opcode + byte-swapped address */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)	/* dummy byte */
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
0837
0838
0839
0840
0841
0842
0843
0844
0845
0846
0847
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  The write must not cross a 256-byte page
 *	boundary.  The page is read back afterwards and compared to verify
 *	the write; -EIO is returned on mismatch.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	/* program-page command: opcode + byte-swapped address */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* stream the payload 4 bytes at a time, big-endian packed */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* data was advanced by n above, so data - n is the original start */
	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
0885
0886
0887
0888
0889
0890
0891
0892
/**
 *	t3_get_tp_version - read the tp sram version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol sram version from sram via the embedded-op
 *	registers.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}
0908
0909
0910
0911
0912
0913
0914
0915 int t3_check_tpsram_version(struct adapter *adapter)
0916 {
0917 int ret;
0918 u32 vers;
0919 unsigned int major, minor;
0920
0921 if (adapter->params.rev == T3_REV_A)
0922 return 0;
0923
0924
0925 ret = t3_get_tp_version(adapter, &vers);
0926 if (ret)
0927 return ret;
0928
0929 major = G_TP_VERSION_MAJOR(vers);
0930 minor = G_TP_VERSION_MINOR(vers);
0931
0932 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
0933 return 0;
0934 else {
0935 CH_ERR(adapter, "found wrong TP version (%u.%u), "
0936 "driver compiled for version %d.%d\n", major, minor,
0937 TP_VERSION_MAJOR, TP_VERSION_MINOR);
0938 }
0939 return -EINVAL;
0940 }
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950
0951
0952 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
0953 unsigned int size)
0954 {
0955 u32 csum;
0956 unsigned int i;
0957 const __be32 *p = (const __be32 *)tp_sram;
0958
0959
0960 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
0961 csum += ntohl(p[i]);
0962 if (csum != 0xffffffff) {
0963 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
0964 csum);
0965 return -EINVAL;
0966 }
0967
0968 return 0;
0969 }
0970
/* firmware flavor encoded in the FW version word's type field */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
0975
0976
0977
0978
0979
0980
0981
0982
/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version word from flash at FW_VERS_ADDR.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
0987
0988
0989
0990
0991
0992
0993
0994
0995 int t3_check_fw_version(struct adapter *adapter)
0996 {
0997 int ret;
0998 u32 vers;
0999 unsigned int type, major, minor;
1000
1001 ret = t3_get_fw_version(adapter, &vers);
1002 if (ret)
1003 return ret;
1004
1005 type = G_FW_VERSION_TYPE(vers);
1006 major = G_FW_VERSION_MAJOR(vers);
1007 minor = G_FW_VERSION_MINOR(vers);
1008
1009 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1010 minor == FW_VERSION_MINOR)
1011 return 0;
1012 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1013 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1014 "driver compiled for version %u.%u\n", major, minor,
1015 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1016 else {
1017 CH_WARN(adapter, "found newer FW version(%u.%u), "
1018 "driver compiled for version %u.%u\n", major, minor,
1019 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1020 return 0;
1021 }
1022 return -EINVAL;
1023 }
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1034 {
1035 while (start <= end) {
1036 int ret;
1037
1038 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1039 (ret = sf1_write(adapter, 4, 0,
1040 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1041 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1042 return ret;
1043 start++;
1044 }
1045 return 0;
1046 }
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.  The
 *	image consists of @size - 8 bytes of code/data followed by 4 bytes
 *	of FW version and a 4-byte checksum; the whole image must sum to the
 *	all-ones checksum.  The version word is written last so a partially
 *	flashed image is detectable.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* validate the image checksum before touching the flash */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;	/* trim off the version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	/* write the version word last */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
1102
1103 #define CIM_CTL_BASE 0x2000
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
/**
 *	t3_cim_ctl_blk_read - read a block from the CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of 32-bit words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region via the
 *	host-access registers.  Returns -EBUSY if the interface is already
 *	busy, otherwise 0 or the first polling error encountered.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
1132
/*
 * Temporarily gate off all RX traffic on a MAC: disable the exact-match
 * filters, disable broadcast and hash matching, and zero the hash tables.
 * The previous RX_CFG and hash register values are saved through the out
 * parameters so t3_open_rx_traffic() can restore them.
 */
static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast and promiscuous-mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* allow the changes to take effect before the caller proceeds;
	 * presumably lets in-flight frames drain — TODO confirm */
	msleep(1);
}
1154
1155 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1156 u32 rx_hash_high, u32 rx_hash_low)
1157 {
1158 t3_mac_enable_exact_filters(mac);
1159 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1160 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1161 rx_cfg);
1162 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1163 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1164 }
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index whose link state has changed
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler (t3_os_link_changed).
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		/* link just came up: bring RX back up with traffic gated
		 * off and check for a pending link fault */
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	/* resolve flow control against the requested settings */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
			   speed, duplex, fc);
}
1231
/*
 * Handle a link fault on a port: re-check the fault status with RX traffic
 * gated off, then either report the link as down (fault persists) or
 * restore normal operation (fault cleared).
 */
void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	/* sample the link-fault-change bit after the RX restart */
	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	/* seed the out parameters with the current settings */
	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		/* fault still present: declare the link down */
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration:
 *	- if the PHY can auto-negotiate, decide what to advertise, then either
 *	  force the requested speed/duplex or enable auto-negotiation;
 *	- if the PHY does not auto-negotiate, just program the MAC and reset
 *	  the PHY.
 *	Always returns 0.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* translate requested pause settings into advertised bits */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			/* force the requested settings on MAC and PHY */
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1339 {
1340 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1341 ports << S_VLANEXTRACTIONENABLE,
1342 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1343 }
1344
/* Describes one interrupt cause handled by t3_handle_intr_status(). */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status register */
	const char *msg;	/* message to print or NULL */
	short stat_idx;	/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions, terminated by an entry with mask 0
 *	@stats: statistics counters tracking interrupt occurrences, or NULL
 *		if every table entry has stat_idx < 0
 *
 *	Process a masked interrupt status: log each matching cause per its
 *	table entry (ALERT for fatal, WARN otherwise), bump its counter, and
 *	finally write the processed bits back to clear them.  Returns the
 *	number of fatal interrupt conditions encountered.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
			status &= ~acts->mask;
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)	/* clear the interrupts we handled */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
1393
/*
 * Interrupt enable/cause masks for the various hardware modules.  Each
 * mask selects the conditions that the matching *_intr_handler() below
 * reports; they are also written to the modules' INT_ENABLE registers in
 * t3_intr_enable().
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
		       F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) )
/* NOTE(review): F_PEXERR and the MSI-X parity bits are intentionally absent
 * here; pcie_intr_handler() reads/reports PEX errors separately. */
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			\
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
/* top-level (PL) interrupt sources dispatched by t3_slow_intr_handler() */
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1452
1453
1454
/*
 * Interrupt handler for the PCIX1 module: reports PCI-X error conditions
 * and escalates to t3_fatal_err() when any fatal condition is present.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		/* correctable ECC errors are counted but not fatal */
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1487
1488
1489
1490
/*
 * Interrupt handler for the PCIE module: logs the PEX error code when
 * F_PEXERR is pending, then runs the table-driven handler and escalates
 * any fatal condition to t3_fatal_err().
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	/* PEX errors carry an additional error code in a separate register */
	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1521
1522
1523
1524
/*
 * TP (protocol engine) interrupt handler.  Pre-T3C silicon and T3C use
 * different cause-bit layouts, hence the two tables.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static const struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}
1546
1547
1548
1549
/*
 * CIM (controller interface module) interrupt handler.  All conditions
 * reported here are treated as fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1584
1585
1586
1587
/*
 * ULP RX interrupt handler.  All conditions reported here are fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1606
1607
1608
1609
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are counted but
 * non-fatal; parity errors (mask 0xfc) are fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1625
/* PM TX framing-error bit groups, used by pmtx_intr_handler() below */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1634
1635
1636
1637
/*
 * PM TX interrupt handler.  All conditions reported here are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1655
/* PM RX framing-error bit groups, used by pmrx_intr_handler() below */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1664
1665
1666
1667
/*
 * PM RX interrupt handler.  All conditions reported here are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1685
1686
1687
1688
/*
 * CPL switch interrupt handler.  All conditions reported here are fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
1705
1706
1707
1708
/*
 * MPS interrupt handler.  Any parity error (bits 0x1ff) is fatal.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
1720
/* MC7 conditions that are fatal: uncorrectable, parity, and address errors */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1722
1723
1724
1725
/*
 * MC7 (memory controller) interrupt handler.  Logs and counts correctable,
 * uncorrectable, parity, and address errors, escalating fatal conditions
 * (MC7_INTR_FATAL) via t3_fatal_err(), then acknowledges the cause bits.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* the error address register only exists on rev > 0 parts */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	/* acknowledge all handled cause bits */
	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1773
/* XGMAC conditions that are fatal: TX/RX FIFO parity errors */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1776
1777
1778
/*
 * XGMAC interrupt handler for port @idx.  Updates MAC statistics for the
 * reported conditions, disables and reports link-fault interrupts, and
 * escalates FIFO parity errors (XGM_INTR_FATAL) via t3_fatal_err().
 * Returns non-zero if any cause bit was set.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;

	/*
	 * F_RXFIFO_OVERFLOW is masked out of the cause so it neither
	 * re-raises the interrupt nor gets acknowledged here.
	 * NOTE(review): this makes the rx_fifo_ovfl branch below dead code;
	 * presumably overflow is tracked elsewhere (e.g. via stats polling)
	 * -- confirm before removing either side.
	 */
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
		    ~F_RXFIFO_OVERFLOW;

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;
	if (cause & F_XGM_INT) {
		/* disable further link-fault interrupts until re-enabled */
		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, 0);
		mac->stats.link_faults++;

		t3_os_link_fault_handler(adap, idx);
	}

	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);

	/* acknowledge the handled cause bits */
	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	return cause != 0;
}
1824
1825
1826
1827
/**
 *	t3_phy_intr_handler - PHY interrupt handler
 *	@adapter: the adapter whose PHY interrupt needs servicing
 *
 *	Dispatches PHY interrupts to the per-port PHY drivers based on the
 *	GPIO interrupt lines recorded for each port, then reacts to link,
 *	FIFO-error, and module-change events the PHY reports.  Acknowledges
 *	the GPIO cause bits at the end.  Always returns 0.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		/* skip ports whose PHY does not generate interrupts */
		if (!(p->phy.caps & SUPPORTED_IRQ))
			continue;

		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
			if (phy_cause & cphy_cause_module_change)
				t3_os_phymod_changed(adapter, i);
		}
	}

	/* acknowledge the GPIO interrupt cause bits */
	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
1853
1854
1855
1856
/**
 *	t3_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter whose interrupt needs servicing
 *
 *	Top-level (PL) interrupt dispatcher for non-data interrupts.  Reads
 *	the PL cause register, masks it with the currently enabled sources,
 *	and calls the per-module handlers for every pending source.  The
 *	cause is acknowledged at the end and the write is flushed with a
 *	read-back.  Returns 0 if nothing was pending, 1 otherwise.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		/* bus interface: PCIe and PCI-X parts use different modules */
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* acknowledge the cause and flush the write with a read-back */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);
	return 1;
}
1908
1909 static unsigned int calc_gpio_intr(struct adapter *adap)
1910 {
1911 unsigned int i, gpi_intr = 0;
1912
1913 for_each_port(adap, i)
1914 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1915 adapter_info(adap)->gpio_intr[i])
1916 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1917 return gpi_intr;
1918 }
1919
1920
1921
1922
1923
1924
1925
1926
1927
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enables interrupts for all the hardware modules by programming each
 *	module's INT_ENABLE register with its mask, then unmasks the
 *	top-level (PL) sources last.  The final read-back flushes the
 *	enable write.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		/* the PMTX and CM MC7 instances live at fixed offsets from
		 * the PMRX instance */
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	/* TP enable mask differs between pre-C and T3C silicon */
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}
1971
1972
1973
1974
1975
1976
1977
1978
/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disables all top-level (PL) interrupt sources and clears the cached
 *	slow-interrupt mask.  The read-back flushes the disable write.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}
1985
1986
1987
1988
1989
1990
1991
/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all pending interrupts: per-port MAC/PHY causes first, then
 *	every module cause register, then the top-level PL cause last so no
 *	stale summary bit survives.  The read-back flushes the final write.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		/* PMTX and CM MC7 instances at fixed offsets from PMRX */
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* clear port-level (MAC + PHY) interrupts first */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}
2026
/*
 * Enable the extra XGMAC interrupts (link fault change) for port @idx.
 */
void t3_xgm_intr_enable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
		     XGM_EXTRA_INTR_MASK);
}
2034
/*
 * Disable all extra XGMAC interrupts (mask 0x7ff) for port @idx.
 */
void t3_xgm_intr_disable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
		     0x7ff);
}
2042
2043
2044
2045
2046
2047
2048
2049
2050
/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enables the XGMAC interrupt mask for the port and then the port's
 *	PHY interrupts.  The register read-back flushes the enable write.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}
2059
2060
2061
2062
2063
2064
2065
2066
2067
/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disables the XGMAC interrupts for the port and then the port's PHY
 *	interrupts.  The register read-back flushes the disable write.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}
2076
2077
2078
2079
2080
2081
2082
2083
2084
/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clears all pending XGMAC and PHY interrupts for the port.  The
 *	register read-back flushes the cause-clear write.
 */
static void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}
2093
/* polling attempts (1us apart) for SGE context commands to complete */
#define SG_CONTEXT_CMD_ATTEMPTS 100
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type (F_RESPONSEQ, F_EGRESS, ...)
 *
 *	Programs the SGE context masks, issues a context-write command for
 *	context @id, and waits up to SG_CONTEXT_CMD_ATTEMPTS microseconds
 *	for it to complete.  The caller must have loaded the DATA registers
 *	beforehand.  Returns 0 on success, -EAGAIN on timeout.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	if (type == F_RESPONSEQ) {
		/*
		 * Response queues use a restricted MASK2 (0x17ffffff) so
		 * some context bits are left untouched by the write.
		 * NOTE(review): presumably this protects HW-owned state in
		 * that word -- confirm against the SGE context layout.
		 */
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	} else {
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	}
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
/**
 *	clear_sge_ctxt - completely clear an SGE context
 *	@adap: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Zeroes all four data words of SGE context @id with full write masks
 *	and waits for the command to complete.  Returns 0 on success,
 *	-EAGAIN on timeout.
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of the queue; must be 4K-aligned
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initializes an SGE egress context and makes it ready for use.
 *	Returns -EINVAL for a misaligned base address, -EBUSY if a context
 *	command is already in flight, or the write-completion status.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* offload contexts start with no credits; others get FW_WR_NUM */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* address is stored in 4K units */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of the free list; must be 4K-aligned
 *	@size: number of list entries
 *	@bsize: buffer size
 *	@cong_thres: congestion threshold
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initializes an SGE free-list context and makes it ready for use.
 *	Returns -EINVAL for a misaligned base address, -EBUSY if a context
 *	command is already in flight, or the write-completion status.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* address is stored in 4K units */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if the queue uses MSI
 *		or INTx, -1 if the queue is uninterruptible
 *	@base_addr: base address of the queue; must be 4K-aligned
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initializes an SGE response queue context and makes it ready for
 *	use.  Returns -EINVAL for a misaligned base address, -EBUSY if a
 *	context command is in flight, or the write-completion status.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* address is stored in 4K units */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of the queue; must be 4K-aligned
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initializes an SGE completion queue context and makes it ready for
 *	use.  Returns -EINVAL for a misaligned base address, -EBUSY if a
 *	context command is in flight, or the write-completion status.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* address is stored in 4K units */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
/**
 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
 *	@adapter: the adapter
 *	@id: the egress context id
 *	@enable: enable (1) or disable (0) the context
 *
 *	Sets only the F_EC_VALID bit of egress context @id (all other bits
 *	are masked off).  Returns -EBUSY if a context command is already in
 *	flight, otherwise the write-completion status.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2344
2345
2346
2347
2348
2349
2350
2351
2352
/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disables free list @id by zeroing its size field (only the FL_SIZE
 *	bits are unmasked).  Returns -EBUSY if a context command is already
 *	in flight, otherwise the write-completion status.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2368
2369
2370
2371
2372
2373
2374
2375
2376
/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disables response queue @id by zeroing its size field (only the
 *	CQ_SIZE bits are unmasked).  Returns -EBUSY if a context command is
 *	already in flight, otherwise the write-completion status.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2392
2393
2394
2395
2396
2397
2398
2399
2400
/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disables completion queue @id by zeroing its size field (only the
 *	CQ_SIZE bits are unmasked).  Returns -EBUSY if a context command is
 *	already in flight, otherwise the write-completion status.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *	@credits: credits to return to the CQ
 *
 *	Issues opcode @op against completion queue context @id.  For ops in
 *	the range [2, 7) the current CQ index is returned; on rev 0 parts
 *	this requires an extra context-read command because the first
 *	command's result does not carry the index.  Returns the CQ index for
 *	those ops, 0 for others, -EBUSY if a command is in flight, or -EIO
 *	on timeout.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		/* rev 0: issue an explicit context read to get the index */
		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
/**
 *	t3_config_rss - configure Rx packet steering
 *	@adapter: the adapter
 *	@rss_config: RSS settings, written verbatim to TP_RSS_CONFIG
 *	@cpus: CPU assignments for the lookup table, cycled and terminated
 *	       by a 0xff entry; may be %NULL to leave the table unchanged
 *	@rspq: response queue assignments for the map table, cycled and
 *	       terminated by a 0xffff entry; may be %NULL to skip
 *
 *	Fills the RSS_TABLE_SIZE-entry lookup and map tables by cycling
 *	through the supplied arrays; hitting the terminator restarts the
 *	cycle from index 0 (the terminator itself is never written).
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 * cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;	/* table index in the high half */

			/* two 6-bit CPU ids per entry */
			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2497
2498
2499
2500
2501
2502
2503
2504
2505 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2506 {
2507 if (is_offload(adap) || !enable)
2508 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2509 V_NICMODE(!enable));
2510 }
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
/*
 * Number of whole pages of size @pg_size that fit in @mem_size,
 * rounded down to a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	return (mem_size / pg_size) / 24 * 24;
}
2528
/* Program the base address of a TP memory region into register A_<reg>
 * and advance the running offset past it.  Note this expands to two
 * statements, so it must not be used as the sole body of an unbraced
 * conditional.
 */
#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
2532
2533
2534
2535
2536
2537
2538
2539
2540
/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Carves up payload and context memory: programs the PMM Tx/Rx page
 *	layout, sizes the pstruct pool, lays out the context-memory regions
 *	(TCBs, egress/CQ contexts, timer queues, CMM free lists) and hands
 *	whatever remains to CIM SDRAM.  Finally, if the connection memory
 *	can back fewer TIDs than the TCAM leaves available, the excess TCAM
 *	capacity is moved into the server region.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/* Select timer-queue count/stride from the TCAM size (rev > 0 only) */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a little headroom and round down to a multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/* Lay out context-memory regions; mem_region() advances m */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* Remaining context memory, 4KB-aligned, goes to CIM SDRAM */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/* TIDs the remaining connection memory can hold vs. TCAM leftovers */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
2598
/* Write a TP register through the TP_PIO indirect access window. */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2605
/*
 * Program TP's static configuration: global checksum-offload and pacing
 * options, TCP option defaults, delayed-ACK behavior, PC-config bits and
 * the TX modulation defaults.  Rev-specific tweaks are applied at the end.
 */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	/* NOTE(review): the second write immediately overrides the first
	 * value (1080 -> 1000) -- confirm this double write is intentional.
	 */
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2655
2656
/* TP timer resolution, in microseconds (see tp_set_timers()) */
#define TP_TMR_RES 50

/* TP delayed-ACK timer, in milliseconds */
#define TP_DACK_TIMER 50
/* Minimum TCP retransmit timeout, in milliseconds */
#define TP_RTO_MIN 250
2662
2663
2664
2665
2666
2667
2668
2669
2670
/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency, in Hz
 *
 *	Derives timer resolutions from the core clock and programs the
 *	TCP timer values: backoff table, shift counts, RTO bounds, persist,
 *	keepalive, MSL and FIN_WAIT2 timers.  The local SECONDS macro
 *	converts seconds to ticks at the chosen timer resolution.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* log2 of core clocks per TP_TMR_RES microseconds */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;
	unsigned int tstamp_re = fls(core_clk / 1000);
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2706
2707
2708
2709
2710
2711
2712
2713
2714
/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size; 0 disables coalescing
 *	@psh: whether a set TCP PSH bit should also close a coalesce batch
 *
 *	Enables or disables Rx payload coalescing and sets its size cap.
 *	Returns -EINVAL if @size exceeds MAX_RX_COALESCING_LEN, else 0.
 */
static int t3_tp_set_coalescing_size(struct adapter *adap,
				     unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
2737
2738
2739
2740
2741
2742
2743
2744
2745
/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size
 *
 *	Sets TP's max receive size for both payload-manager transfer-length
 *	fields.
 */
static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2751
/*
 * Populate the 16-entry MTU table with the default, ascending set of
 * path-MTU values (common Ethernet, PPPoE and jumbo frame sizes).
 */
static void init_mtus(unsigned short mtus[])
{
	static const unsigned short default_mtus[] = {
		88, 88, 256, 512, 576, 1024, 1280, 1492,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int i;

	for (i = 0; i < sizeof(default_mtus) / sizeof(default_mtus[0]); i++)
		mtus[i] = default_mtus[i];
}
2776
2777
2778
2779
/*
 * Fill in the default congestion-control parameters: @a holds the
 * per-window additive increments, @b the corresponding decrement
 * exponents.  Both tables have 32 entries.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short incr[] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short decr[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; i++) {
		a[i] = incr[i];
		b[i] = decr[i];
	}
}
2816
2817
/* Minimum additive increment written to the congestion-control table */
#define CC_MIN_INCR 2U
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Writes the HW MTU table, capping each entry at @mtu_cap, together
 *	with a log2 index of the MTU.  For each MTU/window pair the
 *	congestion-control table gets an additive increment scaled by
 *	@alpha and the expected average packet count (bounded below by
 *	CC_MIN_INCR) plus the @beta value.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* expected packets per congestion-control window, per window index */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		/* drop log2 by one when the bit two below the MSB is clear */
		if (!(mtu & ((1 << log2) >> 2)))
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* mtu - 40: presumably payload after TCP/IP headers */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
2864
2865
2866
2867
2868
2869
2870
2871
/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters, read out one u32 at a time
 *	through the TP_MIB indirect register window starting at index 0.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
2877
/* Program the LLIMIT/ULIMIT pair of a ULP RX memory region and advance
 * the running offset past it.  Like mem_region(), this expands to
 * multiple statements.
 */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

/* Same for a ULP TX region, but the offset is NOT advanced, so the
 * following ULP RX region shares the same address range.
 */
#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)
2888
/*
 * Partition the ULP memory regions within the Rx channel memory.  The
 * ulptx_region() entries (TPT, PBL) deliberately overlay the ulp_region()
 * entry that follows them, since ulptx_region() does not advance m.
 */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2902
2903
2904
2905
2906
2907
2908
2909
/**
 *	t3_set_proto_sram - set the contents of the protocol SRAM
 *	@adap: the adapter
 *	@data: the protocol image, PROTO_SRAM_LINES lines of five
 *	       big-endian 32-bit words each
 *
 *	Writes the protocol SRAM one line at a time through the
 *	TP_EMBED_OP_FIELD registers, waiting for each line's write
 *	operation to complete.  Returns 0 on success, -EIO on timeout.
 */
int t3_set_proto_sram(struct adapter *adap, const u8 *data)
{
	int i;
	const __be32 *buf = (const __be32 *)data;

	for (i = 0; i < PROTO_SRAM_LINES; i++) {
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));

		/* line index plus the write-trigger bit */
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
			return -EIO;
	}
	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);

	return 0;
}
2930
/**
 *	t3_config_trace_filter - configure one of the tracing filters
 *	@adapter: the adapter
 *	@tp: the desired trace filter parameters
 *	@filter_index: selects the filter bank (0 = TX keys, nonzero = RX)
 *	@invert: if set, sets key bit 29 (match-sense inversion)
 *	@enable: if set, sets key bit 28 (filter enable)
 *
 *	Packs the 4-tuple/protocol/VLAN/interface match key and mask and
 *	writes the key/mask word pairs to the selected TRC_KEY bank via the
 *	TP PIO window.  The final PIO data read flushes the writes.
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush */
}
2963
2964
2965
2966
2967
2968
2969
2970
2971
/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: the target rate in Kbps; 0 leaves the rate fields zeroed
 *	@sched: the scheduler index (two schedulers share each PIO word)
 *
 *	Searches for the (clocks-per-tick, bytes-per-tick) pair whose
 *	product best approximates the requested rate and programs it into
 *	the selected scheduler's half of the rate-limit register.  Returns
 *	-EINVAL if no representable rate is found, 0 otherwise.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* core clock, Hz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* Kbps -> bytes per second */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* bytes/tick, rounded */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* Each rate-limit word holds two schedulers: even in the low half,
	 * odd in the high half.
	 */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
3007
/*
 * One-time TP initialization: apply the static config and VLAN
 * acceleration settings; on offload-capable adapters also program the
 * TCP timers and kick off free-list initialization, waiting for it to
 * finish.  TP is released from reset only if nothing timed out.
 * Returns 0 on success, nonzero on free-list init timeout.
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
3028
3029
3030
3031
3032
/**
 *	chan_init_hw - channel-dependent hardware initialization
 *	@adap: the adapter
 *	@chan_map: bitmap of Tx channels in use (bit 0 / bit 1)
 *
 *	Single-channel configurations disable round-robin arbitration and
 *	activate only the used channel's port; the two-channel case
 *	(chan_map == 3) enables round-robin between both channels, splits
 *	the PM1 Tx memory between them and programs the Tx modulation
 *	queue mapping.
 */
static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
	int i;

	if (chan_map != 3) {	/* one channel */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
			      F_TPTXPORT1EN | F_PORT1ACTIVE));
		t3_write_reg(adap, A_PM1_TX_CFG,
			     chan_map == 1 ? 0xffffffff : 0);
	} else {		/* two channels */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
3062
/*
 * Calibrate the XGMAC pads.  XAUI configurations retry the hardware
 * impedance calibration up to five times and latch the calibrated value
 * on success; RGMII configurations just program fixed pull-up/pull-down
 * impedance values.  Returns 0 on success, -1 if XAUI calibration keeps
 * reporting a fault or stays busy.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);	/* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3089
/*
 * T3B-style RGMII pad calibration: pulse CALRESET, then toggle
 * IMPSETUPDATE and CALUPDATE to latch the calibration.  No-op for
 * XAUI configurations.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3104
/* Per-memory-type MC7 timing parameters, programmed into A_MC7_PARM by
 * mc7_init().  RefCyc is indexed by the memory density read from the
 * MC7 CFG register.
 */
struct mc7_timing_params {
	unsigned char ActToPreDly;
	unsigned char ActToRdWrDly;
	unsigned char PreCyc;
	unsigned char RefCyc[5];
	unsigned char BkCyc;
	unsigned char WrToRdDly;
	unsigned char RdToWrDly;
};
3114
3115
3116
3117
3118
3119
/*
 * Write an MC7 command register and check that its BUSY bit has cleared
 * (the intervening read-back flushes the write).  Returns 0 on success,
 * -EIO if the register still reports busy.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}
3129
/*
 * Initialize one MC7 memory controller: calibrate (unless the part is
 * marked slow), program the timing parameters for the detected density,
 * bring the memory through its power-up command sequence (precharge,
 * mode-register and refresh writes), enable periodic refresh and ECC,
 * then run a full-range BIST pass before marking the instance ready.
 * Returns 0 on success (a zero-size instance is a no-op), -1 on any
 * calibration, command or BIST timeout.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* mode-register values, indexed by mem_type like mc7_timings */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)		/* instance not populated */
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		/* single-shot calibration */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* mc7_clock is in kHz; scale to the periodic refresh divisor */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;
	mc7_clock /= 1000000;

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* enable ECC and BIST-fill the whole part with zeroes */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	/* up to 50 * 250ms for the BIST to finish */
	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* this memory is now ready for use */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3242
/*
 * Tune PCIe link parameters: derive the ACK latency and replay-timer
 * limits from the negotiated link width, payload size and fast-training
 * sequence counts (register layouts differ between rev 0 and later),
 * clear outstanding PEX errors and enable link-down reset handling.
 * Device id 0x37 parts have their payload and read-request sizes forced
 * to the minimum first.
 */
static void config_pcie(struct adapter *adap)
{
	/* ACK latencies, indexed by [log2 link width][payload size code] */
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	/* replay timer limits, same indexing as ack_lat */
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val, devid;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	pci_read_config_word(adap->pdev, 0x2, &devid);
	if (devid == 0x37) {
		pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
					   val & ~PCI_EXP_DEVCTL_READRQ &
					   ~PCI_EXP_DEVCTL_PAYLOAD);
		pldsize = 0;
	}

	pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & PCI_EXP_LNKCTL_ASPM_L0S)	/* L0s enabled: extra latency */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3300
3301
3302
3303
3304
3305
3306
3307
3308
/**
 *	t3_init_hw - initialize and configure T3 HW modules
 *	@adapter: the adapter
 *	@fw_params: initial parameters to pass to firmware (optional)
 *
 *	Performs the HW initialization that must be done once after a card
 *	is reset: MAC calibration, memory-controller and TCAM bring-up,
 *	TP/ULP/SGE configuration, bus-specific tuning, and finally booting
 *	the on-chip microprocessor from flash and waiting for it to come up.
 *	Returns 0 on success, -EIO on any initialization timeout or failure.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		/* clear the first batch of CQ contexts */
		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);
	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	/* boot the uP from flash, passing the firmware parameters */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	/* wait up to ~2s for the uP to clear HOST_ACC_DATA */
	attempts = 100;
	do {
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3392 {
3393 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3394 u32 pci_mode;
3395
3396 if (pci_is_pcie(adapter->pdev)) {
3397 u16 val;
3398
3399 p->variant = PCI_VARIANT_PCIE;
3400 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3401 p->width = (val >> 4) & 0x3f;
3402 return;
3403 }
3404
3405 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3406 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3407 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3408 pci_mode = G_PCIXINITPAT(pci_mode);
3409 if (pci_mode == 0)
3410 p->variant = PCI_VARIANT_PCI;
3411 else if (pci_mode < 4)
3412 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3413 else if (pci_mode < 8)
3414 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3415 else
3416 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3417 }
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428 static void init_link_config(struct link_config *lc, unsigned int caps)
3429 {
3430 lc->supported = caps;
3431 lc->requested_speed = lc->speed = SPEED_INVALID;
3432 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3433 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3434 if (lc->supported & SUPPORTED_Autoneg) {
3435 lc->advertising = lc->supported;
3436 lc->autoneg = AUTONEG_ENABLE;
3437 lc->requested_fc |= PAUSE_AUTONEG;
3438 } else {
3439 lc->advertising = 0;
3440 lc->autoneg = AUTONEG_DISABLE;
3441 }
3442 }
3443
3444
3445
3446
3447
3448
3449
3450
3451 static unsigned int mc7_calc_size(u32 cfg)
3452 {
3453 unsigned int width = G_WIDTH(cfg);
3454 unsigned int banks = !!(cfg & F_BKS) + 1;
3455 unsigned int org = !!(cfg & F_ORG) + 1;
3456 unsigned int density = G_DEN(cfg);
3457 unsigned int MBs = ((256 << density) * banks) / (org << width);
3458
3459 return MBs << 20;
3460 }
3461
/*
 * Initialize an mc7 descriptor: record the instance's register offset
 * relative to the PMRX instance and read its size and width from the
 * hardware CFG register.  A density field of M_DEN marks the instance
 * as absent (size 0).
 */
static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
		     unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}
3474
/*
 * Set up a port's MAC software state: compute the XGMAC register-block
 * offset for the port and apply rev-0 XAUI serdes setup.  On device-id
 * 0x37 parts without a second XAUI configuration all ports use MAC 0.
 */
static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	pci_read_config_word(adapter->pdev, 0x2, &devid);

	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
		index = 0;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;	/* one unicast address (the port's own) */

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
3494
/*
 * Early, board-level hardware setup done before ports are brought up:
 * MDIO, I2C clock divider, GPIO directions, and the XGMAC port config
 * (port speed, RGMII enable, clock-divider reset) for both MAC blocks.
 */
static void early_hw_init(struct adapter *adapter,
			  const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Write the port config to MAC 0, reading back to flush, then
	 * repeat with the clock-divider reset bit set and mirror the
	 * result to MAC 1.
	 */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
3521
3522
3523
3524
3525
3526
/*
 * Reset the adapter through the PL_RST register and wait for the device
 * to reappear on the bus.  On pre-B2 PCIe parts the PCI config space is
 * saved before and restored after the reset.  Returns 0 on success, -1
 * if the device does not come back within ~500ms.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/* Poll the vendor-ID word (config offset 0) until it reads the
	 * Chelsio vendor id 0x1425 again, i.e. the device has finished
	 * resetting.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
3555
/*
 * Establish parity in memories that need it: clear a set of egress and
 * response-queue SGE contexts, then zero-fill every entry of the four
 * CIM IBQs through the debug interface.  Returns -EBUSY if the SGE
 * context command unit is busy, otherwise the first error encountered
 * (or 0 on success).
 */
static int init_parity(struct adapter *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	/* write zero to every address of each of the 4 IBQs */
	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}
3585
3586
3587
3588
3589
3590
/*
 * t3_prep_adapter - prepare SW and HW state for operation
 * @adapter: the adapter being initialized
 * @ai: board-type information (port counts, PHY base address, MDIO ops)
 * @reset: non-zero to reset the chip before configuring it
 *
 * Reads VPD parameters, optionally resets the adapter, sizes the MC7
 * memory controllers (which determines offload capability), runs early
 * hardware and parity initialization, and prepares each port's PHY and
 * MAC including its Ethernet address.
 *
 * Returns 0 on success, a negative errno on failure, or -1 if the chip
 * fails to come back from reset.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	/*
	 * j indexes VPD port-type slots; starting at (unsigned)-1 makes the
	 * pre-increment in the scan loop below begin at slot 0.
	 */
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	/* bit 0 = channel 0 populated, bit 1 = channel 1 populated */
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);

	/*
	 * Always poll link/adapter state every 10 ticks; 10G MACs get
	 * their statistics accumulated more frequently than 1G MACs
	 * (presumably because their counters wrap sooner — NOTE(review):
	 * confirm the rationale against the MAC counter widths).
	 */
	adapter->params.linkpoll_period = 10;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	if (!adapter->params.pci.vpd_cap_addr)
		return -ENODEV;
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	/* A non-zero memory clock in the VPD means external memory is fitted */
	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		/* chan_map == 3 means both channels populated */
		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		/* Rx memory is always split in half; Tx is split per channel */
		p->chan_rx_size = p->pmrx_size / 2;
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		/* More timer queues with >=128MB CM memory or rev > 0 parts */
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	/* Offload needs all three MC7 memories present */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		/* rev-0 parts don't support filters */
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* Advance j to the next populated VPD port-type slot */
		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		p->phy.mdio.dev = adapter->port[i];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the card's base Ethernet address;
		 * each port's address is the base plus the port index in
		 * the last octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		eth_hw_addr_set(adapter->port[i], hw_addr);
		init_link_config(&p->link_config, p->phy.caps);
		/* Leave PHYs powered down until the interface is brought up */
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY cannot interrupt on link-status changes we
		 * must poll it, so cap the poll period at 10 ticks.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3714
/*
 * t3_led_ready - indicate the driver has loaded
 * @adapter: the adapter
 *
 * Drives GPIO0 high to light the board's "driver ready" LED.
 */
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
3720
/*
 * t3_replay_prep_adapter - re-run hardware preparation after a reset
 * @adapter: the adapter, already prepared once by t3_prep_adapter()
 *
 * Replays the hardware side of adapter preparation (early HW init,
 * parity init, per-port PHY re-prep and power-down) using the software
 * state recorded during the original t3_prep_adapter() call.  Unlike the
 * original prep it passes NULL for the MDIO ops, presumably because each
 * PHY already has its ops installed — NOTE(review): confirm phy_prep
 * implementations tolerate a NULL mdio_ops on re-prep.
 *
 * Returns 0 on success or a negative error code.
 */
int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	/*
	 * j indexes VPD port-type slots; starting at (unsigned)-1 makes
	 * the pre-increment in the scan loop begin at slot 0.
	 */
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* Advance j to the next populated VPD port-type slot */
		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
		if (ret)
			return ret;
		/* Keep the PHY powered down until the interface is opened */
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}
3748