0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039 #include <linux/pci.h>
0040 #include <linux/delay.h>
0041 #include <linux/module.h>
0042 #include <linux/firmware.h>
0043
0044 #include "qib.h"
0045 #include "qib_7220.h"
0046
0047 #define SD7220_FW_NAME "qlogic/sd7220.fw"
0048 MODULE_FIRMWARE(SD7220_FW_NAME);
0049
0050
0051
0052
0053
0054
0055 #define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
0056 #define kr_hwerrclear KREG_IDX(HwErrClear)
0057 #define kr_hwerrmask KREG_IDX(HwErrMask)
0058 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
0059 #define kr_ibcstatus KREG_IDX(IBCStatus)
0060 #define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
0061 #define kr_scratch KREG_IDX(Scratch)
0062 #define kr_xgxs_cfg KREG_IDX(XGXSCfg)
0063
0064 #define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
0065 #define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
0066 #define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
0067 #define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
0068 #define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
0069
0070
0071
0072
0073
0074 #define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
0075
0076
0077
0078
0079
0080 #define PCIE_SERDES0 0
0081 #define PCIE_SERDES1 1
0082
0083
0084
0085
0086
0087 #define EPB_ADDR_SHF 8
0088 #define EPB_LOC(chn, elt, reg) \
0089 (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
0090 EPB_ADDR_SHF)
0091 #define EPB_IB_QUAD0_CS_SHF (25)
0092 #define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
0093 #define EPB_IB_UC_CS_SHF (26)
0094 #define EPB_PCIE_UC_CS_SHF (27)
0095 #define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
0096
0097
0098 static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
0099 u32 data, u32 mask);
0100 static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
0101 int mask);
0102 static int qib_sd_trimdone_poll(struct qib_devdata *dd);
0103 static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
0104 static int qib_sd_setvals(struct qib_devdata *dd);
0105 static int qib_sd_early(struct qib_devdata *dd);
0106 static int qib_sd_dactrim(struct qib_devdata *dd);
0107 static int qib_internal_presets(struct qib_devdata *dd);
0108
0109 static int qib_sd_trimself(struct qib_devdata *dd, int val);
0110 static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
0111 static int qib_sd7220_ib_load(struct qib_devdata *dd,
0112 const struct firmware *fw);
0113 static int qib_sd7220_ib_vfy(struct qib_devdata *dd,
0114 const struct firmware *fw);
0115
0116
0117
0118
0119
0120
0121
0122
0123 static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd,
0124 const struct firmware *fw)
0125 {
0126 struct qib_devdata *dd = ppd->dd;
0127
0128 if (!dd->cspec->serdes_first_init_done &&
0129 qib_sd7220_ib_vfy(dd, fw) > 0)
0130 dd->cspec->serdes_first_init_done = 1;
0131 return dd->cspec->serdes_first_init_done;
0132 }
0133
0134
0135 #define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
0136 #define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
0137 #define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
0138 #define UC_PAR_CLR_D 8
0139 #define UC_PAR_CLR_M 0xC
0140 #define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
0141 #define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
0142
/*
 * Clear the IB SerDes uC parity-error latch (via MPREG6), then clear the
 * corresponding chip-level hardware-error status bit.
 */
void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
{
	int ret;

	/* clear, then re-enable parity errs in the uC control register */
	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
				 UC_PAR_CLR_D, UC_PAR_CLR_M);
	if (ret < 0) {
		qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
		goto bail;
	}
	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
				 UC_PAR_CLR_M);

	/* flush posted writes, then settle before clearing chip status */
	qib_read_kreg32(dd, kr_scratch);
	udelay(4);
	qib_write_kreg(dd, kr_hwerrclear,
		       QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
	qib_read_kreg32(dd, kr_scratch);
bail:
	return;
}
0165
0166
0167
0168
0169
0170
0171 #define IBSD_RESYNC_TRIES 3
0172 #define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
0173 #define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
0174
/*
 * Re-synchronize the EPB interface to the IB SerDes (needed after the uC
 * has been reset). For each of the four channels: read the per-channel
 * pattern register (IB_PGUDP), write back an altered pattern and confirm
 * it reads back, then verify the channel number and "done" bit reported
 * in IB_CMUDONE. Up to 4 * IBSD_RESYNC_TRIES attempts overall.
 * Returns 0 on success, negative on persistent failure.
 */
static int qib_resync_ibepb(struct qib_devdata *dd)
{
	int ret, pat, tries, chn;
	u32 loc;

	ret = -1;
	chn = 0;
	for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
		loc = IB_PGUDP(chn);
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
		if (ret < 0) {
			qib_dev_err(dd, "Failed read in resync\n");
			continue;
		}
		/*
		 * XORing with 0xA5 below alternates the stored pattern
		 * between 0x55 and 0xF0, so those are the only two values
		 * we expect to find (warn only on the first attempt).
		 */
		if (ret != 0xF0 && ret != 0x55 && tries == 0)
			qib_dev_err(dd, "unexpected pattern in resync\n");
		pat = ret ^ 0xA5;
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
		if (ret < 0) {
			qib_dev_err(dd, "Failed write in resync\n");
			continue;
		}
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
		if (ret < 0) {
			qib_dev_err(dd, "Failed re-read in resync\n");
			continue;
		}
		if (ret != pat) {
			qib_dev_err(dd, "Failed compare1 in resync\n");
			continue;
		}
		loc = IB_CMUDONE(chn);
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
		if (ret < 0) {
			qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
			continue;
		}
		/* bits 6:4 should encode "done" (0x40) plus this channel */
		if ((ret & 0x70) != ((chn << 4) | 0x40)) {
			qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
				    ret, chn);
			continue;
		}
		/* channel is in sync; success once all 4 have passed */
		if (++chn == 4)
			break;
	}
	/* ret holds the last read: positive data means resync succeeded */
	return (ret > 0) ? 0 : ret;
}
0222
0223
0224
0225
0226
/*
 * Assert (@assert_rst != 0) or de-assert the IB SerDes uC reset (bit 0
 * of IBSerDesCtrl), handling the parity-error masking and EPB resync
 * that the transition requires. Returns <0 on error.
 */
static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
{
	u64 rst_val;
	int ret = 0;
	unsigned long flags;

	rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
	if (assert_rst) {
		/*
		 * Hold the EPB lock and claim bus ownership across the
		 * reset assertion, so the uC is not stopped mid-EPB
		 * transaction.
		 */
		spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
		epb_access(dd, IB_7220_SERDES, 1);
		rst_val |= 1ULL;
		/* Squelch possible parity error from _asserting_ reset */
		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask &
			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
		/* flush write, delay to ensure it took effect */
		qib_read_kreg32(dd, kr_scratch);
		udelay(2);
		/* once it's reset, we can release the bus again */
		epb_access(dd, IB_7220_SERDES, -1);
		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
	} else {
		/*
		 * De-asserting reset: suppress the parity error both in
		 * the chip-level hwerrmask and in the uC control regs
		 * (MPREG5/MPREG6) around the transition, resync the EPB,
		 * then clear and re-check the parity status afterwards.
		 */
		u64 val;

		rst_val &= ~(1ULL);
		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask &
			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);

		ret = qib_resync_ibepb(dd);
		if (ret < 0)
			qib_dev_err(dd, "unable to re-sync IB EPB\n");

		/* set uC control regs to suppress parity errs */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
		if (ret < 0)
			goto bail;
		/* set bit 7 of MPREG6 (watchdog disable, per use below) */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
					 0x80);
		if (ret < 0) {
			qib_dev_err(dd, "Failed to set WDOG disable\n");
			goto bail;
		}
		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
		/* flush write, delay for startup */
		qib_read_kreg32(dd, kr_scratch);
		udelay(1);
		/* clear the parity latch, then check it did not recur */
		qib_sd7220_clr_ibpar(dd);
		val = qib_read_kreg64(dd, kr_hwerrstatus);
		if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
			qib_dev_err(dd, "IBUC Parity still set after RST\n");
			/* leave it masked off in hwerrmask from now on */
			dd->cspec->hwerrmask &=
				~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
		}
		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask);
	}

bail:
	return ret;
}
0302
/*
 * Sanity-check and repair the SerDes TRIMDONE state: re-sync the EPB,
 * check the summary TRIMDONE bit in IBCStatus, then the per-channel
 * TRIMDONE bit (0x10) in IB_CTRL2 for each of the 4 channels, setting
 * it again wherever it was found cleared. @where tags log messages with
 * the calling context. Diagnostic only: no value is returned.
 */
static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
				    const char *where)
{
	int ret, chn, baduns;
	u64 val;

	if (!where)
		where = "?";

	/* give time for the preceding reset to settle out in the EPB */
	udelay(2);

	ret = qib_resync_ibepb(dd);
	if (ret < 0)
		qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);

	/* sacrificial read to get the EPB in a sane state after reset */
	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
	if (ret < 0)
		qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);

	/* check the "summary" Trim-done bit (bit 11) in IBCStatus */
	val = qib_read_kreg64(dd, kr_ibcstatus);
	if (!(val & (1ULL << 11)))
		qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);

	/* dummy read-modify-write of MPREG6 (value re-asserts bit 7) */
	udelay(2);

	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
	if (ret < 0)
		qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
	udelay(10);

	baduns = 0;

	for (chn = 3; chn >= 0; --chn) {
		/* read each channel's CTRL2 to check its TRIMDONE bit */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
					 IB_CTRL2(chn), 0, 0);
		if (ret < 0)
			qib_dev_err(dd,
				    "Failed checking TRIMDONE, chn %d (%s)\n",
				    chn, where);

		if (!(ret & 0x10)) {
			int probe;

			baduns |= (1 << chn);
			qib_dev_err(dd,
				"TRIMDONE cleared on chn %d (%02X). (%s)\n",
				chn, ret, where);
			/* probe reads purely for diagnostics in the log */
			probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						   IB_PGUDP(0), 0, 0);
			qib_dev_err(dd, "probe is %d (%02X)\n",
				    probe, probe);
			probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						   IB_CTRL2(chn), 0, 0);
			qib_dev_err(dd, "re-read: %d (%02X)\n",
				    probe, probe);
			/* first attempt to re-set the TRIMDONE bit */
			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						 IB_CTRL2(chn), 0x10, 0x10);
			if (ret < 0)
				qib_dev_err(dd,
					"Err on TRIMDONE rewrite1\n");
		}
	}
	for (chn = 3; chn >= 0; --chn) {
		/* second pass: re-set TRIMDONE on channels found bad above */
		if (baduns & (1 << chn)) {
			qib_dev_err(dd,
				"Resetting TRIMDONE on chn %d (%s)\n",
				chn, where);
			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						 IB_CTRL2(chn), 0x10, 0x10);
			if (ret < 0)
				qib_dev_err(dd,
					"Failed re-setting TRIMDONE, chn %d (%s)\n",
					chn, where);
		}
	}
}
0387
0388
0389
0390
0391
0392
0393
/*
 * Bring up the IBA7220 IB SerDes: load/verify uC firmware, apply early
 * defaults, DAC trim and presets, run the trim cycle, and finally load
 * the DDS/RXEQ mapping tables. Most of the heavy work only happens the
 * first time after a chip reset (first_reset); afterwards only the
 * table setup is redone. Returns 0 on success, 1 on failure.
 */
int qib_sd7220_init(struct qib_devdata *dd)
{
	const struct firmware *fw;
	int ret = 1; /* default to failure */
	int first_reset, was_reset;

	/* SERDES uC reset state is recorded in bit 0 of IBSerDesCtrl */
	was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
	if (!was_reset) {
		/* entered with reset not asserted, so assert it now */
		qib_ibsd_reset(dd, 1);
		qib_sd_trimdone_monitor(dd, "Driver-reload");
	}

	ret = request_firmware(&fw, SD7220_FW_NAME, &dd->pcidev->dev);
	if (ret) {
		qib_dev_err(dd, "Failed to load IB SERDES image\n");
		goto done;
	}

	/* first_reset is deduced from whether the uCode is already loaded */
	ret = qib_ibsd_ucode_loaded(dd->pport, fw);
	if (ret < 0)
		goto bail;

	first_reset = !ret; /* First reset if IBSD uCode not yet loaded */

	/*
	 * Alter some regs (RXHSCTRL0, START_EQ1/2); reset defaults
	 * are not what we want here.
	 */
	ret = qib_sd_early(dd);
	if (ret < 0) {
		qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
		goto bail;
	}

	/*
	 * Set DAC manual trim. Only done once, after the chip has been
	 * reset (typically once per system boot).
	 */
	if (first_reset) {
		ret = qib_sd_dactrim(dd);
		if (ret < 0) {
			qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
			goto bail;
		}
	}

	/*
	 * Set various registers (DDS and RXEQ) to preset values. The
	 * "internal" version skips the presets_needed check and the
	 * trimdone monitor that qib_sd7220_presets() would also do.
	 */
	ret = qib_internal_presets(dd);
	if (ret < 0) {
		qib_dev_err(dd, "Failed to set IB SERDES presets\n");
		goto bail;
	}
	ret = qib_sd_trimself(dd, 0x80);
	if (ret < 0) {
		qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
		goto bail;
	}

	/* Load firmware image into uC RAM, then verify it */
	ret = 0;
	if (first_reset) {
		int vfy;
		int trim_done;

		ret = qib_sd7220_ib_load(dd, fw);
		if (ret < 0) {
			qib_dev_err(dd, "Failed to load IB SERDES image\n");
			goto bail;
		} else {
			/* byte count from verify must match byte count loaded */
			vfy = qib_sd7220_ib_vfy(dd, fw);
			if (vfy != ret) {
				qib_dev_err(dd, "SERDES PRAM VFY failed\n");
				goto bail;
			}
		}

		/*
		 * Loaded and verified. Almost done...
		 */
		ret = 0;

		/*
		 * Clear bits 5:3 of START_EQ1 on all channels before
		 * releasing the uC from reset, so the trim cycle can run.
		 */
		ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
		if (ret < 0) {
			qib_dev_err(dd, "Failed clearing START_EQ1\n");
			goto bail;
		}

		/* de-assert reset to start the uC and its trim cycle */
		qib_ibsd_reset(dd, 0);

		/* poll for trim completion */
		trim_done = qib_sd_trimdone_poll(dd);

		/*
		 * Put the uC back into reset regardless of whether
		 * trimdone was seen, before deciding success/failure.
		 */
		qib_ibsd_reset(dd, 1);

		if (!trim_done) {
			qib_dev_err(dd, "No TRIMDONE seen\n");
			goto bail;
		}

		/*
		 * Check whether trimdone bits were cleared across the
		 * reset, and repair them if so.
		 */
		qib_sd_trimdone_monitor(dd, "First-reset");
		/* remember so we don't re-do the load, dactrim, etc. */
		dd->cspec->serdes_first_init_done = 1;
	}

	/*
	 * Load the DDS/RXEQ tables used by IBC in IB1.2 mode.
	 */
	ret = 0;
	if (qib_sd_setvals(dd) >= 0)
		goto done;
bail:
	ret = 1;
done:
	/* start relock timer regardless, but start per-interval */
	set_7220_relock_poll(dd, -1);

	release_firmware(fw);
	return ret;
}
0537
0538 #define EPB_ACC_REQ 1
0539 #define EPB_ACC_GNT 0x100
0540 #define EPB_DATA_MASK 0xFF
0541 #define EPB_RD (1ULL << 24)
0542 #define EPB_TRANS_RDY (1ULL << 31)
0543 #define EPB_TRANS_ERR (1ULL << 30)
0544 #define EPB_TRANS_TRIES 5
0545
0546
0547
0548
0549
0550
0551
/*
 * Arbitrate host access to the EPB (SerDes register bus) of @sdnum.
 * @claim: >0 request ownership, <0 release it, 0 just query.
 * Returns the ownership state observed before the operation (0 or 1),
 * or -1 when a requested claim/release did not take effect.
 * Caller is expected to hold sdepb_lock.
 */
static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
{
	u16 acc;
	u64 accval;
	int owned = 0;
	u64 oct_sel = 0;

	switch (sdnum) {
	case IB_7220_SERDES:
		/*
		 * The IB SERDES has a single access-control register;
		 * no octant select needed.
		 */
		acc = kr_ibsd_epb_access_ctrl;
		break;

	case PCIE_SERDES0:
	case PCIE_SERDES1:
		/* PCIe SERDES access control also selects an "octant" */
		acc = kr_pciesd_epb_access_ctrl;
		oct_sel = (2 << (sdnum - PCIE_SERDES0));
		break;

	default:
		return 0;
	}

	/* flush any posted writes, then let things settle */
	qib_read_kreg32(dd, kr_scratch);
	udelay(15);

	accval = qib_read_kreg32(dd, acc);

	owned = !!(accval & EPB_ACC_GNT);
	if (claim < 0) {
		/* Releasing */
		u64 pollval;
		/*
		 * The only writable bits are the request and CS;
		 * both should be clear to release.
		 */
		u64 newval = 0;

		qib_write_kreg(dd, acc, newval);
		/* first read after the write is discarded (not trusted) */
		pollval = qib_read_kreg32(dd, acc);
		udelay(5);
		pollval = qib_read_kreg32(dd, acc);
		if (pollval & EPB_ACC_GNT)
			owned = -1;	/* release failed: still granted */
	} else if (claim > 0) {
		/* Claiming */
		u64 pollval;
		u64 newval = EPB_ACC_REQ | oct_sel;

		qib_write_kreg(dd, acc, newval);
		/* first read after the write is discarded (not trusted) */
		pollval = qib_read_kreg32(dd, acc);
		udelay(5);
		pollval = qib_read_kreg32(dd, acc);
		if (!(pollval & EPB_ACC_GNT))
			owned = -1;	/* claim failed: not granted */
	}
	return owned;
}
0617
0618
0619
0620
/*
 * Issue a single EPB transaction (@i_val written to transaction register
 * @reg) and poll for completion. On success the final register value
 * (data byte in its low 8 bits) is stored through @o_vp if non-NULL.
 * Returns remaining poll tries (>0 success, 0 timeout), or -1 if the
 * hardware flagged a transaction error.
 */
static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
{
	int tries;
	u64 transval;

	qib_write_kreg(dd, reg, i_val);
	/* throw away the first read; RDY may be stale right after write */
	transval = qib_read_kreg64(dd, reg);

	for (tries = EPB_TRANS_TRIES; tries; --tries) {
		transval = qib_read_kreg32(dd, reg);
		if (transval & EPB_TRANS_RDY)
			break;
		udelay(5);
	}
	if (transval & EPB_TRANS_ERR)
		return -1;
	if (tries > 0 && o_vp)
		*o_vp = transval;
	return tries;
}
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
/*
 * Read-modify-write one byte-wide SerDes register via its EPB.
 * @sdnum: IB_7220_SERDES, PCIE_SERDES0, or PCIE_SERDES1.
 * @loc: EPB address (chip-select/global bits encoded above the address).
 * @wd: data to write; @mask: which bits of the register to change.
 *   mask == 0    -> pure read (no write transaction)
 *   mask == 0xFF -> pure write (no pre-read)
 * Returns the resulting register data byte (0..0xFF), or -1 on failure.
 */
static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
			      u32 wd, u32 mask)
{
	u16 trans;
	u64 transval;
	int owned;
	int tries, ret;
	unsigned long flags;

	switch (sdnum) {
	case IB_7220_SERDES:
		trans = kr_ibsd_epb_transaction_reg;
		break;

	case PCIE_SERDES0:
	case PCIE_SERDES1:
		trans = kr_pciesd_epb_transaction_reg;
		break;

	default:
		return -1;
	}

	/*
	 * All access is locked in software (vs other host threads) and
	 * hardware (vs uC access) via the EPB ownership handshake.
	 */
	spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);

	owned = epb_access(dd, sdnum, 1);
	if (owned < 0) {
		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
		return -1;
	}
	/* wait for any in-flight transaction to become ready */
	for (tries = EPB_TRANS_TRIES; tries; --tries) {
		transval = qib_read_kreg32(dd, trans);
		if (transval & EPB_TRANS_RDY)
			break;
		udelay(5);
	}

	if (tries > 0) {
		tries = 1;	/* to make read-skip work */
		if (mask != 0xFF) {
			/*
			 * Not a pure write, so need to read first.
			 * loc encodes chip-select as well as address.
			 */
			transval = loc | EPB_RD;
			tries = epb_trans(dd, trans, transval, &transval);
		}
		if (tries > 0 && mask != 0) {
			/*
			 * Not a pure read, so need to write merged value.
			 * NOTE(review): on the pure-write path (mask ==
			 * 0xFF) transval still holds the RDY-poll value,
			 * but ~mask admits only bits above the data byte,
			 * which EPB_DATA_MASK strips again below.
			 */
			wd = (wd & mask) | (transval & ~mask);
			transval = loc | (wd & EPB_DATA_MASK);
			tries = epb_trans(dd, trans, transval, &transval);
		}
	}

	/*
	 * Release bus ownership; a failed release is itself an error,
	 * as is a timed-out/errored transaction (tries <= 0).
	 */
	if (epb_access(dd, sdnum, -1) < 0)
		ret = -1;
	else
		ret = transval & EPB_DATA_MASK;

	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
	if (tries <= 0)
		ret = -1;
	return ret;
}
0731
0732 #define EPB_ROM_R (2)
0733 #define EPB_ROM_W (1)
0734
0735
0736
0737
0738 #define EPB_UC_CTL EPB_LOC(6, 0, 0)
0739 #define EPB_MADDRL EPB_LOC(6, 0, 2)
0740 #define EPB_MADDRH EPB_LOC(6, 0, 3)
0741 #define EPB_ROMDATA EPB_LOC(6, 0, 4)
0742 #define EPB_RAMDATA EPB_LOC(6, 0, 5)
0743
0744
/*
 * Transfer @cnt bytes between @buf and the SerDes uC memory at address
 * @loc (trimmed to 13 bits), one EPB transaction per byte.
 * @rd_notwr: nonzero reads into @buf, zero writes from @buf.
 * Returns the number of bytes transferred, or -1 on failure.
 */
static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
			       u8 *buf, int cnt, int rd_notwr)
{
	u16 trans;
	u64 transval;
	u64 csbit;
	int owned;
	int tries;
	int sofar;
	int addr;
	int ret;
	unsigned long flags;

	/* pick the transaction register and chip-select for this SerDes */
	switch (sdnum) {
	case IB_7220_SERDES:
		csbit = 1ULL << EPB_IB_UC_CS_SHF;
		trans = kr_ibsd_epb_transaction_reg;
		break;

	case PCIE_SERDES0:
	case PCIE_SERDES1:
		/* the EPB used by the PCIe SERDES is shared by both octants */
		csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
		trans = kr_pciesd_epb_transaction_reg;
		break;

	default:
		return -1;
	}

	spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);

	owned = epb_access(dd, sdnum, 1);
	if (owned < 0) {
		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
		return -1;
	}

	/* trim "loc" to the 13-bit address within the uC memory */
	addr = loc & 0x1FFF;
	/* wait for any pending transaction to complete */
	for (tries = EPB_TRANS_TRIES; tries; --tries) {
		transval = qib_read_kreg32(dd, trans);
		if (transval & EPB_TRANS_RDY)
			break;
		udelay(5);
	}

	sofar = 0;
	if (tries > 0) {
		/*
		 * Each memory access is indirect: first set the control
		 * register for Read or Write, then set the two address
		 * bytes, then transfer data bytes one at a time.
		 */
		transval = csbit | EPB_UC_CTL |
			(rd_notwr ? EPB_ROM_R : EPB_ROM_W);
		tries = epb_trans(dd, trans, transval, &transval);
		while (tries > 0 && sofar < cnt) {
			if (!sofar) {
				/*
				 * Address is only written once, before the
				 * first byte; the uC presumably advances it
				 * across the transfer — TODO confirm against
				 * the SerDes uC documentation.
				 */
				int addrbyte = (addr + sofar) >> 8;

				transval = csbit | EPB_MADDRH | addrbyte;
				tries = epb_trans(dd, trans, transval,
						  &transval);
				if (tries <= 0)
					break;
				addrbyte = (addr + sofar) & 0xFF;
				transval = csbit | EPB_MADDRL | addrbyte;
				tries = epb_trans(dd, trans, transval,
						  &transval);
				if (tries <= 0)
					break;
			}

			/* one data transaction per byte */
			if (rd_notwr)
				transval = csbit | EPB_ROMDATA | EPB_RD;
			else
				transval = csbit | EPB_ROMDATA | buf[sofar];
			tries = epb_trans(dd, trans, transval, &transval);
			if (tries <= 0)
				break;
			if (rd_notwr)
				buf[sofar] = transval & EPB_DATA_MASK;
			++sofar;
		}
		/* finally, clear the Read/Write control bits */
		transval = csbit | EPB_UC_CTL;
		tries = epb_trans(dd, trans, transval, &transval);
	}

	ret = sofar;
	/* release the bus; failure to release is an error */
	if (epb_access(dd, sdnum, -1) < 0)
		ret = -1;

	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
	if (tries <= 0)
		ret = -1;
	return ret;
}
0855
0856 #define PROG_CHUNK 64
0857
0858 static int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
0859 const u8 *img, int len, int offset)
0860 {
0861 int cnt, sofar, req;
0862
0863 sofar = 0;
0864 while (sofar < len) {
0865 req = len - sofar;
0866 if (req > PROG_CHUNK)
0867 req = PROG_CHUNK;
0868 cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
0869 (u8 *)img + sofar, req, 0);
0870 if (cnt < req) {
0871 sofar = -1;
0872 break;
0873 }
0874 sofar += req;
0875 }
0876 return sofar;
0877 }
0878
0879 #define VFY_CHUNK 64
0880 #define SD_PRAM_ERROR_LIMIT 42
0881
0882 static int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
0883 const u8 *img, int len, int offset)
0884 {
0885 int cnt, sofar, req, idx, errors;
0886 unsigned char readback[VFY_CHUNK];
0887
0888 errors = 0;
0889 sofar = 0;
0890 while (sofar < len) {
0891 req = len - sofar;
0892 if (req > VFY_CHUNK)
0893 req = VFY_CHUNK;
0894 cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
0895 readback, req, 1);
0896 if (cnt < req) {
0897
0898 sofar = -1;
0899 break;
0900 }
0901 for (idx = 0; idx < cnt; ++idx) {
0902 if (readback[idx] != img[idx+sofar])
0903 ++errors;
0904 }
0905 sofar += cnt;
0906 }
0907 return errors ? -errors : sofar;
0908 }
0909
0910 static int
0911 qib_sd7220_ib_load(struct qib_devdata *dd, const struct firmware *fw)
0912 {
0913 return qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw->data, fw->size, 0);
0914 }
0915
0916 static int
0917 qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
0918 {
0919 return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw->data, fw->size, 0);
0920 }
0921
0922
0923
0924
0925 #define IB_SERDES_TRIM_DONE (1ULL << 11)
0926 #define TRIM_TMO (15)
0927
0928 static int qib_sd_trimdone_poll(struct qib_devdata *dd)
0929 {
0930 int trim_tmo, ret;
0931 uint64_t val;
0932
0933
0934
0935
0936
0937 ret = 0;
0938 for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
0939 val = qib_read_kreg64(dd, kr_ibcstatus);
0940 if (val & IB_SERDES_TRIM_DONE) {
0941 ret = 1;
0942 break;
0943 }
0944 msleep(20);
0945 }
0946 if (trim_tmo >= TRIM_TMO) {
0947 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
0948 ret = 0;
0949 }
0950 return ret;
0951 }
0952
0953 #define TX_FAST_ELT (9)
0954
0955
0956
0957
0958
0959
0960
0961
/*
 * Tx DDS (amplitude/equalization) tables, later loaded into the SerDes
 * mapping table by qib_sd_setvals() for IBC use in IB 1.2 mode.
 * DDS_REG_MAP lists (4 bits each, low nibble first) which registers of
 * element TX_FAST_ELT the NUM_DDS_REGS bytes of each row target.
 * DDS_VAL packs eight parameters — amplitude, main tap, post- and
 * pre-cursor, for the "_d" and "_s" variants — into those bytes.
 */
#define NUM_DDS_REGS 6
#define DDS_REG_MAP 0x76A910

#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
	{ { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
	(main_d << 3) | 4 | (ipre_d >> 2), \
	(main_s << 3) | 4 | (ipre_s >> 2), \
	((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
	((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }

static struct dds_init {
	uint8_t reg_vals[NUM_DDS_REGS];
} dds_init_vals[] = {
	/*
	 * 16 rows total; row DDS_3M is the default preset used by
	 * qib_internal_presets(). Values presumably come from vendor
	 * characterization — TODO confirm against hardware docs.
	 */
#define DDS_3M 0
	DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
	DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
	DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
	DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
	DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
	DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
	DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
	DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
	DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
	DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
	DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
	DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
	DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
	/* rows from DDS_1M onward */
#define DDS_1M 13
	DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
	DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
	DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
};
0997
0998
0999
1000
1001
/*
 * Rx Equalization tables, loaded into the SerDes mapping table by
 * qib_sd_setvals() and applied directly by set_rxeq_vals().
 * Each row's rdesc packs the element (low nibble) and register address
 * (upper bits); rdata holds the four per-"vsel" data bytes.
 */
#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
	{RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }

/* same value replicated into all four vsel columns */
#define RXEQ_VAL_ALL(elt, adr, val) \
	{RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }

#define RXEQ_SDR_DFELTH 0
#define RXEQ_SDR_TLTH 0
#define RXEQ_SDR_G1CNT_Z1CNT 0x11
#define RXEQ_SDR_ZCNT 23

static struct rxeq_init {
	u16 rdesc;	/* element + register, as used in SerDesDDSRXEQ */
	u8 rdata[4];	/* one data byte per vsel */
} rxeq_init_vals[] = {
	/*
	 * Register meanings below are per the SerDes vendor register
	 * map — TODO confirm against hardware documentation.
	 */
	RXEQ_VAL_ALL(7, 0x27, 0x10),
	/* paired registers written with identical values */
	RXEQ_VAL(7, 8, 0, 0, 0, 0),
	RXEQ_VAL(7, 0x21, 0, 0, 0, 0),

	RXEQ_VAL(7, 9, 2, 2, 2, 2),
	RXEQ_VAL(7, 0x23, 2, 2, 2, 2),

	RXEQ_VAL(7, 0x1B, 12, 12, 12, 12),
	RXEQ_VAL(7, 0x1C, 12, 12, 12, 12),

	RXEQ_VAL(7, 0x1E, 16, 16, 16, 16),
	RXEQ_VAL(7, 0x1F, 16, 16, 16, 16),
	/* elt 6 reg 6 written twice: 0x20, then 0 (last row wins) */
	RXEQ_VAL_ALL(6, 6, 0x20),
	RXEQ_VAL_ALL(6, 6, 0),
};
1036
1037
1038 #define DDS_ROWS (16)
1039 #define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
1040
/*
 * Load the DDS and RXEQ tables into the SerDes mapping table and the
 * DDSRXEQ "instruction" registers, for IBC use in IB 1.2 mode.
 * Always returns 0.
 */
static int qib_sd_setvals(struct qib_devdata *dd)
{
	int idx, midx;
	int min_idx;	 /* first instruction index used by RXEQ portion */
	uint32_t dds_reg_map;
	u64 __iomem *taddr, *iaddr;
	uint64_t data;
	uint64_t sdctl;

	taddr = dd->kregbase + kr_serdes_maptable;
	iaddr = dd->kregbase + kr_serdes_ddsrxeq0;

	/*
	 * Size the two table regions in IBSerDesCtrl: bits 12:8 get the
	 * DDS register count per row, bits 17:13 the RXEQ row count.
	 */
	sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
	sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
	sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
	qib_write_kreg(dd, kr_ibserdesctrl, sdctl);

	/*
	 * DDS portion: for each of the NUM_DDS_REGS registers (numbers
	 * taken 4 bits at a time from DDS_REG_MAP, element TX_FAST_ELT),
	 * write the descriptor, then fill that column of the mapping
	 * table with the per-row data bytes.
	 */
	dds_reg_map = DDS_REG_MAP;
	for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
		data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
		writeq(data, iaddr + idx);
		qib_read_kreg32(dd, kr_scratch); /* flush */
		dds_reg_map >>= 4;
		for (midx = 0; midx < DDS_ROWS; ++midx) {
			u64 __iomem *daddr = taddr + ((midx << 4) + idx);

			data = dds_init_vals[midx].reg_vals[idx];
			writeq(data, daddr);
			qib_read_kreg32(dd, kr_scratch); /* flush */
		}
	}

	/*
	 * RXEQ portion: descriptors follow the DDS entries in the
	 * instruction registers; the data lives 0x100 entries further
	 * into the mapping table.
	 */
	min_idx = idx;
	taddr += 0x100;

	for (idx = 0; idx < RXEQ_ROWS; ++idx) {
		int didx;
		int vidx;

		/* offset by min_idx so RXEQ entries follow DDS entries */
		didx = idx + min_idx;
		/* write the element+address descriptor */
		writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
		qib_read_kreg32(dd, kr_scratch); /* flush */
		/* one data byte per "vsel" column (stride 0x40) */
		for (vidx = 0; vidx < 4; vidx++) {
			data = rxeq_init_vals[idx].rdata[vidx];
			writeq(data, taddr + (vidx << 6) + idx);
			qib_read_kreg32(dd, kr_scratch); /* flush */
		}
	}
	return 0;
}
1108
1109 #define CMUCTRL5 EPB_LOC(7, 0, 0x15)
1110 #define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
1111 #define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
1112 #define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
1113 #define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
1114 #define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
/*
 * Apply a read-modify-write (@val under @mask) of the IB SerDes register
 * addressed by @loc on all 4 channels. If @loc has EPB_GLOBAL_WR set, a
 * single broadcast write is issued instead of four per-channel writes;
 * in that case, a non-full-mask update pre-reads the channel encoded in
 * @loc to obtain the bits being preserved.
 * Returns the last qib_sd7220_reg_mod() result (>= 0 data, < 0 failure).
 */
static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
			     int mask)
{
	int ret = -1;
	int chnl;

	if (loc & EPB_GLOBAL_WR) {
		/*
		 * Caller wants all four channels set at once. If mask is
		 * not 0xFF, read the channel _specified_ in loc for the
		 * starting value, then strip the channel bits so the
		 * write broadcasts.
		 */
		loc |= (1U << EPB_IB_QUAD0_CS_SHF);
		chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
		if (mask != 0xFF) {
			/* not a pure write: pre-read to merge bits */
			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						 loc & ~EPB_GLOBAL_WR, 0, 0);
			if (ret < 0) {
				int sloc = loc >> EPB_ADDR_SHF;

				qib_dev_err(dd,
					"pre-read failed: elt %d, addr 0x%X, chnl %d\n",
					(sloc & 0xF),
					(sloc >> 9) & 0x3f, chnl);
				return ret;
			}
			val = (ret & ~mask) | (val & mask);
		}
		/* clear channel bits: broadcast write to all channels */
		loc &= ~(7 << (4+EPB_ADDR_SHF));
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
		if (ret < 0) {
			int sloc = loc >> EPB_ADDR_SHF;

			qib_dev_err(dd,
				"Global WR failed: elt %d, addr 0x%X, val %02X\n",
				(sloc & 0xF), (sloc >> 9) & 0x3f, val);
		}
		return ret;
	}
	/* no global write: iterate the RMW over each channel in turn */
	loc &= ~(7 << (4+EPB_ADDR_SHF));
	loc |= (1U << EPB_IB_QUAD0_CS_SHF);
	for (chnl = 0; chnl < 4; ++chnl) {
		int cloc = loc | (chnl << (4+EPB_ADDR_SHF));

		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
		if (ret < 0) {
			int sloc = loc >> EPB_ADDR_SHF;

			qib_dev_err(dd,
				"Write failed: elt %d, addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
				(sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
				val & 0xFF, mask & 0xFF);
			break;
		}
	}
	return ret;
}
1185
1186
1187
1188
1189
1190 static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
1191 {
1192 int ret;
1193 int idx, reg, data;
1194 uint32_t regmap;
1195
1196 regmap = DDS_REG_MAP;
1197 for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
1198 reg = (regmap & 0xF);
1199 regmap >>= 4;
1200 data = ddi->reg_vals[idx];
1201
1202 ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
1203 if (ret < 0)
1204 break;
1205 }
1206 return ret;
1207 }
1208
1209
1210
1211
1212
1213 static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
1214 {
1215 int ret;
1216 int ridx;
1217 int cnt = ARRAY_SIZE(rxeq_init_vals);
1218
1219 for (ridx = 0; ridx < cnt; ++ridx) {
1220 int elt, reg, val, loc;
1221
1222 elt = rxeq_init_vals[ridx].rdesc & 0xF;
1223 reg = rxeq_init_vals[ridx].rdesc >> 4;
1224 loc = EPB_LOC(0, elt, reg);
1225 val = rxeq_init_vals[ridx].rdata[vsel];
1226
1227 ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
1228 if (ret < 0)
1229 break;
1230 }
1231 return ret;
1232 }
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243 static unsigned qib_rxeq_set = 2;
1244 module_param_named(rxeq_default_set, qib_rxeq_set, uint,
1245 S_IWUSR | S_IRUGO);
1246 MODULE_PARM_DESC(rxeq_default_set,
1247 "Which set [0..3] of Rx Equalization values is default");
1248
1249 static int qib_internal_presets(struct qib_devdata *dd)
1250 {
1251 int ret = 0;
1252
1253 ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
1254
1255 if (ret < 0)
1256 qib_dev_err(dd, "Failed to set default DDS values\n");
1257 ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
1258 if (ret < 0)
1259 qib_dev_err(dd, "Failed to set default RXEQ values\n");
1260 return ret;
1261 }
1262
1263 int qib_sd7220_presets(struct qib_devdata *dd)
1264 {
1265 int ret = 0;
1266
1267 if (!dd->cspec->presets_needed)
1268 return ret;
1269 dd->cspec->presets_needed = 0;
1270
1271 qib_ibsd_reset(dd, 1);
1272 udelay(2);
1273 qib_sd_trimdone_monitor(dd, "link-down");
1274
1275 ret = qib_internal_presets(dd);
1276 return ret;
1277 }
1278
1279 static int qib_sd_trimself(struct qib_devdata *dd, int val)
1280 {
1281 int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
1282
1283 return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
1284 }
1285
1286 static int qib_sd_early(struct qib_devdata *dd)
1287 {
1288 int ret;
1289
1290 ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
1291 if (ret < 0)
1292 goto bail;
1293 ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
1294 if (ret < 0)
1295 goto bail;
1296 ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
1297 bail:
1298 return ret;
1299 }
1300
1301 #define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
1302 #define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
1303 #define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
1304
/*
 * Run the manual DAC trim sequence on the IB SerDes (done once per chip
 * reset): program VCDL/BACTRL/LDOUTCTRL/RXHSSTATUS on all channels,
 * wait for the trim to complete, then clear the LDOUTCTRL1 value again.
 * Returns last write status (<0 on failure).
 */
static int qib_sd_dactrim(struct qib_devdata *dd)
{
	int ret;

	ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
	if (ret < 0)
		goto bail;

	/* per-channel write, low nibble only */
	ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
	if (ret < 0)
		goto bail;

	ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
	if (ret < 0)
		goto bail;

	ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
	if (ret < 0)
		goto bail;

	ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
	if (ret < 0)
		goto bail;

	/*
	 * Wait for the trim to run to completion before clearing the
	 * trigger below (415us; presumably max steps plus slop — TODO
	 * confirm per-step timing against hardware docs).
	 */
	udelay(415);

	ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);

bail:
	return ret;
}
1341
1342 #define RELOCK_FIRST_MS 3
1343 #define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
/*
 * Toggle bit D7 of RXLSPPM (clear then set, done twice) on all channels
 * via a global write, then reset the XGXS block — the "RCLKRLS" kick
 * used to provoke the SerDes to re-lock.
 */
void toggle_7220_rclkrls(struct qib_devdata *dd)
{
	int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
	int ret;

	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
	if (ret < 0)
		qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
	else {
		udelay(1);
		ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
	}
	/* and again for good measure */
	udelay(1);
	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
	if (ret < 0)
		qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
	else {
		udelay(1);
		ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
	}
	/* now reset the XGXS block */
	dd->f_xgxs_reset(dd->pport);
}
1368
1369
1370
1371
1372
1373
1374
1375 void shutdown_7220_relock_poll(struct qib_devdata *dd)
1376 {
1377 if (dd->cspec->relock_timer_active)
1378 del_timer_sync(&dd->cspec->relock_timer);
1379 }
1380
1381 static unsigned qib_relock_by_timer = 1;
1382 module_param_named(relock_by_timer, qib_relock_by_timer, uint,
1383 S_IWUSR | S_IRUGO);
1384 MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
1385
/*
 * Relock-poll timer callback. If the chip is initialized but the IB
 * link has made no progress (no autoneg in progress, not init/armed/
 * active), apply the RCLKRLS kick (if enabled via relock_by_timer and
 * the link is not administratively disabled) and double the poll
 * interval, capped at HZ. Re-arms itself unconditionally.
 */
static void qib_run_relock(struct timer_list *t)
{
	struct qib_chip_specific *cs = from_timer(cs, t, relock_timer);
	struct qib_devdata *dd = cs->dd;
	struct qib_pportdata *ppd = dd->pport;
	int timeoff;

	/*
	 * Check link-training state for "stuck" state, for which it is
	 * suitable to apply the relock kick; skip it when the link is up
	 * or still making progress.
	 */
	if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
	    (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
	     QIBL_LINKACTIVE))) {
		if (qib_relock_by_timer) {
			/* only kick if the link is not admin-disabled */
			if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
				toggle_7220_rclkrls(dd);
		}
		/* back off: double the interval, capped at HZ */
		timeoff = cs->relock_interval << 1;
		if (timeoff > HZ)
			timeoff = HZ;
		cs->relock_interval = timeoff;
	} else
		timeoff = HZ;	/* link OK or not initted: slow poll */
	mod_timer(&cs->relock_timer, jiffies + timeoff);
}
1415
/*
 * Manage the relock-poll timer across link transitions.
 * @ibup: nonzero when the IB link came up, zero/negative when it is
 * down. On link-up the timer (if running) is relaxed to a 1s interval;
 * on link-down the timer is created or re-armed at a short
 * RELOCK_FIRST_MS interval (the callback then backs off on its own).
 */
void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
{
	struct qib_chip_specific *cs = dd->cspec;

	if (ibup) {
		/* link is up: relax the timer to a 1-second interval */
		if (cs->relock_timer_active) {
			cs->relock_interval = HZ;
			mod_timer(&cs->relock_timer, jiffies + HZ);
		}
	} else {
		/* transition to down: (re)start the timer quickly */
		unsigned int timeout;

		timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
		if (timeout == 0)
			timeout = 1;
		/* if the timer has not yet been created, do so */
		if (!cs->relock_timer_active) {
			cs->relock_timer_active = 1;
			timer_setup(&cs->relock_timer, qib_run_relock, 0);
			cs->relock_interval = timeout;
			cs->relock_timer.expires = jiffies + timeout;
			add_timer(&cs->relock_timer);
		} else {
			cs->relock_interval = timeout;
			mod_timer(&cs->relock_timer, jiffies + timeout);
		}
	}
}