0001 /*
0002  * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
0003  * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
0004  *
0005  * This software is available to you under a choice of one of two
0006  * licenses.  You may choose to be licensed under the terms of the GNU
0007  * General Public License (GPL) Version 2, available from the file
0008  * COPYING in the main directory of this source tree, or the
0009  * OpenIB.org BSD license below:
0010  *
0011  *     Redistribution and use in source and binary forms, with or
0012  *     without modification, are permitted provided that the following
0013  *     conditions are met:
0014  *
0015  *      - Redistributions of source code must retain the above
0016  *        copyright notice, this list of conditions and the following
0017  *        disclaimer.
0018  *
0019  *      - Redistributions in binary form must reproduce the above
0020  *        copyright notice, this list of conditions and the following
0021  *        disclaimer in the documentation and/or other materials
0022  *        provided with the distribution.
0023  *
0024  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
0025  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
0026  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
0027  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
0028  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
0029  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
0030  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0031  * SOFTWARE.
0032  */
0033 
0034 /*
0035  * This file contains all of the code that is specific to the
0036  * InfiniPath 7322 chip
0037  */
0038 
0039 #include <linux/interrupt.h>
0040 #include <linux/pci.h>
0041 #include <linux/delay.h>
0042 #include <linux/io.h>
0043 #include <linux/jiffies.h>
0044 #include <linux/module.h>
0045 #include <rdma/ib_verbs.h>
0046 #include <rdma/ib_smi.h>
0047 #ifdef CONFIG_INFINIBAND_QIB_DCA
0048 #include <linux/dca.h>
0049 #endif
0050 
0051 #include "qib.h"
0052 #include "qib_7322_regs.h"
0053 #include "qib_qsfp.h"
0054 
0055 #include "qib_mad.h"
0056 #include "qib_verbs.h"
0057 
0058 #undef pr_fmt
0059 #define pr_fmt(fmt) QIB_DRV_NAME " " fmt
0060 
0061 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
0062 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
0063 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
0064 static irqreturn_t qib_7322intr(int irq, void *data);
0065 static irqreturn_t qib_7322bufavail(int irq, void *data);
0066 static irqreturn_t sdma_intr(int irq, void *data);
0067 static irqreturn_t sdma_idle_intr(int irq, void *data);
0068 static irqreturn_t sdma_progress_intr(int irq, void *data);
0069 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
0070 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
0071                   struct qib_ctxtdata *rcd);
0072 static u8 qib_7322_phys_portstate(u64);
0073 static u32 qib_7322_iblink_state(u64);
0074 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
0075                    u16 linitcmd);
0076 static void force_h1(struct qib_pportdata *);
0077 static void adj_tx_serdes(struct qib_pportdata *);
0078 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
0079 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
0080 
0081 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
0082 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
0083 static void serdes_7322_los_enable(struct qib_pportdata *, int);
0084 static int serdes_7322_init_old(struct qib_pportdata *);
0085 static int serdes_7322_init_new(struct qib_pportdata *);
0086 static void dump_sdma_7322_state(struct qib_pportdata *);
0087 
0088 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
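/* Illustration: BMASK(7, 4) evaluates to 0xf0, i.e. bits 4..7 set. */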
0089 
0090 /* LE2 serdes values for different cases */
0091 #define LE2_DEFAULT 5
0092 #define LE2_5m 4
0093 #define LE2_QME 0
0094 
0095 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
0096 #define IBSD(hw_pidx) (hw_pidx + 2)
0097 
0098 /* these are variables for documentation and experimentation purposes */
0099 static const unsigned rcv_int_timeout = 375;
0100 static const unsigned rcv_int_count = 16;
0101 static const unsigned sdma_idle_cnt = 64;
0102 
0103 /* Time to stop altering Rx Equalization parameters, after link up. */
0104 #define RXEQ_DISABLE_MSECS 2500
0105 
0106 /*
0107  * Number of VLs we are configured to use (to allow for more
0108  * credits per vl, etc.)
0109  */
0110 ushort qib_num_cfg_vls = 2;
0111 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
0112 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
0113 
0114 static ushort qib_chase = 1;
0115 module_param_named(chase, qib_chase, ushort, S_IRUGO);
0116 MODULE_PARM_DESC(chase, "Enable state chase handling");
0117 
0118 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
0119 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
0120 MODULE_PARM_DESC(long_attenuation,
0121          "attenuation cutoff (dB) for long copper cable setup");
0122 
0123 static ushort qib_singleport;
0124 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
0125 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
0126 
0127 static ushort qib_krcvq01_no_msi;
0128 module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
0129 MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
0130 
0131 /*
0132  * Receive header queue sizes
0133  */
0134 static unsigned qib_rcvhdrcnt;
0135 module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
0136 MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
0137 
0138 static unsigned qib_rcvhdrsize;
0139 module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
0140 MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
0141 
0142 static unsigned qib_rcvhdrentsize;
0143 module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
0144 MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
0145 
0146 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
0147 /* for read back, default index is ~5m copper cable */
0148 static char txselect_list[MAX_ATTEN_LEN] = "10";
0149 static struct kparam_string kp_txselect = {
0150     .string = txselect_list,
0151     .maxlen = MAX_ATTEN_LEN
0152 };
0153 static int  setup_txselect(const char *, const struct kernel_param *);
0154 module_param_call(txselect, setup_txselect, param_get_string,
0155           &kp_txselect, S_IWUSR | S_IRUGO);
0156 MODULE_PARM_DESC(txselect,
0157          "Tx serdes indices (for no QSFP or invalid QSFP data)");
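/*
 * Usage illustration (assuming the standard ib_qib module name): the
 * parameters above are set at load time, e.g.
 * "modprobe ib_qib num_vls=4 singleport=1"; txselect is additionally
 * writable at runtime (S_IWUSR) via /sys/module/ib_qib/parameters/txselect.
 */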
0158 
0159 #define BOARD_QME7342 5
0160 #define BOARD_QMH7342 6
0161 #define BOARD_QMH7360 9
0162 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
0163             BOARD_QMH7342)
0164 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
0165             BOARD_QME7342)
0166 
0167 #define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
0168 
0169 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
0170 
0171 #define MASK_ACROSS(lsb, msb) \
0172     (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
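/* e.g. MASK_ACROSS(0, 17) == 0x3ffff, a mask covering bits 0 through 17 */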
0173 
0174 #define SYM_RMASK(regname, fldname) ((u64)              \
0175     QIB_7322_##regname##_##fldname##_RMASK)
0176 
0177 #define SYM_MASK(regname, fldname) ((u64)               \
0178     QIB_7322_##regname##_##fldname##_RMASK <<       \
0179      QIB_7322_##regname##_##fldname##_LSB)
0180 
0181 #define SYM_FIELD(value, regname, fldname) ((u64)   \
0182     (((value) >> SYM_LSB(regname, fldname)) &   \
0183      SYM_RMASK(regname, fldname)))
0184 
0185 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
0186 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
0187     (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
0188 
0189 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
0190 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
0191 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
0192 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
0193 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
0194 /* Below because most, but not all, fields of IntMask have that full suffix */
0195 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
0196 
0197 
0198 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
0199 
0200 /*
0201  * the size bits give us 2^N, in KB units.  0 marks as invalid,
0202  * and 7 is reserved.  We currently use only 2KB and 4KB
0203  */
0204 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
0205 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
0206 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
0207 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
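/*
 * Illustration (hedged): an expected-receive TID entry is built by ORing one
 * of the size codes above with the buffer's physical address shifted right by
 * IBA7322_TID_PA_SHIFT, since the chip stores the address without its low
 * 11 bits.
 */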
0208 
0209 #define SendIBSLIDAssignMask \
0210     QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
0211 #define SendIBSLMCMask \
0212     QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
0213 
0214 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
0215 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
0216 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
0217 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
0218 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
0219 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
0220 
0221 #define _QIB_GPIO_SDA_NUM 1
0222 #define _QIB_GPIO_SCL_NUM 0
0223 #define QIB_EEPROM_WEN_NUM 14
0224 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
0225 
0226 /* HW counter clock is at 4nsec */
0227 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
0228 
0229 /* full speed IB port 1 only */
0230 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
0231 #define PORT_SPD_CAP_SHIFT 3
0232 
0233 /* full speed featuremask, both ports */
0234 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
0235 
0236 /*
0237  * This file contains almost all the chip-specific register information and
0238  * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
0239  */
0240 
0241 /* Use defines to tie machine-generated names to lower-case names */
0242 #define kr_contextcnt KREG_IDX(ContextCnt)
0243 #define kr_control KREG_IDX(Control)
0244 #define kr_counterregbase KREG_IDX(CntrRegBase)
0245 #define kr_errclear KREG_IDX(ErrClear)
0246 #define kr_errmask KREG_IDX(ErrMask)
0247 #define kr_errstatus KREG_IDX(ErrStatus)
0248 #define kr_extctrl KREG_IDX(EXTCtrl)
0249 #define kr_extstatus KREG_IDX(EXTStatus)
0250 #define kr_gpio_clear KREG_IDX(GPIOClear)
0251 #define kr_gpio_mask KREG_IDX(GPIOMask)
0252 #define kr_gpio_out KREG_IDX(GPIOOut)
0253 #define kr_gpio_status KREG_IDX(GPIOStatus)
0254 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
0255 #define kr_debugportval KREG_IDX(DebugPortValueReg)
0256 #define kr_fmask KREG_IDX(feature_mask)
0257 #define kr_act_fmask KREG_IDX(active_feature_mask)
0258 #define kr_hwerrclear KREG_IDX(HwErrClear)
0259 #define kr_hwerrmask KREG_IDX(HwErrMask)
0260 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
0261 #define kr_intclear KREG_IDX(IntClear)
0262 #define kr_intmask KREG_IDX(IntMask)
0263 #define kr_intredirect KREG_IDX(IntRedirect0)
0264 #define kr_intstatus KREG_IDX(IntStatus)
0265 #define kr_pagealign KREG_IDX(PageAlign)
0266 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
0267 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
0268 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
0269 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
0270 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
0271 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
0272 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
0273 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
0274 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
0275 #define kr_revision KREG_IDX(Revision)
0276 #define kr_scratch KREG_IDX(Scratch)
0277 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
0278 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
0279 #define kr_sendctrl KREG_IDX(SendCtrl)
0280 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
0281 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
0282 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
0283 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
0284 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
0285 #define kr_sendpiosize KREG_IDX(SendBufSize)
0286 #define kr_sendregbase KREG_IDX(SendRegBase)
0287 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
0288 #define kr_userregbase KREG_IDX(UserRegBase)
0289 #define kr_intgranted KREG_IDX(Int_Granted)
0290 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
0291 #define kr_intblocked KREG_IDX(IntBlocked)
0292 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
0293 
0294 /*
0295  * per-port kernel registers.  Access only with qib_read_kreg_port()
0296  * or qib_write_kreg_port()
0297  */
0298 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
0299 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
0300 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
0301 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
0302 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
0303 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
0304 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
0305 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
0306 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
0307 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
0308 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
0309 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
0310 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
0311 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
0312 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
0313 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
0314 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
0315 #define krp_psstart KREG_IBPORT_IDX(PSStart)
0316 #define krp_psstat KREG_IBPORT_IDX(PSStat)
0317 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
0318 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
0319 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
0320 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
0321 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
0322 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
0323 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
0324 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
0325 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
0326 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
0327 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
0328 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
0329 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
0330 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
0331 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
0332 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
0333 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
0334 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
0335 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
0336 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
0337 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
0338 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
0339 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
0340 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
0341 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
0342 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
0343 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
0344 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
0345 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
0346 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
0347 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
0348 
0349 /*
0350  * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
0351  * or qib_write_kreg_ctxt()
0352  */
0353 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
0354 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
0355 
0356 /*
0357  * TID Flow table, per context.  Reduces
0358  * number of hdrq updates to one per flow (or on errors).
0359  * context 0 and 1 share same memory, but have distinct
0360  * addresses.  Since for now, we never use expected sends
0361  * on kernel contexts, we don't worry about that (we initialize
0362  * those entries for ctxt 0/1 on driver load twice, for example).
0363  */
0364 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
0365 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
0366 
0367 /* these are the error bits in the tid flows, and are W1C */
0368 #define TIDFLOW_ERRBITS  ( \
0369     (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
0370     SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
0371     (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
0372     SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
0373 
0374 /* Most (not all) Counters are per-IBport.
0375  * Requires LBIntCnt is at offset 0 in the group
0376  */
0377 #define CREG_IDX(regname) \
0378 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
0379 
0380 #define crp_badformat CREG_IDX(RxVersionErrCnt)
0381 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
0382 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
0383 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
0384 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
0385 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
0386 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
0387 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
0388 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
0389 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
0390 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
0391 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
0392 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
0393 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
0394 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
0395 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
0396 #define crp_pktsend CREG_IDX(TxDataPktCnt)
0397 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
0398 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
0399 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
0400 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
0401 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
0402 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
0403 #define crp_rcvebp CREG_IDX(RxEBPCnt)
0404 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
0405 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
0406 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
0407 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
0408 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
0409 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
0410 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
0411 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
0412 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
0413 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
0414 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
0415 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
0416 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
0417 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
0418 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
0419 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
0420 #define crp_wordrcv CREG_IDX(RxDwordCnt)
0421 #define crp_wordsend CREG_IDX(TxDwordCnt)
0422 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
0423 
0424 /* these are the (few) counters that are not port-specific */
0425 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
0426             QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
0427 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
0428 #define cr_lbint CREG_DEVIDX(LBIntCnt)
0429 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
0430 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
0431 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
0432 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
0433 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
0434 
0435 /* no chip register for # of IB ports supported, so define */
0436 #define NUM_IB_PORTS 2
0437 
0438 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
0439 #define NUM_VL15_BUFS NUM_IB_PORTS
0440 
0441 /*
0442  * context 0 and 1 are special, and there is no chip register that
0443  * defines this value, so we have to define it here.
0444  * These are all allocated to either 0 or 1 for single port
0445  * hardware configuration, otherwise each gets half
0446  */
0447 #define KCTXT0_EGRCNT 2048
0448 
0449 /* values for vl and port fields in PBC, 7322-specific */
0450 #define PBC_PORT_SEL_LSB 26
0451 #define PBC_PORT_SEL_RMASK 1
0452 #define PBC_VL_NUM_LSB 27
0453 #define PBC_VL_NUM_RMASK 7
0454 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
0455 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
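/*
 * Illustration (hedged): a PBC word ORs the VL into bits starting at
 * PBC_VL_NUM_LSB and the port select into bit PBC_PORT_SEL_LSB; VL15 sends
 * set PBC_7322_VL15_SEND instead, which bypasses the credit check.
 */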
0456 
0457 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
0458     [IB_RATE_2_5_GBPS] = 16,
0459     [IB_RATE_5_GBPS] = 8,
0460     [IB_RATE_10_GBPS] = 4,
0461     [IB_RATE_20_GBPS] = 2,
0462     [IB_RATE_30_GBPS] = 2,
0463     [IB_RATE_40_GBPS] = 1
0464 };
0465 
0466 static const char * const qib_sdma_state_names[] = {
0467     [qib_sdma_state_s00_hw_down]          = "s00_HwDown",
0468     [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
0469     [qib_sdma_state_s20_idle]             = "s20_Idle",
0470     [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
0471     [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
0472     [qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
0473     [qib_sdma_state_s99_running]          = "s99_Running",
0474 };
0475 
0476 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
0477 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
0478 
0479 /* link training states, from IBC */
0480 #define IB_7322_LT_STATE_DISABLED        0x00
0481 #define IB_7322_LT_STATE_LINKUP          0x01
0482 #define IB_7322_LT_STATE_POLLACTIVE      0x02
0483 #define IB_7322_LT_STATE_POLLQUIET       0x03
0484 #define IB_7322_LT_STATE_SLEEPDELAY      0x04
0485 #define IB_7322_LT_STATE_SLEEPQUIET      0x05
0486 #define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
0487 #define IB_7322_LT_STATE_CFGRCVFCFG      0x09
0488 #define IB_7322_LT_STATE_CFGWAITRMT      0x0a
0489 #define IB_7322_LT_STATE_CFGIDLE         0x0b
0490 #define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
0491 #define IB_7322_LT_STATE_TXREVLANES      0x0d
0492 #define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
0493 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
0494 #define IB_7322_LT_STATE_CFGENH          0x10
0495 #define IB_7322_LT_STATE_CFGTEST         0x11
0496 #define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
0497 #define IB_7322_LT_STATE_CFGWAITENH      0x13
0498 
0499 /* link state machine states from IBC */
0500 #define IB_7322_L_STATE_DOWN             0x0
0501 #define IB_7322_L_STATE_INIT             0x1
0502 #define IB_7322_L_STATE_ARM              0x2
0503 #define IB_7322_L_STATE_ACTIVE           0x3
0504 #define IB_7322_L_STATE_ACT_DEFER        0x4
0505 
0506 static const u8 qib_7322_physportstate[0x20] = {
0507     [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
0508     [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
0509     [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
0510     [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
0511     [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
0512     [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
0513     [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
0514     [IB_7322_LT_STATE_CFGRCVFCFG] =
0515         IB_PHYSPORTSTATE_CFG_TRAIN,
0516     [IB_7322_LT_STATE_CFGWAITRMT] =
0517         IB_PHYSPORTSTATE_CFG_TRAIN,
0518     [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
0519     [IB_7322_LT_STATE_RECOVERRETRAIN] =
0520         IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
0521     [IB_7322_LT_STATE_RECOVERWAITRMT] =
0522         IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
0523     [IB_7322_LT_STATE_RECOVERIDLE] =
0524         IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
0525     [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
0526     [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
0527     [IB_7322_LT_STATE_CFGWAITRMTTEST] =
0528         IB_PHYSPORTSTATE_CFG_TRAIN,
0529     [IB_7322_LT_STATE_CFGWAITENH] =
0530         IB_PHYSPORTSTATE_CFG_WAIT_ENH,
0531     [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
0532     [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
0533     [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
0534     [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
0535 };
0536 
0537 #ifdef CONFIG_INFINIBAND_QIB_DCA
0538 struct qib_irq_notify {
0539     int rcv;
0540     void *arg;
0541     struct irq_affinity_notify notify;
0542 };
0543 #endif
0544 
0545 struct qib_chip_specific {
0546     u64 __iomem *cregbase;
0547     u64 *cntrs;
0548     spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
0549     spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
0550     u64 main_int_mask;      /* clear bits which have dedicated handlers */
0551     u64 int_enable_mask;  /* for per port interrupts in single port mode */
0552     u64 errormask;
0553     u64 hwerrmask;
0554     u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
0555     u64 gpio_mask; /* shadow the gpio mask register */
0556     u64 extctrl; /* shadow the gpio output enable, etc... */
0557     u32 ncntrs;
0558     u32 nportcntrs;
0559     u32 cntrnamelen;
0560     u32 portcntrnamelen;
0561     u32 numctxts;
0562     u32 rcvegrcnt;
0563     u32 updthresh; /* current AvailUpdThld */
0564     u32 updthresh_dflt; /* default AvailUpdThld */
0565     u32 r1;
0566     u32 num_msix_entries;
0567     u32 sdmabufcnt;
0568     u32 lastbuf_for_pio;
0569     u32 stay_in_freeze;
0570     u32 recovery_ports_initted;
0571 #ifdef CONFIG_INFINIBAND_QIB_DCA
0572     u32 dca_ctrl;
0573     int rhdr_cpu[18];
0574     int sdma_cpu[2];
0575     u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
0576 #endif
0577     struct qib_msix_entry *msix_entries;
0578     unsigned long *sendchkenable;
0579     unsigned long *sendgrhchk;
0580     unsigned long *sendibchk;
0581     u32 rcvavail_timeout[18];
0582     char emsgbuf[128]; /* for device error interrupt msg buffer */
0583 };
0584 
0585 /* Table of entries in "human readable" form Tx Emphasis. */
0586 struct txdds_ent {
0587     u8 amp;
0588     u8 pre;
0589     u8 main;
0590     u8 post;
0591 };
0592 
0593 struct vendor_txdds_ent {
0594     u8 oui[QSFP_VOUI_LEN];
0595     u8 *partnum;
0596     struct txdds_ent sdr;
0597     struct txdds_ent ddr;
0598     struct txdds_ent qdr;
0599 };
0600 
0601 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
0602 
0603 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
0604 #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
0605 #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
0606 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
0607 
0608 #define H1_FORCE_VAL 8
0609 #define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
0610 #define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
0611 
0612 /* The static and dynamic registers are paired, and the pairs indexed by spd */
0613 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
0614     + ((spd) * 2))
0615 
0616 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
0617 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
0618 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
0619 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
0620 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
0621 
0622 struct qib_chippport_specific {
0623     u64 __iomem *kpregbase;
0624     u64 __iomem *cpregbase;
0625     u64 *portcntrs;
0626     struct qib_pportdata *ppd;
0627     wait_queue_head_t autoneg_wait;
0628     struct delayed_work autoneg_work;
0629     struct delayed_work ipg_work;
0630     struct timer_list chase_timer;
0631     /*
0632      * these 5 fields are used to establish deltas for IB symbol
0633      * errors and linkrecovery errors.  They can be reported on
0634      * some chips during link negotiation prior to INIT, and with
0635      * DDR when faking DDR negotiations with non-IBTA switches.
0636      * The chip counters are adjusted at driver unload if there is
0637      * a non-zero delta.
0638      */
0639     u64 ibdeltainprog;
0640     u64 ibsymdelta;
0641     u64 ibsymsnap;
0642     u64 iblnkerrdelta;
0643     u64 iblnkerrsnap;
0644     u64 iblnkdownsnap;
0645     u64 iblnkdowndelta;
0646     u64 ibmalfdelta;
0647     u64 ibmalfsnap;
0648     u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
0649     u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
0650     unsigned long qdr_dfe_time;
0651     unsigned long chase_end;
0652     u32 autoneg_tries;
0653     u32 recovery_init;
0654     u32 qdr_dfe_on;
0655     u32 qdr_reforce;
0656     /*
0657      * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
0658      * entry zero is unused, to simplify indexing
0659      */
0660     u8 h1_val;
0661     u8 no_eep;  /* txselect table index to use if no qsfp info */
0662     u8 ipg_tries;
0663     u8 ibmalfusesnap;
0664     struct qib_qsfp_data qsfp_data;
0665     char epmsgbuf[192]; /* for port error interrupt msg buffer */
0666     char sdmamsgbuf[192]; /* for per-port sdma error messages */
0667 };
0668 
0669 static struct {
0670     const char *name;
0671     irq_handler_t handler;
0672     int lsb;
0673     int port; /* 0 if not port-specific, else port # */
0674     int dca;
0675 } irq_table[] = {
0676     { "", qib_7322intr, -1, 0, 0 },
0677     { " (buf avail)", qib_7322bufavail,
0678         SYM_LSB(IntStatus, SendBufAvail), 0, 0},
0679     { " (sdma 0)", sdma_intr,
0680         SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
0681     { " (sdma 1)", sdma_intr,
0682         SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
0683     { " (sdmaI 0)", sdma_idle_intr,
0684         SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
0685     { " (sdmaI 1)", sdma_idle_intr,
0686         SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
0687     { " (sdmaP 0)", sdma_progress_intr,
0688         SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
0689     { " (sdmaP 1)", sdma_progress_intr,
0690         SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
0691     { " (sdmaC 0)", sdma_cleanup_intr,
0692         SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
0693     { " (sdmaC 1)", sdma_cleanup_intr,
0694         SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
0695 };
0696 
0697 #ifdef CONFIG_INFINIBAND_QIB_DCA
0698 
0699 static const struct dca_reg_map {
0700     int     shadow_inx;
0701     int     lsb;
0702     u64     mask;
0703     u16     regno;
0704 } dca_rcvhdr_reg_map[] = {
0705     { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
0706        ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
0707     { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
0708        ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
0709     { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
0710        ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
0711     { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
0712        ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
0713     { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
0714        ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
0715     { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
0716        ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
0717     { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
0718        ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
0719     { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
0720        ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
0721     { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
0722        ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
0723     { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
0724        ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
0725     { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
0726        ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
0727     { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
0728        ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
0729     { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
0730        ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
0731     { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
0732        ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
0733     { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
0734        ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
0735     { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
0736        ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
0737     { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
0738        ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
0739     { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
0740        ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
0741 };
0742 #endif
0743 
0744 /* ibcctrl bits */
0745 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
0746 /* cycle through TS1/TS2 till OK */
0747 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
0748 /* wait for TS1, then go on */
0749 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
0750 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
0751 
0752 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
0753 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
0754 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
0755 
0756 #define BLOB_7322_IBCHG 0x101
0757 
0758 static inline void qib_write_kreg(const struct qib_devdata *dd,
0759                   const u32 regno, u64 value);
0760 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
0761 static void write_7322_initregs(struct qib_devdata *);
0762 static void write_7322_init_portregs(struct qib_pportdata *);
0763 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
0764 static void check_7322_rxe_status(struct qib_pportdata *);
0765 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
0766 #ifdef CONFIG_INFINIBAND_QIB_DCA
0767 static void qib_setup_dca(struct qib_devdata *dd);
0768 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
0769 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
0770 #endif
0771 
0772 /**
0773  * qib_read_ureg32 - read 32-bit virtualized per-context register
0774  * @dd: device
0775  * @regno: register number
0776  * @ctxt: context number
0777  *
0778  * Return the contents of a register that is virtualized to be per context.
0779  * Returns -1 on errors (not distinguishable from valid contents at
0780  * runtime; we may add a separate error variable at some point).
0781  */
0782 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
0783                   enum qib_ureg regno, int ctxt)
0784 {
0785     if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
0786         return 0;
0787     return readl(regno + (u64 __iomem *)(
0788         (dd->ureg_align * ctxt) + (dd->userbase ?
0789          (char __iomem *)dd->userbase :
0790          (char __iomem *)dd->kregbase + dd->uregbase)));
0791 }
0792 
0793 /**
0794  * qib_write_ureg - write virtualized per-context register
0795  * @dd: device
0796  * @regno: register number
0797  * @value: value
0798  * @ctxt: context
0799  *
0800  * Write the contents of a register that is virtualized to be per context.
0801  */
0802 static inline void qib_write_ureg(const struct qib_devdata *dd,
0803                   enum qib_ureg regno, u64 value, int ctxt)
0804 {
0805     u64 __iomem *ubase;
0806 
0807     if (dd->userbase)
0808         ubase = (u64 __iomem *)
0809             ((char __iomem *) dd->userbase +
0810              dd->ureg_align * ctxt);
0811     else
0812         ubase = (u64 __iomem *)
0813             (dd->uregbase +
0814              (char __iomem *) dd->kregbase +
0815              dd->ureg_align * ctxt);
0816 
0817     if (dd->kregbase && (dd->flags & QIB_PRESENT))
0818         writeq(value, &ubase[regno]);
0819 }
0820 
0821 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
0822                   const u32 regno)
0823 {
0824     if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
0825         return -1;
0826     return readl((u32 __iomem *) &dd->kregbase[regno]);
0827 }
0828 
0829 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
0830                   const u32 regno)
0831 {
0832     if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
0833         return -1;
0834     return readq(&dd->kregbase[regno]);
0835 }
0836 
0837 static inline void qib_write_kreg(const struct qib_devdata *dd,
0838                   const u32 regno, u64 value)
0839 {
0840     if (dd->kregbase && (dd->flags & QIB_PRESENT))
0841         writeq(value, &dd->kregbase[regno]);
0842 }
0843 
0844 /*
0845  * not many sanity checks for the port-specific kernel register routines,
0846  * since they are only used when it's known to be safe.
0847 */
0848 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
0849                      const u16 regno)
0850 {
0851     if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
0852         return 0ULL;
0853     return readq(&ppd->cpspec->kpregbase[regno]);
0854 }
0855 
0856 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
0857                        const u16 regno, u64 value)
0858 {
0859     if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
0860         (ppd->dd->flags & QIB_PRESENT))
0861         writeq(value, &ppd->cpspec->kpregbase[regno]);
0862 }
0863 
0864 /**
0865  * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
0866  * @dd: the qlogic_ib device
0867  * @regno: the register number to write
0868  * @ctxt: the context containing the register
0869  * @value: the value to write
0870  */
0871 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
0872                        const u16 regno, unsigned ctxt,
0873                        u64 value)
0874 {
0875     qib_write_kreg(dd, regno + ctxt, value);
0876 }
0877 
0878 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
0879 {
0880     if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
0881         return 0;
0882     return readq(&dd->cspec->cregbase[regno]);
0883 
0884 
0885 }
0886 
0887 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
0888 {
0889     if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
0890         return 0;
0891     return readl(&dd->cspec->cregbase[regno]);
0892 
0893 
0894 }
0895 
0896 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
0897                     u16 regno, u64 value)
0898 {
0899     if (ppd->cpspec && ppd->cpspec->cpregbase &&
0900         (ppd->dd->flags & QIB_PRESENT))
0901         writeq(value, &ppd->cpspec->cpregbase[regno]);
0902 }
0903 
0904 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
0905                       u16 regno)
0906 {
0907     if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
0908         !(ppd->dd->flags & QIB_PRESENT))
0909         return 0;
0910     return readq(&ppd->cpspec->cpregbase[regno]);
0911 }
0912 
0913 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
0914                     u16 regno)
0915 {
0916     if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
0917         !(ppd->dd->flags & QIB_PRESENT))
0918         return 0;
0919     return readl(&ppd->cpspec->cpregbase[regno]);
0920 }
0921 
0922 /* bits in Control register */
0923 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
0924 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
0925 
0926 /* bits in general interrupt regs */
0927 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
0928 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
0929 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
0930 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
0931 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
0932 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
0933 #define QIB_I_C_ERROR INT_MASK(Err)
0934 
0935 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
0936 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
0937 #define QIB_I_GPIO INT_MASK(AssertGPIO)
0938 #define QIB_I_P_SDMAINT(pidx) \
0939     (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
0940      INT_MASK_P(SDmaProgress, pidx) | \
0941      INT_MASK_PM(SDmaCleanupDone, pidx))
0942 
0943 /* Interrupt bits that are "per port" */
0944 #define QIB_I_P_BITSEXTANT(pidx) \
0945     (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
0946     INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
0947     INT_MASK_P(SDmaProgress, pidx) | \
0948     INT_MASK_PM(SDmaCleanupDone, pidx))
0949 
0950 /* Interrupt bits that are common to a device */
0951 /* currently unused: QIB_I_SPIOSENT */
0952 #define QIB_I_C_BITSEXTANT \
0953     (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
0954     QIB_I_SPIOSENT | \
0955     QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
0956 
0957 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
0958     QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
0959 
0960 /*
0961  * Error bits that are "per port".
0962  */
0963 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
0964 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
0965 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
0966 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
0967 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
0968 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
0969 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
0970 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
0971 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
0972 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
0973 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
0974 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
0975 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
0976 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
0977 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
0978 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
0979 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
0980 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
0981 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
0982 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
0983 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
0984 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
0985 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
0986 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
0987 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
0988 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
0989 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
0990 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
0991 
0992 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
0993 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
0994 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
0995 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
0996 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
0997 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
0998 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
0999 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
1000 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
1001 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
1002 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1003 
1004 /* Error bits that are common to a device */
1005 #define QIB_E_RESET ERR_MASK(ResetNegated)
1006 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1007 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1008 
1009 
1010 /*
1011  * Per chip (rather than per-port) errors.  Most either do
1012  * nothing but trigger a print (because they self-recover, or
1013  * always occur in tandem with other errors that handle the
1014  * issue), or because they indicate errors with no recovery,
1015  * but we want to know that they happened.
1016  */
1017 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1018 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1019 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1020 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1021 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1022 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1023 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1024 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1025 
1026 /* SDMA chip errors (not per port)
1027  * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1028  * the SDMAHALT error immediately, so we just print the dup error via the
1029  * E_AUTO mechanism.  This is true of most of the per-port fatal errors
1030  * as well, but since this is port-independent, by definition, it's
1031  * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
1032  * packet send errors, and so are handled in the same manner as other
1033  * per-packet errors.
1034  */
1035 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1036 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1037 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1038 
1039 /*
1040  * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
1041  * it is used to print "common" packet errors.
1042  */
1043 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1044     QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1045     QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1046     QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1047     QIB_E_P_REBP)
1048 
1049 /* Error Bits that are Packet-related (Receive, per-port) */
1050 #define QIB_E_P_RPKTERRS (\
1051     QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1052     QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1053     QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1054     QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1055     QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1056     QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1057 
1058 /*
1059  * Error bits that are Send-related (per port)
1060  * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1061  * All of these potentially need to have a buffer disarmed
1062  */
1063 #define QIB_E_P_SPKTERRS (\
1064     QIB_E_P_SUNEXP_PKTNUM |\
1065     QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1066     QIB_E_P_SMAXPKTLEN |\
1067     QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1068     QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1069     QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1070 
1071 #define QIB_E_SPKTERRS ( \
1072         QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1073         ERR_MASK_N(SendUnsupportedVLErr) |          \
1074         QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1075 
1076 #define QIB_E_P_SDMAERRS ( \
1077     QIB_E_P_SDMAHALT | \
1078     QIB_E_P_SDMADESCADDRMISALIGN | \
1079     QIB_E_P_SDMAUNEXPDATA | \
1080     QIB_E_P_SDMAMISSINGDW | \
1081     QIB_E_P_SDMADWEN | \
1082     QIB_E_P_SDMARPYTAG | \
1083     QIB_E_P_SDMA1STDESC | \
1084     QIB_E_P_SDMABASE | \
1085     QIB_E_P_SDMATAILOUTOFBOUND | \
1086     QIB_E_P_SDMAOUTOFBOUND | \
1087     QIB_E_P_SDMAGENMISMATCH)
1088 
1089 /*
1090  * This sets some bits more than once, but makes it more obvious which
1091  * bits are not handled under other categories, and the repeat definition
1092  * is not a problem.
1093  */
1094 #define QIB_E_P_BITSEXTANT ( \
1095     QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1096     QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1097     QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1098     QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1099     )
1100 
1101 /*
1102  * These are errors that can occur when the link
1103  * changes state while a packet is being sent or received.  This doesn't
1104  * cover things like EBP or VCRC that can be the result of a sending
1105  * having the link change state, so we receive a "known bad" packet.
1106  * All of these are "per port", so renamed:
1107  */
1108 #define QIB_E_P_LINK_PKTERRS (\
1109     QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1110     QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1111     QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1112     QIB_E_P_RUNEXPCHAR)
1113 
1114 /*
1115  * This sets some bits more than once, but makes it more obvious which
1116  * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1117  * and the repeat definition is not a problem.
1118  */
1119 #define QIB_E_C_BITSEXTANT (\
1120     QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1121     QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1122     QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1123 
1124 /* Likewise Neuter E_SPKT_ERRS_IGNORE */
1125 #define E_SPKT_ERRS_IGNORE 0
1126 
1127 #define QIB_EXTS_MEMBIST_DISABLED \
1128     SYM_MASK(EXTStatus, MemBISTDisabled)
1129 #define QIB_EXTS_MEMBIST_ENDTEST \
1130     SYM_MASK(EXTStatus, MemBISTEndTest)
1131 
1132 #define QIB_E_SPIOARMLAUNCH \
1133     ERR_MASK(SendArmLaunchErr)
1134 
1135 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1136 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1137 
1138 /*
1139  * IBTA_1_2 is set when multiple speeds are enabled (normal),
1140  * and also if forced QDR (only QDR enabled).  It's enabled for the
1141  * forced QDR case so that scrambling will be enabled by the TS3
1142  * exchange, when supported by both sides of the link.
1143  */
1144 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1145 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1146 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1147 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1148 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1149 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1150     SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1151 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1152 
1153 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1154 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1155 
1156 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1157 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1158 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1159 
1160 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1161 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1162 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1163     SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1164 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1165     SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1166 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1167 
1168 #define IBA7322_REDIRECT_VEC_PER_REG 12
1169 
1170 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1171 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1172 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1173 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1174 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1175 
1176 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1177 
1178 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1179     .msg = #fldname , .sz = sizeof(#fldname) }
1180 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1181     fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
1182 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1183     HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1184     HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1185     HWE_AUTO(PCIESerdesPClkNotDetect),
1186     HWE_AUTO(PowerOnBISTFailed),
1187     HWE_AUTO(TempsenseTholdReached),
1188     HWE_AUTO(MemoryErr),
1189     HWE_AUTO(PCIeBusParityErr),
1190     HWE_AUTO(PcieCplTimeout),
1191     HWE_AUTO(PciePoisonedTLP),
1192     HWE_AUTO_P(SDmaMemReadErr, 1),
1193     HWE_AUTO_P(SDmaMemReadErr, 0),
1194     HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1195     HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1196     HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1197     HWE_AUTO(statusValidNoEop),
1198     HWE_AUTO(LATriggered),
1199     { .mask = 0, .sz = 0 }
1200 };
1201 
1202 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1203     .msg = #fldname, .sz = sizeof(#fldname) }
1204 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1205     .msg = #fldname, .sz = sizeof(#fldname) }
1206 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1207     E_AUTO(RcvEgrFullErr),
1208     E_AUTO(RcvHdrFullErr),
1209     E_AUTO(ResetNegated),
1210     E_AUTO(HardwareErr),
1211     E_AUTO(InvalidAddrErr),
1212     E_AUTO(SDmaVL15Err),
1213     E_AUTO(SBufVL15MisUseErr),
1214     E_AUTO(InvalidEEPCmd),
1215     E_AUTO(RcvContextShareErr),
1216     E_AUTO(SendVLMismatchErr),
1217     E_AUTO(SendArmLaunchErr),
1218     E_AUTO(SendSpecialTriggerErr),
1219     E_AUTO(SDmaWrongPortErr),
1220     E_AUTO(SDmaBufMaskDuplicateErr),
1221     { .mask = 0, .sz = 0 }
1222 };
1223 
1224 static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1225     E_P_AUTO(IBStatusChanged),
1226     E_P_AUTO(SHeadersErr),
1227     E_P_AUTO(VL15BufMisuseErr),
1228     /*
1229      * SDmaHaltErr is not really an error, make it clearer;
1230      */
1231     {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1232         .sz = 11},
1233     E_P_AUTO(SDmaDescAddrMisalignErr),
1234     E_P_AUTO(SDmaUnexpDataErr),
1235     E_P_AUTO(SDmaMissingDwErr),
1236     E_P_AUTO(SDmaDwEnErr),
1237     E_P_AUTO(SDmaRpyTagErr),
1238     E_P_AUTO(SDma1stDescErr),
1239     E_P_AUTO(SDmaBaseErr),
1240     E_P_AUTO(SDmaTailOutOfBoundErr),
1241     E_P_AUTO(SDmaOutOfBoundErr),
1242     E_P_AUTO(SDmaGenMismatchErr),
1243     E_P_AUTO(SendBufMisuseErr),
1244     E_P_AUTO(SendUnsupportedVLErr),
1245     E_P_AUTO(SendUnexpectedPktNumErr),
1246     E_P_AUTO(SendDroppedDataPktErr),
1247     E_P_AUTO(SendDroppedSmpPktErr),
1248     E_P_AUTO(SendPktLenErr),
1249     E_P_AUTO(SendUnderRunErr),
1250     E_P_AUTO(SendMaxPktLenErr),
1251     E_P_AUTO(SendMinPktLenErr),
1252     E_P_AUTO(RcvIBLostLinkErr),
1253     E_P_AUTO(RcvHdrErr),
1254     E_P_AUTO(RcvHdrLenErr),
1255     E_P_AUTO(RcvBadTidErr),
1256     E_P_AUTO(RcvBadVersionErr),
1257     E_P_AUTO(RcvIBFlowErr),
1258     E_P_AUTO(RcvEBPErr),
1259     E_P_AUTO(RcvUnsupportedVLErr),
1260     E_P_AUTO(RcvUnexpectedCharErr),
1261     E_P_AUTO(RcvShortPktLenErr),
1262     E_P_AUTO(RcvLongPktLenErr),
1263     E_P_AUTO(RcvMaxPktLenErr),
1264     E_P_AUTO(RcvMinPktLenErr),
1265     E_P_AUTO(RcvICRCErr),
1266     E_P_AUTO(RcvVCRCErr),
1267     E_P_AUTO(RcvFormatErr),
1268     { .mask = 0, .sz = 0 }
1269 };
1270 
1271 /*
1272  * Below generates "auto-message" for interrupts not specific to any port or
1273  * context
1274  */
1275 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1276     .msg = #fldname, .sz = sizeof(#fldname) }
1277 /* Below generates "auto-message" for interrupts specific to a port */
1278 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1279     SYM_LSB(IntMask, fldname##Mask##_0), \
1280     SYM_LSB(IntMask, fldname##Mask##_1)), \
1281     .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1282 /* For some reason, the SerDesTrimDone bits are reversed */
1283 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1284     SYM_LSB(IntMask, fldname##Mask##_1), \
1285     SYM_LSB(IntMask, fldname##Mask##_0)), \
1286     .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1287 /*
1288  * Below generates "auto-message" for interrupts specific to a context,
1289  * with ctxt-number appended
1290  */
1291 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1292     SYM_LSB(IntMask, fldname##0IntMask), \
1293     SYM_LSB(IntMask, fldname##17IntMask)), \
1294     .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1295 
1296 #define TXSYMPTOM_AUTO_P(fldname) \
1297     { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1298     .msg = #fldname, .sz = sizeof(#fldname) }
1299 static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1300     TXSYMPTOM_AUTO_P(NonKeyPacket),
1301     TXSYMPTOM_AUTO_P(GRHFail),
1302     TXSYMPTOM_AUTO_P(PkeyFail),
1303     TXSYMPTOM_AUTO_P(QPFail),
1304     TXSYMPTOM_AUTO_P(SLIDFail),
1305     TXSYMPTOM_AUTO_P(RawIPV6),
1306     TXSYMPTOM_AUTO_P(PacketTooSmall),
1307     { .mask = 0, .sz = 0 }
1308 };
1309 
1310 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1311 
1312 /*
1313  * Called when we might have an error that is specific to a particular
1314  * PIO buffer, and may need to cancel that buffer so it can be re-used;
1315  * we don't need to force the update of pioavail
1316  */
1317 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1318 {
1319     struct qib_devdata *dd = ppd->dd;
1320     u32 i;
1321     int any;
1322     u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1323     u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1324     unsigned long sbuf[4];
1325 
1326     /*
1327      * It's possible that sendbuffererror could have bits set; we might
1328      * have already done this as a result of hardware error handling.
1329      */
1330     any = 0;
1331     for (i = 0; i < regcnt; ++i) {
1332         sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1333         if (sbuf[i]) {
1334             any = 1;
1335             qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1336         }
1337     }
1338 
1339     if (any)
1340         qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1341 }
1342 
1343 /* No txe_recover yet, if ever */
1344 
1345 /* No decode_errors yet */
1346 static void err_decode(char *msg, size_t len, u64 errs,
1347                const struct qib_hwerror_msgs *msp)
1348 {
1349     u64 these, lmask;
1350     int took, multi, n = 0;
1351 
1352     while (errs && msp && msp->mask) {
1353         multi = (msp->mask & (msp->mask - 1));
1354         while (errs & msp->mask) {
1355             these = (errs & msp->mask);
1356             lmask = (these & (these - 1)) ^ these;
1357             if (len) {
1358                 if (n++) {
1359                     /* separate the strings */
1360                     *msg++ = ',';
1361                     len--;
1362                 }
1363                 /* msp->sz counts the nul */
1364                 took = min_t(size_t, msp->sz - (size_t)1, len);
1365                 memcpy(msg,  msp->msg, took);
1366                 len -= took;
1367                 msg += took;
1368                 if (len)
1369                     *msg = '\0';
1370             }
1371             errs &= ~lmask;
1372             if (len && multi) {
1373                 /* More than one bit in this mask */
1374                 int idx = -1;
1375 
1376                 while (lmask & msp->mask) {
1377                     ++idx;
1378                     lmask >>= 1;
1379                 }
1380                 took = scnprintf(msg, len, "_%d", idx);
1381                 len -= took;
1382                 msg += took;
1383             }
1384         }
1385         ++msp;
1386     }
1387     /* If some bits are left, show in hex. */
1388     if (len && errs)
1389         snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1390             (unsigned long long) errs);
1391 }
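     /*
      * A rough sketch of the resulting string (inferred from the logic
      * above, not from a captured log): each matched entry contributes
      * its .msg text, comma-separated; when an entry's mask covers more
      * than one bit (the MASK_ACROSS-style entries), "_<n>" is appended,
      * where n is the bit's offset from the low end of that mask; any
      * bits no table entry claims are appended as ",MORE:<hex>".
      */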
1392 
1393 /* only called if r1 set */
1394 static void flush_fifo(struct qib_pportdata *ppd)
1395 {
1396     struct qib_devdata *dd = ppd->dd;
1397     u32 __iomem *piobuf;
1398     u32 bufn;
1399     u32 *hdr;
1400     u64 pbc;
1401     const unsigned hdrwords = 7;
1402     static struct ib_header ibhdr = {
1403         .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1404         .lrh[1] = IB_LID_PERMISSIVE,
1405         .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1406         .lrh[3] = IB_LID_PERMISSIVE,
1407         .u.oth.bth[0] = cpu_to_be32(
1408             (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1409         .u.oth.bth[1] = cpu_to_be32(0),
1410         .u.oth.bth[2] = cpu_to_be32(0),
1411         .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1412         .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1413     };
1414 
1415     /*
1416      * Send a dummy VL15 packet to flush the launch FIFO.
1417      * This will not actually be sent since the TxeBypassIbc bit is set.
1418      */
1419     pbc = PBC_7322_VL15_SEND |
1420         (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1421         (hdrwords + SIZE_OF_CRC);
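         /*
          * Rough layout of the PBC word built above (a sketch; assuming
          * SIZE_OF_CRC is the single ICRC dword, the length field is
          * 7 + 1 = 8 dwords): the low bits carry the packet length, the
          * port select sits at PBC_PORT_SEL_LSB within the upper 32 bits,
          * and PBC_7322_VL15_SEND marks it as a VL15 send.
          */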
1422     piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1423     if (!piobuf)
1424         return;
1425     writeq(pbc, piobuf);
1426     hdr = (u32 *) &ibhdr;
1427     if (dd->flags & QIB_PIO_FLUSH_WC) {
1428         qib_flush_wc();
1429         qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1430         qib_flush_wc();
1431         __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1432         qib_flush_wc();
1433     } else
1434         qib_pio_copy(piobuf + 2, hdr, hdrwords);
1435     qib_sendbuf_done(dd, bufn);
1436 }
1437 
1438 /*
1439  * This is called with interrupts disabled and sdma_lock held.
1440  */
1441 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1442 {
1443     struct qib_devdata *dd = ppd->dd;
1444     u64 set_sendctrl = 0;
1445     u64 clr_sendctrl = 0;
1446 
1447     if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1448         set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1449     else
1450         clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1451 
1452     if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1453         set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1454     else
1455         clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1456 
1457     if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1458         set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1459     else
1460         clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1461 
1462     if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1463         set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1464                 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1465                 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1466     else
1467         clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1468                 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1469                 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1470 
1471     spin_lock(&dd->sendctrl_lock);
1472 
1473     /* If we are draining everything, block sends first */
1474     if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1475         ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1476         qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1477         qib_write_kreg(dd, kr_scratch, 0);
1478     }
1479 
1480     ppd->p_sendctrl |= set_sendctrl;
1481     ppd->p_sendctrl &= ~clr_sendctrl;
1482 
1483     if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1484         qib_write_kreg_port(ppd, krp_sendctrl,
1485                     ppd->p_sendctrl |
1486                     SYM_MASK(SendCtrl_0, SDmaCleanup));
1487     else
1488         qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1489     qib_write_kreg(dd, kr_scratch, 0);
1490 
1491     if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1492         ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1493         qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1494         qib_write_kreg(dd, kr_scratch, 0);
1495     }
1496 
1497     spin_unlock(&dd->sendctrl_lock);
1498 
1499     if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1500         flush_fifo(ppd);
1501 }
1502 
1503 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1504 {
1505     __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1506 }
1507 
1508 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1509 {
1510     /*
1511      * Set SendDmaLenGen: first clear, then set, the MSB of the
1512      * generation count to enable generation checking and load
1513      * the internal generation counter.
1514      */
1515     qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1516     qib_write_kreg_port(ppd, krp_senddmalengen,
1517                 ppd->sdma_descq_cnt |
1518                 (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1519 }
1520 
1521 /*
1522  * Must be called with sdma_lock held, or before init finished.
1523  */
1524 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1525 {
1526     /* Commit writes to memory and advance the tail on the chip */
1527     wmb();
1528     ppd->sdma_descq_tail = tail;
1529     qib_write_kreg_port(ppd, krp_senddmatail, tail);
1530 }
1531 
1532 /*
1533  * This is called with interrupts disabled and sdma_lock held.
1534  */
1535 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1536 {
1537     /*
1538      * Drain all FIFOs.
1539      * The hardware doesn't require this but we do it so that verbs
1540      * and user applications don't wait for link active to send stale
1541      * data.
1542      */
1543     sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1544 
1545     qib_sdma_7322_setlengen(ppd);
1546     qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1547     ppd->sdma_head_dma[0] = 0;
1548     qib_7322_sdma_sendctrl(ppd,
1549         ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1550 }
1551 
1552 #define DISABLES_SDMA ( \
1553     QIB_E_P_SDMAHALT | \
1554     QIB_E_P_SDMADESCADDRMISALIGN | \
1555     QIB_E_P_SDMAMISSINGDW | \
1556     QIB_E_P_SDMADWEN | \
1557     QIB_E_P_SDMARPYTAG | \
1558     QIB_E_P_SDMA1STDESC | \
1559     QIB_E_P_SDMABASE | \
1560     QIB_E_P_SDMATAILOUTOFBOUND | \
1561     QIB_E_P_SDMAOUTOFBOUND | \
1562     QIB_E_P_SDMAGENMISMATCH)
1563 
1564 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1565 {
1566     unsigned long flags;
1567     struct qib_devdata *dd = ppd->dd;
1568 
1569     errs &= QIB_E_P_SDMAERRS;
1570     err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1571            errs, qib_7322p_error_msgs);
1572 
1573     if (errs & QIB_E_P_SDMAUNEXPDATA)
1574         qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1575                 ppd->port);
1576 
1577     spin_lock_irqsave(&ppd->sdma_lock, flags);
1578 
1579     if (errs != QIB_E_P_SDMAHALT) {
1580         /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1581         qib_dev_porterr(dd, ppd->port,
1582             "SDMA %s 0x%016llx %s\n",
1583             qib_sdma_state_names[ppd->sdma_state.current_state],
1584             errs, ppd->cpspec->sdmamsgbuf);
1585         dump_sdma_7322_state(ppd);
1586     }
1587 
1588     switch (ppd->sdma_state.current_state) {
1589     case qib_sdma_state_s00_hw_down:
1590         break;
1591 
1592     case qib_sdma_state_s10_hw_start_up_wait:
1593         if (errs & QIB_E_P_SDMAHALT)
1594             __qib_sdma_process_event(ppd,
1595                 qib_sdma_event_e20_hw_started);
1596         break;
1597 
1598     case qib_sdma_state_s20_idle:
1599         break;
1600 
1601     case qib_sdma_state_s30_sw_clean_up_wait:
1602         break;
1603 
1604     case qib_sdma_state_s40_hw_clean_up_wait:
1605         if (errs & QIB_E_P_SDMAHALT)
1606             __qib_sdma_process_event(ppd,
1607                 qib_sdma_event_e50_hw_cleaned);
1608         break;
1609 
1610     case qib_sdma_state_s50_hw_halt_wait:
1611         if (errs & QIB_E_P_SDMAHALT)
1612             __qib_sdma_process_event(ppd,
1613                 qib_sdma_event_e60_hw_halted);
1614         break;
1615 
1616     case qib_sdma_state_s99_running:
1617         __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1618         __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1619         break;
1620     }
1621 
1622     spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1623 }
1624 
1625 /*
1626  * handle per-device errors (not per-port errors)
1627  */
1628 static noinline void handle_7322_errors(struct qib_devdata *dd)
1629 {
1630     char *msg;
1631     u64 iserr = 0;
1632     u64 errs;
1633     u64 mask;
1634 
1635     qib_stats.sps_errints++;
1636     errs = qib_read_kreg64(dd, kr_errstatus);
1637     if (!errs) {
1638         qib_devinfo(dd->pcidev,
1639             "device error interrupt, but no error bits set!\n");
1640         goto done;
1641     }
1642 
1643     /* don't report errors that are masked */
1644     errs &= dd->cspec->errormask;
1645     msg = dd->cspec->emsgbuf;
1646 
1647     /* do these first, they are most important */
1648     if (errs & QIB_E_HARDWARE) {
1649         *msg = '\0';
1650         qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1651     }
1652 
1653     if (errs & QIB_E_SPKTERRS) {
1654         qib_disarm_7322_senderrbufs(dd->pport);
1655         qib_stats.sps_txerrs++;
1656     } else if (errs & QIB_E_INVALIDADDR)
1657         qib_stats.sps_txerrs++;
1658     else if (errs & QIB_E_ARMLAUNCH) {
1659         qib_stats.sps_txerrs++;
1660         qib_disarm_7322_senderrbufs(dd->pport);
1661     }
1662     qib_write_kreg(dd, kr_errclear, errs);
1663 
1664     /*
1665      * The ones we mask off are handled specially below
1666      * or above.  Also mask SDMADISABLED by default as it
1667      * is too chatty.
1668      */
1669     mask = QIB_E_HARDWARE;
1670     *msg = '\0';
1671 
1672     err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1673            qib_7322error_msgs);
1674 
1675     /*
1676      * Getting reset is a tragedy for all ports. Mark the device
1677      * _and_ the ports as "offline" in a way meaningful to each.
1678      */
1679     if (errs & QIB_E_RESET) {
1680         int pidx;
1681 
1682         qib_dev_err(dd,
1683             "Got reset, requires re-init (unload and reload driver)\n");
1684         dd->flags &= ~QIB_INITTED;  /* needs re-init */
1685         /* mark as having had error */
1686         *dd->devstatusp |= QIB_STATUS_HWERROR;
1687         for (pidx = 0; pidx < dd->num_pports; ++pidx)
1688             if (dd->pport[pidx].link_speed_supported)
1689                 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1690     }
1691 
1692     if (*msg && iserr)
1693         qib_dev_err(dd, "%s error\n", msg);
1694 
1695     /*
1696      * If there were hdrq or egrfull errors, wake up any processes
1697      * waiting in poll.  We used to try to check which contexts had
1698      * the overflow, but given the cost of that and the chip reads
1699      * to support it, it's better to just wake everybody up if we
1700      * get an overflow; waiters can poll again if it's not them.
1701      */
1702     if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1703         qib_handle_urcv(dd, ~0U);
1704         if (errs & ERR_MASK(RcvEgrFullErr))
1705             qib_stats.sps_buffull++;
1706         else
1707             qib_stats.sps_hdrfull++;
1708     }
1709 
1710 done:
1711     return;
1712 }
1713 
1714 static void qib_error_tasklet(struct tasklet_struct *t)
1715 {
1716     struct qib_devdata *dd = from_tasklet(dd, t, error_tasklet);
1717 
1718     handle_7322_errors(dd);
1719     qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1720 }
1721 
1722 static void reenable_chase(struct timer_list *t)
1723 {
1724     struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
1725     struct qib_pportdata *ppd = cp->ppd;
1726 
1727     ppd->cpspec->chase_timer.expires = 0;
1728     qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1729         QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1730 }
1731 
1732 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1733         u8 ibclt)
1734 {
1735     ppd->cpspec->chase_end = 0;
1736 
1737     if (!qib_chase)
1738         return;
1739 
1740     qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1741         QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1742     ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1743     add_timer(&ppd->cpspec->chase_timer);
1744 }
1745 
1746 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1747 {
1748     u8 ibclt;
1749     unsigned long tnow;
1750 
1751     ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1752 
1753     /*
1754      * Detect and handle the state chase issue, where we can
1755      * get stuck if we are unlucky on timing on both sides of
1756      * the link.   If we are, we disable, set a timer, and
1757      * then re-enable.
1758      */
1759     switch (ibclt) {
1760     case IB_7322_LT_STATE_CFGRCVFCFG:
1761     case IB_7322_LT_STATE_CFGWAITRMT:
1762     case IB_7322_LT_STATE_TXREVLANES:
1763     case IB_7322_LT_STATE_CFGENH:
1764         tnow = jiffies;
1765         if (ppd->cpspec->chase_end &&
1766              time_after(tnow, ppd->cpspec->chase_end))
1767             disable_chase(ppd, tnow, ibclt);
1768         else if (!ppd->cpspec->chase_end)
1769             ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1770         break;
1771     default:
1772         ppd->cpspec->chase_end = 0;
1773         break;
1774     }
1775 
1776     if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1777           ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1778          ibclt == IB_7322_LT_STATE_LINKUP) &&
1779         (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1780         force_h1(ppd);
1781         ppd->cpspec->qdr_reforce = 1;
1782         if (!ppd->dd->cspec->r1)
1783             serdes_7322_los_enable(ppd, 0);
1784     } else if (ppd->cpspec->qdr_reforce &&
1785         (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1786          (ibclt == IB_7322_LT_STATE_CFGENH ||
1787         ibclt == IB_7322_LT_STATE_CFGIDLE ||
1788         ibclt == IB_7322_LT_STATE_LINKUP))
1789         force_h1(ppd);
1790 
1791     if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1792         ppd->link_speed_enabled == QIB_IB_QDR &&
1793         (ibclt == IB_7322_LT_STATE_CFGTEST ||
1794          ibclt == IB_7322_LT_STATE_CFGENH ||
1795          (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1796           ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1797         adj_tx_serdes(ppd);
1798 
1799     if (ibclt != IB_7322_LT_STATE_LINKUP) {
1800         u8 ltstate = qib_7322_phys_portstate(ibcst);
1801         u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1802                       LinkTrainingState);
1803         if (!ppd->dd->cspec->r1 &&
1804             pibclt == IB_7322_LT_STATE_LINKUP &&
1805             ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1806             ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1807             ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1808             ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1809             /* If the link went down (but not into recovery),
1810              * turn LOS back on */
1811             serdes_7322_los_enable(ppd, 1);
1812         if (!ppd->cpspec->qdr_dfe_on &&
1813             ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1814             ppd->cpspec->qdr_dfe_on = 1;
1815             ppd->cpspec->qdr_dfe_time = 0;
1816             /* On link down, reenable QDR adaptation */
1817             qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1818                         ppd->dd->cspec->r1 ?
1819                         QDR_STATIC_ADAPT_DOWN_R1 :
1820                         QDR_STATIC_ADAPT_DOWN);
1821             pr_info(
1822                 "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1823                 ppd->dd->unit, ppd->port, ibclt);
1824         }
1825     }
1826 }
1827 
1828 static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1829 
1830 /*
1831  * This is per-pport error handling.
1832  * It will likely get its own MSIx interrupt (one for each port,
1833  * although just a single handler).
1834  */
1835 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1836 {
1837     char *msg;
1838     u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1839     struct qib_devdata *dd = ppd->dd;
1840 
1841     /* do this as soon as possible */
1842     fmask = qib_read_kreg64(dd, kr_act_fmask);
1843     if (!fmask)
1844         check_7322_rxe_status(ppd);
1845 
1846     errs = qib_read_kreg_port(ppd, krp_errstatus);
1847     if (!errs)
1848         qib_devinfo(dd->pcidev,
1849              "Port%d error interrupt, but no error bits set!\n",
1850              ppd->port);
1851     if (!fmask)
1852         errs &= ~QIB_E_P_IBSTATUSCHANGED;
1853     if (!errs)
1854         goto done;
1855 
1856     msg = ppd->cpspec->epmsgbuf;
1857     *msg = '\0';
1858 
1859     if (errs & ~QIB_E_P_BITSEXTANT) {
1860         err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1861                errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1862         if (!*msg)
1863             snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1864                  "no others");
1865         qib_dev_porterr(dd, ppd->port,
1866             "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1867             (errs & ~QIB_E_P_BITSEXTANT), msg);
1868         *msg = '\0';
1869     }
1870 
1871     if (errs & QIB_E_P_SHDR) {
1872         u64 symptom;
1873 
1874         /* determine cause, then write to clear */
1875         symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1876         qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1877         err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1878                hdrchk_msgs);
1879         *msg = '\0';
1880         /* senderrbuf cleared in SPKTERRS below */
1881     }
1882 
1883     if (errs & QIB_E_P_SPKTERRS) {
1884         if ((errs & QIB_E_P_LINK_PKTERRS) &&
1885             !(ppd->lflags & QIBL_LINKACTIVE)) {
1886             /*
1887              * This can happen when trying to bring the link
1888              * up, but the IB link changes state at the "wrong"
1889              * time. The IB logic then complains that the packet
1890              * isn't valid.  We don't want to confuse people, so
1891              * we just don't print them, except at debug
1892              */
1893             err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1894                    (errs & QIB_E_P_LINK_PKTERRS),
1895                    qib_7322p_error_msgs);
1896             *msg = '\0';
1897             ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1898         }
1899         qib_disarm_7322_senderrbufs(ppd);
1900     } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1901            !(ppd->lflags & QIBL_LINKACTIVE)) {
1902         /*
1903          * This can happen when SMA is trying to bring the link
1904          * up, but the IB link changes state at the "wrong" time.
1905          * The IB logic then complains that the packet isn't
1906          * valid.  We don't want to confuse people, so we just
1907          * don't print them, except at debug
1908          */
1909         err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1910                qib_7322p_error_msgs);
1911         ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1912         *msg = '\0';
1913     }
1914 
1915     qib_write_kreg_port(ppd, krp_errclear, errs);
1916 
1917     errs &= ~ignore_this_time;
1918     if (!errs)
1919         goto done;
1920 
1921     if (errs & QIB_E_P_RPKTERRS)
1922         qib_stats.sps_rcverrs++;
1923     if (errs & QIB_E_P_SPKTERRS)
1924         qib_stats.sps_txerrs++;
1925 
1926     iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1927 
1928     if (errs & QIB_E_P_SDMAERRS)
1929         sdma_7322_p_errors(ppd, errs);
1930 
1931     if (errs & QIB_E_P_IBSTATUSCHANGED) {
1932         u64 ibcs;
1933         u8 ltstate;
1934 
1935         ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1936         ltstate = qib_7322_phys_portstate(ibcs);
1937 
1938         if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1939             handle_serdes_issues(ppd, ibcs);
1940         if (!(ppd->cpspec->ibcctrl_a &
1941               SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1942             /*
1943              * We got our interrupt, so init code should be
1944              * happy and not try alternatives. Now squelch
1945              * other "chatter" from link-negotiation (pre Init)
1946              */
1947             ppd->cpspec->ibcctrl_a |=
1948                 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1949             qib_write_kreg_port(ppd, krp_ibcctrl_a,
1950                         ppd->cpspec->ibcctrl_a);
1951         }
1952 
1953         /* Update our picture of width and speed from chip */
1954         ppd->link_width_active =
1955             (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1956                 IB_WIDTH_4X : IB_WIDTH_1X;
1957         ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1958             LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1959               SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1960                    QIB_IB_DDR : QIB_IB_SDR;
1961 
1962         if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1963             IB_PHYSPORTSTATE_DISABLED)
1964             qib_set_ib_7322_lstate(ppd, 0,
1965                    QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1966         else
1967             /*
1968              * Since going into a recovery state causes the link
1969              * state to go down and since recovery is transitory,
1970              * it is better if we "miss" ever seeing the link
1971              * training state go into recovery (i.e., ignore this
1972              * transition for link state special handling purposes)
1973              * without updating lastibcstat.
1974              */
1975             if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1976                 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1977                 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1978                 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1979                 qib_handle_e_ibstatuschanged(ppd, ibcs);
1980     }
1981     if (*msg && iserr)
1982         qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1983 
1984     if (ppd->state_wanted & ppd->lflags)
1985         wake_up_interruptible(&ppd->state_wait);
1986 done:
1987     return;
1988 }
1989 
1990 /* enable/disable chip from delivering interrupts */
1991 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1992 {
1993     if (enable) {
1994         if (dd->flags & QIB_BADINTR)
1995             return;
1996         qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1997         /* cause any pending enabled interrupts to be re-delivered */
1998         qib_write_kreg(dd, kr_intclear, 0ULL);
1999         if (dd->cspec->num_msix_entries) {
2000             /* and same for MSIx */
2001             u64 val = qib_read_kreg64(dd, kr_intgranted);
2002 
2003             if (val)
2004                 qib_write_kreg(dd, kr_intgranted, val);
2005         }
2006     } else
2007         qib_write_kreg(dd, kr_intmask, 0ULL);
2008 }
2009 
2010 /*
2011  * Try to cleanup as much as possible for anything that might have gone
2012  * wrong while in freeze mode, such as pio buffers being written by user
2013  * processes (causing armlaunch), send errors due to going into freeze mode,
2014  * etc., and try to avoid causing extra interrupts while doing so.
2015  * Forcibly update the in-memory pioavail register copies after cleanup
2016  * because the chip won't do it while in freeze mode (the register values
2017  * themselves are kept correct).
2018  * Make sure that we don't lose any important interrupts by using the chip
2019  * feature that says that writing 0 to a bit in *clear that is set in
2020  * *status will cause an interrupt to be generated again (if allowed by
2021  * the *mask value).
2022  * This is in chip-specific code because of all of the register accesses,
2023  * even though the details are similar on most chips.
2024  */
2025 static void qib_7322_clear_freeze(struct qib_devdata *dd)
2026 {
2027     int pidx;
2028 
2029     /* disable error interrupts, to avoid confusion */
2030     qib_write_kreg(dd, kr_errmask, 0ULL);
2031 
2032     for (pidx = 0; pidx < dd->num_pports; ++pidx)
2033         if (dd->pport[pidx].link_speed_supported)
2034             qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2035                         0ULL);
2036 
2037     /* also disable interrupts; errormask is sometimes overwritten */
2038     qib_7322_set_intr_state(dd, 0);
2039 
2040     /* clear the freeze, and be sure chip saw it */
2041     qib_write_kreg(dd, kr_control, dd->control);
2042     qib_read_kreg32(dd, kr_scratch);
2043 
2044     /*
2045      * Force new interrupt if any hwerr, error or interrupt bits are
2046      * still set, and clear "safe" send packet errors related to freeze
2047      * and cancelling sends.  Re-enable error interrupts before possible
2048      * force of re-interrupt on pending interrupts.
2049      */
2050     qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2051     qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2052     qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2053     /* We need to purge per-port errs and reset mask, too */
2054     for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2055         if (!dd->pport[pidx].link_speed_supported)
2056             continue;
2057         qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2058         qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2059     }
2060     qib_7322_set_intr_state(dd, 1);
2061 }
2062 
2063 /* no error handling to speak of */
2064 /**
2065  * qib_7322_handle_hwerrors - display hardware errors.
2066  * @dd: the qlogic_ib device
2067  * @msg: the output buffer
2068  * @msgl: the size of the output buffer
2069  *
2070  * Most hardware errors are catastrophic, but for right now,
2071  * we'll print them and continue.  We reuse the same message
2072  * buffer as qib_handle_errors() to avoid excessive stack
2073  * usage.
2074  */
2075 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2076                      size_t msgl)
2077 {
2078     u64 hwerrs;
2079     u32 ctrl;
2080     int isfatal = 0;
2081 
2082     hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2083     if (!hwerrs)
2084         goto bail;
2085     if (hwerrs == ~0ULL) {
2086         qib_dev_err(dd,
2087             "Read of hardware error status failed (all bits set); ignoring\n");
2088         goto bail;
2089     }
2090     qib_stats.sps_hwerrs++;
2091 
2092     /* Always clear the error status register, except BIST fail */
2093     qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2094                ~HWE_MASK(PowerOnBISTFailed));
2095 
2096     hwerrs &= dd->cspec->hwerrmask;
2097 
2098     /* no EEPROM logging, yet */
2099 
2100     if (hwerrs)
2101         qib_devinfo(dd->pcidev,
2102             "Hardware error: hwerr=0x%llx (cleared)\n",
2103             (unsigned long long) hwerrs);
2104 
2105     ctrl = qib_read_kreg32(dd, kr_control);
2106     if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2107         /*
2108          * No recovery yet...
2109          */
2110         if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2111             dd->cspec->stay_in_freeze) {
2112             /*
2113              * If any are set that we aren't ignoring, only make the
2114              * complaint once, in case it's stuck or recurring,
2115              * and we get here multiple times.
2116              * Force link down, so the switch knows, and
2117              * LEDs are turned off.
2118              */
2119             if (dd->flags & QIB_INITTED)
2120                 isfatal = 1;
2121         } else
2122             qib_7322_clear_freeze(dd);
2123     }
2124 
2125     if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2126         isfatal = 1;
2127         strlcpy(msg,
2128             "[Memory BIST test failed, InfiniPath hardware unusable]",
2129             msgl);
2130         /* ignore from now on, so disable until driver reloaded */
2131         dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2132         qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2133     }
2134 
2135     err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2136 
2137     /* Ignore esoteric PLL failures et al. */
2138 
2139     qib_dev_err(dd, "%s hardware error\n", msg);
2140 
2141     if (hwerrs &
2142            (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2143             SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2144         int pidx = 0;
2145         int err;
2146         unsigned long flags;
2147         struct qib_pportdata *ppd = dd->pport;
2148 
2149         for (; pidx < dd->num_pports; ++pidx, ppd++) {
2150             err = 0;
2151             if (pidx == 0 && (hwerrs &
2152                 SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2153                 err++;
2154             if (pidx == 1 && (hwerrs &
2155                 SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2156                 err++;
2157             if (err) {
2158                 spin_lock_irqsave(&ppd->sdma_lock, flags);
2159                 dump_sdma_7322_state(ppd);
2160                 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2161             }
2162         }
2163     }
2164 
2165     if (isfatal && !dd->diag_client) {
2166         qib_dev_err(dd,
2167             "Fatal Hardware Error, no longer usable, SN %.16s\n",
2168             dd->serial);
2169         /*
2170          * for /sys status file and user programs to print; if no
2171          * trailing brace is copied, we'll know it was truncated.
2172          */
2173         if (dd->freezemsg)
2174             snprintf(dd->freezemsg, dd->freezelen,
2175                  "{%s}", msg);
2176         qib_disable_after_error(dd);
2177     }
2178 bail:;
2179 }
2180 
2181 /**
2182  * qib_7322_init_hwerrors - enable hardware errors
2183  * @dd: the qlogic_ib device
2184  *
2185  * now that we have finished initializing everything that might reasonably
2186  * cause a hardware error, and cleared those error bits as they occur,
2187  * we can enable hardware errors in the mask (potentially enabling
2188  * freeze mode), and enable hardware errors as errors (along with
2189  * everything else) in errormask
2190  */
2191 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2192 {
2193     int pidx;
2194     u64 extsval;
2195 
2196     extsval = qib_read_kreg64(dd, kr_extstatus);
2197     if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2198              QIB_EXTS_MEMBIST_ENDTEST)))
2199         qib_dev_err(dd, "MemBIST did not complete!\n");
2200 
2201     /* never clear BIST failure, so reported on each driver load */
2202     qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2203     qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2204 
2205     /* clear all */
2206     qib_write_kreg(dd, kr_errclear, ~0ULL);
2207     /* enable errors that are masked, at least this first time. */
2208     qib_write_kreg(dd, kr_errmask, ~0ULL);
2209     dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2210     for (pidx = 0; pidx < dd->num_pports; ++pidx)
2211         if (dd->pport[pidx].link_speed_supported)
2212             qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2213                         ~0ULL);
2214 }
2215 
2216 /*
2217  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2218  * on chips that are count-based, rather than trigger-based.  There is no
2219  * reference counting, but that's also fine, given the intended use.
2220  * Only chip-specific because it's all register accesses
2221  */
2222 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2223 {
2224     if (enable) {
2225         qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2226         dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2227     } else
2228         dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2229     qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2230 }
2231 
2232 /*
2233  * Formerly took parameter <which> in pre-shifted,
2234  * pre-merged form with LinkCmd and LinkInitCmd
2235  * together, and assuming the zero was NOP.
2236  */
2237 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2238                    u16 linitcmd)
2239 {
2240     u64 mod_wd;
2241     struct qib_devdata *dd = ppd->dd;
2242     unsigned long flags;
2243 
2244     if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2245         /*
2246          * If we are told to disable, note that so link-recovery
2247          * code does not attempt to bring us back up.
2248          * Also reset everything that we can, so we start
2249          * completely clean when re-enabled (before we
2250          * actually issue the disable to the IBC)
2251          */
2252         qib_7322_mini_pcs_reset(ppd);
2253         spin_lock_irqsave(&ppd->lflags_lock, flags);
2254         ppd->lflags |= QIBL_IB_LINK_DISABLED;
2255         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2256     } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2257         /*
2258          * Any other linkinitcmd will lead to LINKDOWN and then
2259          * to INIT (if all is well), so clear flag to let
2260          * link-recovery code attempt to bring us back up.
2261          */
2262         spin_lock_irqsave(&ppd->lflags_lock, flags);
2263         ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2264         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2265         /*
2266          * Clear status change interrupt reduction so the
2267          * new state is seen.
2268          */
2269         ppd->cpspec->ibcctrl_a &=
2270             ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2271     }
2272 
2273     mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2274         (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2275 
2276     qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2277                 mod_wd);
2278     /* write to chip to prevent back-to-back writes of ibc reg */
2279     qib_write_kreg(dd, kr_scratch, 0);
2280 
2281 }
2282 
2283 /*
2284  * The total RCV buffer memory is 64KB, used for both ports, and is
2285  * in units of 64 bytes (same as IB flow control credit unit).
2286  * The consumedVL units in the same registers are in 32 byte units!
2287  * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2288  * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2289  * in krp_rxcreditvl15, rather than 10.
2290  */
2291 #define RCV_BUF_UNITSZ 64
2292 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2293 
2294 static void set_vls(struct qib_pportdata *ppd)
2295 {
2296     int i, numvls, totcred, cred_vl, vl0extra;
2297     struct qib_devdata *dd = ppd->dd;
2298     u64 val;
2299 
2300     numvls = qib_num_vls(ppd->vls_operational);
2301 
2302     /*
2303      * Set up per-VL credits. Below is a kluge based on these assumptions:
2304      * 1) port is disabled at the time early_init is called.
2305      * 2) give VL15 17 credits, for two max-plausible packets.
2306      * 3) Give VL0-N the rest, with any rounding excess used for VL0
2307      */
2308     /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2309     totcred = NUM_RCV_BUF_UNITS(dd);
2310     cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2311     totcred -= cred_vl;
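         /*
          * Worked numbers for the usual dual-port case (an illustration
          * only): NUM_RCV_BUF_UNITS() = 64K / (64 * 2) = 512 units per
          * port; two 288-byte VL15 packets need ceil(576 / 64) = 9 units,
          * leaving 503 units to split across the data VLs, with any
          * remainder from that division going to VL0.
          */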
2312     qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2313     cred_vl = totcred / numvls;
2314     vl0extra = totcred - cred_vl * numvls;
2315     qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2316     for (i = 1; i < numvls; i++)
2317         qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2318     for (; i < 8; i++) /* no buffer space for other VLs */
2319         qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2320 
2321     /* Notify IBC that credits need to be recalculated */
2322     val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2323     val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2324     qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2325     qib_write_kreg(dd, kr_scratch, 0ULL);
2326     val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2327     qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2328 
2329     for (i = 0; i < numvls; i++)
2330         val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2331     val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2332 
2333     /* Change the number of operational VLs */
2334     ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2335                 ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2336         ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2337     qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2338     qib_write_kreg(dd, kr_scratch, 0ULL);
2339 }
2340 
2341 /*
2342  * The code that deals with actual SerDes is in serdes_7322_init().
2343  * Compared to the code for iba7220, it is minimal.
2344  */
2345 static int serdes_7322_init(struct qib_pportdata *ppd);
2346 
2347 /**
2348  * qib_7322_bringup_serdes - bring up the serdes
2349  * @ppd: physical port on the qlogic_ib device
2350  */
2351 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2352 {
2353     struct qib_devdata *dd = ppd->dd;
2354     u64 val, guid, ibc;
2355     unsigned long flags;
2356 
2357     /*
2358      * SerDes model not in Pd, but still need to
2359      * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2360      * eventually.
2361      */
2362     /* Put IBC in reset, sends disabled (should be in reset already) */
2363     ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2364     qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2365     qib_write_kreg(dd, kr_scratch, 0ULL);
2366 
2367     /* ensure previous Tx parameters are not still forced */
2368     qib_write_kreg_port(ppd, krp_tx_deemph_override,
2369         SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2370         reset_tx_deemphasis_override));
2371 
2372     if (qib_compat_ddr_negotiate) {
2373         ppd->cpspec->ibdeltainprog = 1;
2374         ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2375                         crp_ibsymbolerr);
2376         ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2377                         crp_iblinkerrrecov);
2378     }
2379 
2380     /* flowcontrolwatermark is in units of KBytes */
2381     ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2382     /*
2383      * Flow control is sent this often, even if no changes in
2384      * buffer space occur.  Units are 128ns for this chip.
2385      * Set to 3usec.
2386      */
2387     ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
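         /* 24 * 128ns = 3072ns, i.e. the ~3usec period noted above */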
2388     /* max error tolerance */
2389     ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2390     /* IB credit flow control. */
2391     ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2392     /*
2393      * set initial max size pkt IBC will send, including ICRC; it's the
2394      * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2395      */
2396     ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2397         SYM_LSB(IBCCtrlA_0, MaxPktLen);
2398     ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2399 
2400     /*
2401      * Reset the PCS interface to the serdes (and also ibc, which is still
2402      * in reset from above).  Writes new value of ibcctrl_a as last step.
2403      */
2404     qib_7322_mini_pcs_reset(ppd);
2405 
2406     if (!ppd->cpspec->ibcctrl_b) {
2407         unsigned lse = ppd->link_speed_enabled;
2408 
2409         /*
2410          * Not on re-init after reset, establish shadow
2411          * and force initial config.
2412          */
2413         ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2414                                  krp_ibcctrl_b);
2415         ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2416                 IBA7322_IBC_SPEED_DDR |
2417                 IBA7322_IBC_SPEED_SDR |
2418                 IBA7322_IBC_WIDTH_AUTONEG |
2419                 SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2420         if (lse & (lse - 1)) /* Multiple speeds enabled */
2421             ppd->cpspec->ibcctrl_b |=
2422                 (lse << IBA7322_IBC_SPEED_LSB) |
2423                 IBA7322_IBC_IBTA_1_2_MASK |
2424                 IBA7322_IBC_MAX_SPEED_MASK;
2425         else
2426             ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2427                 IBA7322_IBC_SPEED_QDR |
2428                  IBA7322_IBC_IBTA_1_2_MASK :
2429                 (lse == QIB_IB_DDR) ?
2430                     IBA7322_IBC_SPEED_DDR :
2431                     IBA7322_IBC_SPEED_SDR;
2432         if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2433             (IB_WIDTH_1X | IB_WIDTH_4X))
2434             ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2435         else
2436             ppd->cpspec->ibcctrl_b |=
2437                 ppd->link_width_enabled == IB_WIDTH_4X ?
2438                 IBA7322_IBC_WIDTH_4X_ONLY :
2439                 IBA7322_IBC_WIDTH_1X_ONLY;
2440 
2441         /* always enable these on driver reload, not sticky */
2442         ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2443             IBA7322_IBC_HRTBT_MASK);
2444     }
2445     qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2446 
2447     /* setup so we have more time at CFGTEST to change H1 */
2448     val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2449     val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2450     val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2451     qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2452 
2453     serdes_7322_init(ppd);
2454 
2455     guid = be64_to_cpu(ppd->guid);
2456     if (!guid) {
2457         if (dd->base_guid)
2458             guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2459         ppd->guid = cpu_to_be64(guid);
2460     }
2461 
2462     qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2463     /* write to chip to prevent back-to-back writes of ibc reg */
2464     qib_write_kreg(dd, kr_scratch, 0);
2465 
2466     /* Enable port */
2467     ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2468     set_vls(ppd);
2469 
2470     /* initially come up DISABLED, without sending anything. */
2471     val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2472                     QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2473     qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2474     qib_write_kreg(dd, kr_scratch, 0ULL);
2475     /* clear the linkinit cmds */
2476     ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2477 
2478     /* be paranoid against later code motion, etc. */
2479     spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2480     ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2481     qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2482     spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2483 
2484     /* Also enable IBSTATUSCHG interrupt.  */
2485     val = qib_read_kreg_port(ppd, krp_errmask);
2486     qib_write_kreg_port(ppd, krp_errmask,
2487         val | ERR_MASK_N(IBStatusChanged));
2488 
2489     /* Always zero until we start messing with SerDes for real */
2490     return 0;
2491 }
2492 
2493 /**
2494  * qib_7322_mini_quiet_serdes - set serdes to txidle
2495  * @ppd: the qlogic_ib device
2496  * Called when driver is being unloaded
2497  */
2498 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2499 {
2500     u64 val;
2501     unsigned long flags;
2502 
2503     qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2504 
2505     spin_lock_irqsave(&ppd->lflags_lock, flags);
2506     ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2507     spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2508     wake_up(&ppd->cpspec->autoneg_wait);
2509     cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2510     if (ppd->dd->cspec->r1)
2511         cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2512 
2513     ppd->cpspec->chase_end = 0;
2514     if (ppd->cpspec->chase_timer.function) /* if initted */
2515         del_timer_sync(&ppd->cpspec->chase_timer);
2516 
2517     /*
2518      * Despite the name, actually disables IBC as well. Do it when
2519      * we are as sure as possible that no more packets can be
2520      * received, following the down and the PCS reset.
2521      * The actual disabling happens in qib_7322_mini_pcs_reset(),
2522      * along with the PCS being reset.
2523      */
2524     ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2525     qib_7322_mini_pcs_reset(ppd);
2526 
2527     /*
2528      * Update the adjusted counters so the adjustment persists
2529      * across driver reload.
2530      */
2531     if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2532         ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2533         struct qib_devdata *dd = ppd->dd;
2534         u64 diagc;
2535 
2536         /* enable counter writes */
2537         diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2538         qib_write_kreg(dd, kr_hwdiagctrl,
2539                    diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2540 
2541         if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2542             val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2543             if (ppd->cpspec->ibdeltainprog)
2544                 val -= val - ppd->cpspec->ibsymsnap;
2545             val -= ppd->cpspec->ibsymdelta;
2546             write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2547         }
2548         if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2549             val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2550             if (ppd->cpspec->ibdeltainprog)
2551                 val -= val - ppd->cpspec->iblnkerrsnap;
2552             val -= ppd->cpspec->iblnkerrdelta;
2553             write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2554         }
2555         if (ppd->cpspec->iblnkdowndelta) {
2556             val = read_7322_creg32_port(ppd, crp_iblinkdown);
2557             val += ppd->cpspec->iblnkdowndelta;
2558             write_7322_creg_port(ppd, crp_iblinkdown, val);
2559         }
2560         /*
2561          * No need to save ibmalfdelta since IB perfcounters
2562          * are cleared on driver reload.
2563          */
2564 
2565         /* and disable counter writes */
2566         qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2567     }
2568 }
2569 
2570 /**
2571  * qib_setup_7322_setextled - set the state of the two external LEDs
2572  * @ppd: physical port on the qlogic_ib device
2573  * @on: whether the link is up or not
2574  *
2575  * The exact combo of LEDs when @on is true is determined by looking
2576  * at the ibcstatus.
2577  *
2578  * These LEDs indicate the physical and logical state of IB link.
2579  * For this chip (at least with recommended board pinouts), LED1
2580  * is Yellow (logical state) and LED2 is Green (physical state).
2581  *
2582  * Note:  We try to match the Mellanox HCA LED behavior as best
2583  * we can.  Green indicates physical link state is OK (something is
2584  * plugged in, and we can train).
2585  * Amber indicates the link is logically up (ACTIVE).
2586  * Mellanox further blinks the amber LED to indicate data packet
2587  * activity, but we have no hardware support for that, so it would
2588  * require waking up every 10-20 msecs and checking the counters
2589  * on the chip, and then turning the LED off if appropriate.  That's
2590  * visible overhead, so not something we will do.
2591  */
2592 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2593 {
2594     struct qib_devdata *dd = ppd->dd;
2595     u64 extctl, ledblink = 0, val;
2596     unsigned long flags;
2597     int yel, grn;
2598 
2599     /*
2600      * The diags use the LED to indicate diag info, so we leave
2601      * the external LED alone when the diags are running.
2602      */
2603     if (dd->diag_client)
2604         return;
2605 
2606     /* Allow override of LED display for, e.g., locating a system in a rack */
2607     if (ppd->led_override) {
2608         grn = (ppd->led_override & QIB_LED_PHYS);
2609         yel = (ppd->led_override & QIB_LED_LOG);
2610     } else if (on) {
2611         val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2612         grn = qib_7322_phys_portstate(val) ==
2613             IB_PHYSPORTSTATE_LINKUP;
2614         yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2615     } else {
2616         grn = 0;
2617         yel = 0;
2618     }
2619 
2620     spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2621     extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2622         ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2623     if (grn) {
2624         extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2625         /*
2626          * Counts are in chip clock (4ns) periods.
2627          * This is 1/16 sec (66.6ms) on,
2628          * 3/16 sec (187.5 ms) off, with packets rcvd.
2629          */
2630         ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2631             ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
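             /*
              * Checking the arithmetic: 66600us * 1000 / 4ns = 16,650,000
              * chip clocks (66.6ms on); 187500us * 1000 / 4ns = 46,875,000
              * chip clocks (187.5ms off).
              */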
2632     }
2633     if (yel)
2634         extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2635     dd->cspec->extctrl = extctl;
2636     qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2637     spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2638 
2639     if (ledblink) /* blink the LED on packet receive */
2640         qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2641 }
2642 
2643 #ifdef CONFIG_INFINIBAND_QIB_DCA
2644 
2645 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2646 {
2647     switch (event) {
2648     case DCA_PROVIDER_ADD:
2649         if (dd->flags & QIB_DCA_ENABLED)
2650             break;
2651         if (!dca_add_requester(&dd->pcidev->dev)) {
2652             qib_devinfo(dd->pcidev, "DCA enabled\n");
2653             dd->flags |= QIB_DCA_ENABLED;
2654             qib_setup_dca(dd);
2655         }
2656         break;
2657     case DCA_PROVIDER_REMOVE:
2658         if (dd->flags & QIB_DCA_ENABLED) {
2659             dca_remove_requester(&dd->pcidev->dev);
2660             dd->flags &= ~QIB_DCA_ENABLED;
2661             dd->cspec->dca_ctrl = 0;
2662             qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2663                 dd->cspec->dca_ctrl);
2664         }
2665         break;
2666     }
2667     return 0;
2668 }
2669 
2670 static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2671 {
2672     struct qib_devdata *dd = rcd->dd;
2673     struct qib_chip_specific *cspec = dd->cspec;
2674 
2675     if (!(dd->flags & QIB_DCA_ENABLED))
2676         return;
2677     if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2678         const struct dca_reg_map *rmp;
2679 
2680         cspec->rhdr_cpu[rcd->ctxt] = cpu;
2681         rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2682         cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2683         cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2684             (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2685         qib_devinfo(dd->pcidev,
2686             "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2687             (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2688         qib_write_kreg(dd, rmp->regno,
2689                    cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2690         cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2691         qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2692     }
2693 }
2694 
2695 static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2696 {
2697     struct qib_devdata *dd = ppd->dd;
2698     struct qib_chip_specific *cspec = dd->cspec;
2699     unsigned pidx = ppd->port - 1;
2700 
2701     if (!(dd->flags & QIB_DCA_ENABLED))
2702         return;
2703     if (cspec->sdma_cpu[pidx] != cpu) {
2704         cspec->sdma_cpu[pidx] = cpu;
2705         cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2706             SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2707             SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2708         cspec->dca_rcvhdr_ctrl[4] |=
2709             (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2710                 (ppd->hw_pidx ?
2711                     SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2712                     SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2713         qib_devinfo(dd->pcidev,
2714             "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2715             (long long) cspec->dca_rcvhdr_ctrl[4]);
2716         qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2717                    cspec->dca_rcvhdr_ctrl[4]);
2718         cspec->dca_ctrl |= ppd->hw_pidx ?
2719             SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2720             SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2721         qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2722     }
2723 }
2724 
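     /*
      * One-time setup once DCA has been enabled: forget any previously
      * chosen CPUs, program a transfer count of 1 into every RcvHdrq
      * DCA field (shadowed in dca_rcvhdr_ctrl[]), push those shadows to
      * DCACtrlB..DCACtrlF, and hook an affinity notifier on each MSI-X
      * vector so the DCA targets track future IRQ affinity changes.
      */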
2725 static void qib_setup_dca(struct qib_devdata *dd)
2726 {
2727     struct qib_chip_specific *cspec = dd->cspec;
2728     int i;
2729 
2730     for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2731         cspec->rhdr_cpu[i] = -1;
2732     for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2733         cspec->sdma_cpu[i] = -1;
2734     cspec->dca_rcvhdr_ctrl[0] =
2735         (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2736         (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2737         (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2738         (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2739     cspec->dca_rcvhdr_ctrl[1] =
2740         (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2741         (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2742         (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2743         (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2744     cspec->dca_rcvhdr_ctrl[2] =
2745         (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2746         (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2747         (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2748         (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2749     cspec->dca_rcvhdr_ctrl[3] =
2750         (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2751         (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2752         (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2753         (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2754     cspec->dca_rcvhdr_ctrl[4] =
2755         (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2756         (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2757     for (i = 0; i < ARRAY_SIZE(cspec->dca_rcvhdr_ctrl); i++)
2758         qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2759                    cspec->dca_rcvhdr_ctrl[i]);
2760     for (i = 0; i < cspec->num_msix_entries; i++)
2761         setup_dca_notifier(dd, i);
2762 }
2763 
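     /*
      * IRQ affinity notifier callback: when an MSI-X vector is moved to
      * a new CPU mask, point the matching receive header queue or send
      * DMA DCA registers at the first CPU in that mask.
      */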
2764 static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2765                  const cpumask_t *mask)
2766 {
2767     struct qib_irq_notify *n =
2768         container_of(notify, struct qib_irq_notify, notify);
2769     int cpu = cpumask_first(mask);
2770 
2771     if (n->rcv) {
2772         struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2773 
2774         qib_update_rhdrq_dca(rcd, cpu);
2775     } else {
2776         struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2777 
2778         qib_update_sdma_dca(ppd, cpu);
2779     }
2780 }
2781 
2782 static void qib_irq_notifier_release(struct kref *ref)
2783 {
2784     struct qib_irq_notify *n =
2785         container_of(ref, struct qib_irq_notify, notify.kref);
2786     struct qib_devdata *dd;
2787 
2788     if (n->rcv) {
2789         struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2790 
2791         dd = rcd->dd;
2792     } else {
2793         struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2794 
2795         dd = ppd->dd;
2796     }
2797     qib_devinfo(dd->pcidev,
2798         "release on HCA notify 0x%p n 0x%p\n", ref, n);
2799     kfree(n);
2800 }
2801 #endif
2802 
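     /*
      * Release every IRQ this chip requested (MSI-X or INTx): drop DCA
      * notifiers and affinity hints, free the per-vector cpumasks and
      * the vectors themselves, then ack anything the chip still shows
      * as granted so nothing fires after teardown.
      */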
2803 static void qib_7322_free_irq(struct qib_devdata *dd)
2804 {
2805     u64 intgranted;
2806     int i;
2807 
2808     dd->cspec->main_int_mask = ~0ULL;
2809 
2810     for (i = 0; i < dd->cspec->num_msix_entries; i++) {
2811         /* only free IRQs that were allocated */
2812         if (dd->cspec->msix_entries[i].arg) {
2813 #ifdef CONFIG_INFINIBAND_QIB_DCA
2814             reset_dca_notifier(dd, i);
2815 #endif
2816             irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
2817                           NULL);
2818             free_cpumask_var(dd->cspec->msix_entries[i].mask);
2819             pci_free_irq(dd->pcidev, i,
2820                      dd->cspec->msix_entries[i].arg);
2821         }
2822     }
2823 
2824     /* If num_msix_entries was 0, disable the INTx IRQ */
2825     if (!dd->cspec->num_msix_entries)
2826         pci_free_irq(dd->pcidev, 0, dd);
2827     else
2828         dd->cspec->num_msix_entries = 0;
2829 
2830     pci_free_irq_vectors(dd->pcidev);
2831 
2832     /* make sure no MSIx interrupts are left pending */
2833     intgranted = qib_read_kreg64(dd, kr_intgranted);
2834     if (intgranted)
2835         qib_write_kreg(dd, kr_intgranted, intgranted);
2836 }
2837 
2838 static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2839 {
2840     int i;
2841 
2842 #ifdef CONFIG_INFINIBAND_QIB_DCA
2843     if (dd->flags & QIB_DCA_ENABLED) {
2844         dca_remove_requester(&dd->pcidev->dev);
2845         dd->flags &= ~QIB_DCA_ENABLED;
2846         dd->cspec->dca_ctrl = 0;
2847         qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2848     }
2849 #endif
2850 
2851     qib_7322_free_irq(dd);
2852     kfree(dd->cspec->cntrs);
2853     bitmap_free(dd->cspec->sendchkenable);
2854     bitmap_free(dd->cspec->sendgrhchk);
2855     bitmap_free(dd->cspec->sendibchk);
2856     kfree(dd->cspec->msix_entries);
2857     for (i = 0; i < dd->num_pports; i++) {
2858         unsigned long flags;
2859         u32 mask = QSFP_GPIO_MOD_PRS_N |
2860             (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2861 
2862         kfree(dd->pport[i].cpspec->portcntrs);
2863         if (dd->flags & QIB_HAS_QSFP) {
2864             spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2865             dd->cspec->gpio_mask &= ~mask;
2866             qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2867             spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2868         }
2869     }
2870 }
2871 
2872 /* handle SDMA interrupts */
2873 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2874 {
2875     struct qib_pportdata *ppd0 = &dd->pport[0];
2876     struct qib_pportdata *ppd1 = &dd->pport[1];
2877     u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2878         INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2879     u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2880         INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2881 
2882     if (intr0)
2883         qib_sdma_intr(ppd0);
2884     if (intr1)
2885         qib_sdma_intr(ppd1);
2886 
2887     if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2888         qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2889     if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2890         qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2891 }
2892 
2893 /*
2894  * Set or clear the Send buffer available interrupt enable bit.
2895  */
2896 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2897 {
2898     unsigned long flags;
2899 
2900     spin_lock_irqsave(&dd->sendctrl_lock, flags);
2901     if (needint)
2902         dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2903     else
2904         dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2905     qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2906     qib_write_kreg(dd, kr_scratch, 0ULL);
2907     spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2908 }
2909 
2910 /*
2911  * Somehow got an interrupt with reserved bits set in interrupt status.
2912  * Print a message so we know it happened, then clear them.
2913  * keep mainline interrupt handler cache-friendly
2914  */
2915 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2916 {
2917     u64 kills;
2918 
2919     kills = istat & ~QIB_I_BITSEXTANT;
2920     /* log the offending bits, then mask them off below */
2921     qib_dev_err(dd,
2922         "Clearing reserved interrupt(s) 0x%016llx\n",
2923         (unsigned long long) kills);
2924     qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2925 }
2926 
2927 /* keep mainline interrupt handler cache-friendly */
2928 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2929 {
2930     u32 gpiostatus;
2931     int handled = 0;
2932     int pidx;
2933 
2934     /*
2935      * Boards for this chip currently don't use GPIO interrupts,
2936      * so clear by writing GPIOstatus to GPIOclear, and complain
2937      * to developer.  To avoid endless repeats, clear
2938      * the bits in the mask, since there is some kind of
2939      * programming error or chip problem.
2940      */
2941     gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2942     /*
2943      * In theory, writing GPIOstatus to GPIOclear could
2944      * have a bad side-effect on some diagnostic that wanted
2945      * to poll for a status-change, but the various shadows
2946      * make that problematic at best. Diags will just suppress
2947      * all GPIO interrupts during such tests.
2948      */
2949     qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2950     /*
2951      * Check for QSFP MOD_PRS changes
2952      * only works for single port if IB1 != pidx1
2953      */
2954     for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2955          ++pidx) {
2956         struct qib_pportdata *ppd;
2957         struct qib_qsfp_data *qd;
2958         u32 mask;
2959 
2960         if (!dd->pport[pidx].link_speed_supported)
2961             continue;
2962         mask = QSFP_GPIO_MOD_PRS_N;
2963         ppd = dd->pport + pidx;
2964         mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2965         if (gpiostatus & dd->cspec->gpio_mask & mask) {
2966             u64 pins;
2967 
2968             qd = &ppd->cpspec->qsfp_data;
2969             gpiostatus &= ~mask;
2970             pins = qib_read_kreg64(dd, kr_extstatus);
2971             pins >>= SYM_LSB(EXTStatus, GPIOIn);
2972             if (!(pins & mask)) {
2973                 ++handled;
2974                 qd->t_insert = jiffies;
2975                 queue_work(ib_wq, &qd->work);
2976             }
2977         }
2978     }
2979 
2980     if (gpiostatus && !handled) {
2981         const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2982         u32 gpio_irq = mask & gpiostatus;
2983 
2984         /*
2985          * Clear any troublemakers, and update chip from shadow
2986          */
2987         dd->cspec->gpio_mask &= ~gpio_irq;
2988         qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2989     }
2990 }
2991 
2992 /*
2993  * Handle errors and unusual events first, separate function
2994  * to improve cache hits for fast path interrupt handling.
2995  */
2996 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2997 {
2998     if (istat & ~QIB_I_BITSEXTANT)
2999         unknown_7322_ibits(dd, istat);
3000     if (istat & QIB_I_GPIO)
3001         unknown_7322_gpio_intr(dd);
3002     if (istat & QIB_I_C_ERROR) {
3003         qib_write_kreg(dd, kr_errmask, 0ULL);
3004         tasklet_schedule(&dd->error_tasklet);
3005     }
3006     if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3007         handle_7322_p_errors(dd->rcd[0]->ppd);
3008     if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3009         handle_7322_p_errors(dd->rcd[1]->ppd);
3010 }
3011 
3012 /*
3013  * Dynamically adjust the rcv int timeout for a context based on incoming
3014  * packet rate.
3015  */
3016 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3017 {
3018     struct qib_devdata *dd = rcd->dd;
3019     u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3020 
3021     /*
3022      * Dynamically adjust idle timeout on chip
3023      * based on number of packets processed.
3024      */
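         /*
          * Roughly: halve the timeout when fewer than rcv_int_count
          * packets arrived for this interrupt, and double it (capped at
          * rcv_int_timeout) when at least that many did; both limits
          * are driver tunables.
          */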
3025     if (npkts < rcv_int_count && timeout > 2)
3026         timeout >>= 1;
3027     else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3028         timeout = min(timeout << 1, rcv_int_timeout);
3029     else
3030         return;
3031 
3032     dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3033     qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3034 }
3035 
3036 /*
3037  * This is the main interrupt handler.
3038  * It will normally only be used for low frequency interrupts but may
3039  * have to handle all interrupts if INTx is enabled or fewer than normal
3040  * MSIx interrupts were allocated.
3041  * This routine should ignore the interrupt bits for any of the
3042  * dedicated MSIx handlers.
3043  */
3044 static irqreturn_t qib_7322intr(int irq, void *data)
3045 {
3046     struct qib_devdata *dd = data;
3047     irqreturn_t ret;
3048     u64 istat;
3049     u64 ctxtrbits;
3050     u64 rmask;
3051     unsigned i;
3052     u32 npkts;
3053 
3054     if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3055         /*
3056          * This return value is not great, but we do not want the
3057          * interrupt core code to remove our interrupt handler
3058          * because we don't appear to be handling an interrupt
3059          * during a chip reset.
3060          */
3061         ret = IRQ_HANDLED;
3062         goto bail;
3063     }
3064 
3065     istat = qib_read_kreg64(dd, kr_intstatus);
3066 
3067     if (unlikely(istat == ~0ULL)) {
3068         qib_bad_intrstatus(dd);
3069         qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3070         /* don't know if it was our interrupt or not */
3071         ret = IRQ_NONE;
3072         goto bail;
3073     }
3074 
3075     istat &= dd->cspec->main_int_mask;
3076     if (unlikely(!istat)) {
3077         /* already handled, or shared and not us */
3078         ret = IRQ_NONE;
3079         goto bail;
3080     }
3081 
3082     this_cpu_inc(*dd->int_counter);
3083 
3084     /* handle "errors" of various kinds first, device ahead of port */
3085     if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3086                   QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3087                   INT_MASK_P(Err, 1))))
3088         unlikely_7322_intr(dd, istat);
3089 
3090     /*
3091      * Clear the interrupt bits we found set, relatively early, so we
3092      * "know" the chip will have seen this by the time we process
3093      * the queue, and will re-interrupt if necessary.  The processor
3094      * itself won't take the interrupt again until we return.
3095      */
3096     qib_write_kreg(dd, kr_intclear, istat);
3097 
3098     /*
3099      * Handle kernel receive queues before checking for pio buffers
3100      * available since receives can overflow; piobuf waiters can afford
3101      * a few extra cycles, since they were waiting anyway.
3102      */
3103     ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3104     if (ctxtrbits) {
3105         rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3106             (1ULL << QIB_I_RCVURG_LSB);
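             /*
              * rmask covers the RcvAvail and RcvUrg bits for context 0;
              * shift it up once per context.  Whatever is left after the
              * kernel contexts belongs to user contexts and is handed to
              * qib_handle_urcv() below.
              */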
3107         for (i = 0; i < dd->first_user_ctxt; i++) {
3108             if (ctxtrbits & rmask) {
3109                 ctxtrbits &= ~rmask;
3110                 if (dd->rcd[i])
3111                     qib_kreceive(dd->rcd[i], NULL, &npkts);
3112             }
3113             rmask <<= 1;
3114         }
3115         if (ctxtrbits) {
3116             ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3117                 (ctxtrbits >> QIB_I_RCVURG_LSB);
3118             qib_handle_urcv(dd, ctxtrbits);
3119         }
3120     }
3121 
3122     if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3123         sdma_7322_intr(dd, istat);
3124 
3125     if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3126         qib_ib_piobufavail(dd);
3127 
3128     ret = IRQ_HANDLED;
3129 bail:
3130     return ret;
3131 }
3132 
3133 /*
3134  * Dedicated receive packet available interrupt handler.
3135  */
3136 static irqreturn_t qib_7322pintr(int irq, void *data)
3137 {
3138     struct qib_ctxtdata *rcd = data;
3139     struct qib_devdata *dd = rcd->dd;
3140     u32 npkts;
3141 
3142     if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3143         /*
3144          * This return value is not great, but we do not want the
3145          * interrupt core code to remove our interrupt handler
3146          * because we don't appear to be handling an interrupt
3147          * during a chip reset.
3148          */
3149         return IRQ_HANDLED;
3150 
3151     this_cpu_inc(*dd->int_counter);
3152 
3153     /* Clear the interrupt bit we expect to be set. */
3154     qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3155                (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3156 
3157     qib_kreceive(rcd, NULL, &npkts);
3158 
3159     return IRQ_HANDLED;
3160 }
3161 
3162 /*
3163  * Dedicated Send buffer available interrupt handler.
3164  */
3165 static irqreturn_t qib_7322bufavail(int irq, void *data)
3166 {
3167     struct qib_devdata *dd = data;
3168 
3169     if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3170         /*
3171          * This return value is not great, but we do not want the
3172          * interrupt core code to remove our interrupt handler
3173          * because we don't appear to be handling an interrupt
3174          * during a chip reset.
3175          */
3176         return IRQ_HANDLED;
3177 
3178     this_cpu_inc(*dd->int_counter);
3179 
3180     /* Clear the interrupt bit we expect to be set. */
3181     qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3182 
3183     /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3184     if (dd->flags & QIB_INITTED)
3185         qib_ib_piobufavail(dd);
3186     else
3187         qib_wantpiobuf_7322_intr(dd, 0);
3188 
3189     return IRQ_HANDLED;
3190 }
3191 
3192 /*
3193  * Dedicated Send DMA interrupt handler.
3194  */
3195 static irqreturn_t sdma_intr(int irq, void *data)
3196 {
3197     struct qib_pportdata *ppd = data;
3198     struct qib_devdata *dd = ppd->dd;
3199 
3200     if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3201         /*
3202          * This return value is not great, but we do not want the
3203          * interrupt core code to remove our interrupt handler
3204          * because we don't appear to be handling an interrupt
3205          * during a chip reset.
3206          */
3207         return IRQ_HANDLED;
3208 
3209     this_cpu_inc(*dd->int_counter);
3210 
3211     /* Clear the interrupt bit we expect to be set. */
3212     qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3213                INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3214     qib_sdma_intr(ppd);
3215 
3216     return IRQ_HANDLED;
3217 }
3218 
3219 /*
3220  * Dedicated Send DMA idle interrupt handler.
3221  */
3222 static irqreturn_t sdma_idle_intr(int irq, void *data)
3223 {
3224     struct qib_pportdata *ppd = data;
3225     struct qib_devdata *dd = ppd->dd;
3226 
3227     if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3228         /*
3229          * This return value is not great, but we do not want the
3230          * interrupt core code to remove our interrupt handler
3231          * because we don't appear to be handling an interrupt
3232          * during a chip reset.
3233          */
3234         return IRQ_HANDLED;
3235 
3236     this_cpu_inc(*dd->int_counter);
3237 
3238     /* Clear the interrupt bit we expect to be set. */
3239     qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3240                INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3241     qib_sdma_intr(ppd);
3242 
3243     return IRQ_HANDLED;
3244 }
3245 
3246 /*
3247  * Dedicated Send DMA progress interrupt handler.
3248  */
3249 static irqreturn_t sdma_progress_intr(int irq, void *data)
3250 {
3251     struct qib_pportdata *ppd = data;
3252     struct qib_devdata *dd = ppd->dd;
3253 
3254     if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3255         /*
3256          * This return value is not great, but we do not want the
3257          * interrupt core code to remove our interrupt handler
3258          * because we don't appear to be handling an interrupt
3259          * during a chip reset.
3260          */
3261         return IRQ_HANDLED;
3262 
3263     this_cpu_inc(*dd->int_counter);
3264 
3265     /* Clear the interrupt bit we expect to be set. */
3266     qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3267                INT_MASK_P(SDmaProgress, 1) :
3268                INT_MASK_P(SDmaProgress, 0));
3269     qib_sdma_intr(ppd);
3270 
3271     return IRQ_HANDLED;
3272 }
3273 
3274 /*
3275  * Dedicated Send DMA cleanup interrupt handler.
3276  */
3277 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3278 {
3279     struct qib_pportdata *ppd = data;
3280     struct qib_devdata *dd = ppd->dd;
3281 
3282     if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3283         /*
3284          * This return value is not great, but we do not want the
3285          * interrupt core code to remove our interrupt handler
3286          * because we don't appear to be handling an interrupt
3287          * during a chip reset.
3288          */
3289         return IRQ_HANDLED;
3290 
3291     this_cpu_inc(*dd->int_counter);
3292 
3293     /* Clear the interrupt bit we expect to be set. */
3294     qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3295                INT_MASK_PM(SDmaCleanupDone, 1) :
3296                INT_MASK_PM(SDmaCleanupDone, 0));
3297     qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3298 
3299     return IRQ_HANDLED;
3300 }
3301 
3302 #ifdef CONFIG_INFINIBAND_QIB_DCA
3303 
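     /*
      * Tear down the IRQ affinity notifier previously registered for a
      * DCA-capable MSI-X vector, leaving the vector itself in place.
      */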
3304 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
3305 {
3306     if (!dd->cspec->msix_entries[msixnum].dca)
3307         return;
3308 
3309     qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
3310             dd->unit, pci_irq_vector(dd->pcidev, msixnum));
3311     irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
3312     dd->cspec->msix_entries[msixnum].notifier = NULL;
3313 }
3314 
3315 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
3316 {
3317     struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
3318     struct qib_irq_notify *n;
3319 
3320     if (!m->dca)
3321         return;
3322     n = kzalloc(sizeof(*n), GFP_KERNEL);
3323     if (n) {
3324         int ret;
3325 
3326         m->notifier = n;
3327         n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
3328         n->notify.notify = qib_irq_notifier_notify;
3329         n->notify.release = qib_irq_notifier_release;
3330         n->arg = m->arg;
3331         n->rcv = m->rcv;
3332         qib_devinfo(dd->pcidev,
3333             "set notifier irq %d rcv %d notify %p\n",
3334             n->notify.irq, n->rcv, &n->notify);
3335         ret = irq_set_affinity_notifier(
3336                 n->notify.irq,
3337                 &n->notify);
3338         if (ret) {
3339             m->notifier = NULL;
3340             kfree(n);
3341         }
3342     }
3343 }
3344 
3345 #endif
3346 
3347 /*
3348  * Set up our chip-specific interrupt handler.
3349  * The interrupt type has already been setup, so
3350  * we just need to do the registration and error checking.
3351  * If we are using MSIx interrupts, we may fall back to
3352  * INTx later, if the interrupt handler doesn't get called
3353  * within 1/2 second (see verify_interrupt()).
3354  */
3355 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3356 {
3357     int ret, i, msixnum;
3358     u64 redirect[6];
3359     u64 mask;
3360     const struct cpumask *local_mask;
3361     int firstcpu, secondcpu = 0, currrcvcpu = 0;
3362 
3363     if (!dd->num_pports)
3364         return;
3365 
3366     if (clearpend) {
3367         /*
3368          * if not switching interrupt types, be sure interrupts are
3369          * disabled, and then clear anything pending at this point,
3370          * because we are starting clean.
3371          */
3372         qib_7322_set_intr_state(dd, 0);
3373 
3374         /* clear the reset error, init error/hwerror mask */
3375         qib_7322_init_hwerrors(dd);
3376 
3377         /* clear any interrupt bits that might be set */
3378         qib_write_kreg(dd, kr_intclear, ~0ULL);
3379 
3380         /* make sure no pending MSIx intr, and clear diag reg */
3381         qib_write_kreg(dd, kr_intgranted, ~0ULL);
3382         qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3383     }
3384 
3385     if (!dd->cspec->num_msix_entries) {
3386         /* Try to get INTx interrupt */
3387 try_intx:
3388         ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
3389                       QIB_DRV_NAME);
3390         if (ret) {
3391             qib_dev_err(
3392                 dd,
3393                 "Couldn't setup INTx interrupt (irq=%d): %d\n",
3394                 pci_irq_vector(dd->pcidev, 0), ret);
3395             return;
3396         }
3397         dd->cspec->main_int_mask = ~0ULL;
3398         return;
3399     }
3400 
3401     /* Try to get MSIx interrupts */
3402     memset(redirect, 0, sizeof(redirect));
3403     mask = ~0ULL;
3404     msixnum = 0;
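         /*
          * Pick CPUs for IRQ affinity hints: prefer CPUs local to the
          * device's PCI bus (fall back to CPU 0's core siblings if the
          * bus mask tells us nothing); the first such CPU gets the
          * non-receive vectors, and the remaining CPUs are round-robined
          * across the kernel receive context vectors.
          */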
3405     local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3406     firstcpu = cpumask_first(local_mask);
3407     if (firstcpu >= nr_cpu_ids ||
3408             cpumask_weight(local_mask) == num_online_cpus()) {
3409         local_mask = topology_core_cpumask(0);
3410         firstcpu = cpumask_first(local_mask);
3411     }
3412     if (firstcpu < nr_cpu_ids) {
3413         secondcpu = cpumask_next(firstcpu, local_mask);
3414         if (secondcpu >= nr_cpu_ids)
3415             secondcpu = firstcpu;
3416         currrcvcpu = secondcpu;
3417     }
3418     for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3419         irq_handler_t handler;
3420         void *arg;
3421         int lsb, reg, sh;
3422 #ifdef CONFIG_INFINIBAND_QIB_DCA
3423         int dca = 0;
3424 #endif
3425         if (i < ARRAY_SIZE(irq_table)) {
3426             if (irq_table[i].port) {
3427                 /* skip if for a non-configured port */
3428                 if (irq_table[i].port > dd->num_pports)
3429                     continue;
3430                 arg = dd->pport + irq_table[i].port - 1;
3431             } else
3432                 arg = dd;
3433 #ifdef CONFIG_INFINIBAND_QIB_DCA
3434             dca = irq_table[i].dca;
3435 #endif
3436             lsb = irq_table[i].lsb;
3437             handler = irq_table[i].handler;
3438             ret = pci_request_irq(dd->pcidev, msixnum, handler,
3439                           NULL, arg, QIB_DRV_NAME "%d%s",
3440                           dd->unit,
3441                           irq_table[i].name);
3442         } else {
3443             unsigned ctxt;
3444 
3445             ctxt = i - ARRAY_SIZE(irq_table);
3446             /* per krcvq context receive interrupt */
3447             arg = dd->rcd[ctxt];
3448             if (!arg)
3449                 continue;
3450             if (qib_krcvq01_no_msi && ctxt < 2)
3451                 continue;
3452 #ifdef CONFIG_INFINIBAND_QIB_DCA
3453             dca = 1;
3454 #endif
3455             lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3456             handler = qib_7322pintr;
3457             ret = pci_request_irq(dd->pcidev, msixnum, handler,
3458                           NULL, arg,
3459                           QIB_DRV_NAME "%d (kctx)",
3460                           dd->unit);
3461         }
3462 
3463         if (ret) {
3464             /*
3465              * Shouldn't happen since the enable said we could
3466              * have as many as we are trying to set up here.
3467              */
3468             qib_dev_err(dd,
3469                     "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3470                     msixnum,
3471                     pci_irq_vector(dd->pcidev, msixnum),
3472                     ret);
3473             qib_7322_free_irq(dd);
3474             pci_alloc_irq_vectors(dd->pcidev, 1, 1,
3475                           PCI_IRQ_LEGACY);
3476             goto try_intx;
3477         }
3478         dd->cspec->msix_entries[msixnum].arg = arg;
3479 #ifdef CONFIG_INFINIBAND_QIB_DCA
3480         dd->cspec->msix_entries[msixnum].dca = dca;
3481         dd->cspec->msix_entries[msixnum].rcv =
3482             handler == qib_7322pintr;
3483 #endif
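             /*
              * Steer this interrupt source to its dedicated MSI-X vector
              * via the IntRedirect registers, and drop its bit from the
              * mask the general handler (qib_7322intr) pays attention to.
              */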
3484         if (lsb >= 0) {
3485             reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3486             sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3487                 SYM_LSB(IntRedirect0, vec1);
3488             mask &= ~(1ULL << lsb);
3489             redirect[reg] |= ((u64) msixnum) << sh;
3490         }
3491         qib_read_kreg64(dd, 2 * msixnum + 1 +
3492                 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3493         if (firstcpu < nr_cpu_ids &&
3494             zalloc_cpumask_var(
3495                 &dd->cspec->msix_entries[msixnum].mask,
3496                 GFP_KERNEL)) {
3497             if (handler == qib_7322pintr) {
3498                 cpumask_set_cpu(currrcvcpu,
3499                     dd->cspec->msix_entries[msixnum].mask);
3500                 currrcvcpu = cpumask_next(currrcvcpu,
3501                     local_mask);
3502                 if (currrcvcpu >= nr_cpu_ids)
3503                     currrcvcpu = secondcpu;
3504             } else {
3505                 cpumask_set_cpu(firstcpu,
3506                     dd->cspec->msix_entries[msixnum].mask);
3507             }
3508             irq_set_affinity_hint(
3509                 pci_irq_vector(dd->pcidev, msixnum),
3510                 dd->cspec->msix_entries[msixnum].mask);
3511         }
3512         msixnum++;
3513     }
3514     /* Initialize the vector mapping */
3515     for (i = 0; i < ARRAY_SIZE(redirect); i++)
3516         qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3517     dd->cspec->main_int_mask = mask;
3518     tasklet_setup(&dd->error_tasklet, qib_error_tasklet);
3519 }
3520 
3521 /**
3522  * qib_7322_boardname - fill in the board name and note features
3523  * @dd: the qlogic_ib device
3524  *
3525  * info will be based on the board revision register
3526  */
3527 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3528 {
3529     /* Will need enumeration of board-types here */
3530     u32 boardid;
3531     unsigned int features = DUAL_PORT_CAP;
3532 
3533     boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3534 
3535     switch (boardid) {
3536     case 0:
3537         dd->boardname = "InfiniPath_QLE7342_Emulation";
3538         break;
3539     case 1:
3540         dd->boardname = "InfiniPath_QLE7340";
3541         dd->flags |= QIB_HAS_QSFP;
3542         features = PORT_SPD_CAP;
3543         break;
3544     case 2:
3545         dd->boardname = "InfiniPath_QLE7342";
3546         dd->flags |= QIB_HAS_QSFP;
3547         break;
3548     case 3:
3549         dd->boardname = "InfiniPath_QMI7342";
3550         break;
3551     case 4:
3552         dd->boardname = "InfiniPath_Unsupported7342";
3553         qib_dev_err(dd, "Unsupported version of QMH7342\n");
3554         features = 0;
3555         break;
3556     case BOARD_QMH7342:
3557         dd->boardname = "InfiniPath_QMH7342";
3558         features = 0x24;
3559         break;
3560     case BOARD_QME7342:
3561         dd->boardname = "InfiniPath_QME7342";
3562         break;
3563     case 8:
3564         dd->boardname = "InfiniPath_QME7362";
3565         dd->flags |= QIB_HAS_QSFP;
3566         break;
3567     case BOARD_QMH7360:
3568         dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
3569         dd->flags |= QIB_HAS_QSFP;
3570         break;
3571     case 15:
3572         dd->boardname = "InfiniPath_QLE7342_TEST";
3573         dd->flags |= QIB_HAS_QSFP;
3574         break;
3575     default:
3576         dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
3577         qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3578         break;
3579     }
3580     dd->board_atten = 1; /* index into txdds_Xdr */
3581 
3582     snprintf(dd->boardversion, sizeof(dd->boardversion),
3583          "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3584          QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3585          (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
3586          dd->majrev, dd->minrev,
3587          (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
3588 
3589     if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3590         qib_devinfo(dd->pcidev,
3591                 "IB%u: Forced to single port mode by module parameter\n",
3592                 dd->unit);
3593         features &= PORT_SPD_CAP;
3594     }
3595 
3596     return features;
3597 }
3598 
3599 /*
3600  * This routine sleeps, so it can only be called from user context, not
3601  * from interrupt context.
3602  */
3603 static int qib_do_7322_reset(struct qib_devdata *dd)
3604 {
3605     u64 val;
3606     u64 *msix_vecsave = NULL;
3607     int i, msix_entries, ret = 1;
3608     u16 cmdval;
3609     u8 int_line, clinesz;
3610     unsigned long flags;
3611 
3612     /* Use dev_err so it shows up in logs, etc. */
3613     qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3614 
3615     qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3616 
3617     msix_entries = dd->cspec->num_msix_entries;
3618 
3619     /* no interrupts till re-initted */
3620     qib_7322_set_intr_state(dd, 0);
3621 
3622     qib_7322_free_irq(dd);
3623 
3624     if (msix_entries) {
3625         /* can be up to 512 bytes, too big for stack */
3626         msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
3627                          sizeof(u64),
3628                          GFP_KERNEL);
3629     }
3630 
3631     /*
3632      * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3633      * info that is set up by the BIOS, so we have to save and restore
3634      * it ourselves.   There is some risk something could change it,
3635      * after we save it, but since we have disabled the MSIx, it
3636      * shouldn't be touched...
3637      */
3638     for (i = 0; i < msix_entries; i++) {
3639         u64 vecaddr, vecdata;
3640 
3641         vecaddr = qib_read_kreg64(dd, 2 * i +
3642                   (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3643         vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3644                   (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3645         if (msix_vecsave) {
3646             msix_vecsave[2 * i] = vecaddr;
3647             /* save it without the masked bit set */
3648             msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3649         }
3650     }
3651 
3652     dd->pport->cpspec->ibdeltainprog = 0;
3653     dd->pport->cpspec->ibsymdelta = 0;
3654     dd->pport->cpspec->iblnkerrdelta = 0;
3655     dd->pport->cpspec->ibmalfdelta = 0;
3656     /* so we check interrupts work again */
3657     dd->z_int_counter = qib_int_counter(dd);
3658 
3659     /*
3660      * Keep chip from being accessed until we are ready.  Use
3661      * writeq() directly, to allow the write even though QIB_PRESENT
3662      * isn't set.
3663      */
3664     dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3665     dd->flags |= QIB_DOING_RESET;
3666     val = dd->control | QLOGIC_IB_C_RESET;
3667     writeq(val, &dd->kregbase[kr_control]);
3668 
3669     for (i = 1; i <= 5; i++) {
3670         /*
3671          * Allow MBIST, etc. to complete; longer on each retry.
3672          * We sometimes get machine checks from bus timeout if no
3673          * response, so for now, make it *really* long.
3674          */
3675         msleep(1000 + (1 + i) * 3000);
3676 
3677         qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3678 
3679         /*
3680          * Use readq directly, so we don't need to mark it as PRESENT
3681          * until we get a successful indication that all is well.
3682          */
3683         val = readq(&dd->kregbase[kr_revision]);
3684         if (val == dd->revision)
3685             break;
3686         if (i == 5) {
3687             qib_dev_err(dd,
3688                 "Failed to initialize after reset, unusable\n");
3689             ret = 0;
3690             goto  bail;
3691         }
3692     }
3693 
3694     dd->flags |= QIB_PRESENT; /* it's back */
3695 
3696     if (msix_entries) {
3697         /* restore the MSIx vector address and data if saved above */
3698         for (i = 0; i < msix_entries; i++) {
3699             if (!msix_vecsave || !msix_vecsave[2 * i])
3700                 continue;
3701             qib_write_kreg(dd, 2 * i +
3702                 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3703                 msix_vecsave[2 * i]);
3704             qib_write_kreg(dd, 1 + 2 * i +
3705                 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3706                 msix_vecsave[1 + 2 * i]);
3707         }
3708     }
3709 
3710     /* initialize the remaining registers.  */
3711     for (i = 0; i < dd->num_pports; ++i)
3712         write_7322_init_portregs(&dd->pport[i]);
3713     write_7322_initregs(dd);
3714 
3715     if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
3716         qib_dev_err(dd,
3717             "Reset failed to setup PCIe or interrupts; continuing anyway\n");
3718 
3719     dd->cspec->num_msix_entries = msix_entries;
3720     qib_setup_7322_interrupt(dd, 1);
3721 
3722     for (i = 0; i < dd->num_pports; ++i) {
3723         struct qib_pportdata *ppd = &dd->pport[i];
3724 
3725         spin_lock_irqsave(&ppd->lflags_lock, flags);
3726         ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3727         ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3728         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3729     }
3730 
3731 bail:
3732     dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3733     kfree(msix_vecsave);
3734     return ret;
3735 }
3736 
3737 /**
3738  * qib_7322_put_tid - write a TID to the chip
3739  * @dd: the qlogic_ib device
3740  * @tidptr: pointer to the expected TID (in chip) to update
3741  * @type: 0 for eager, 1 for expected
3742  * @pa: physical address of in memory buffer; tidinvalid if freeing
3743  */
3744 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3745                  u32 type, unsigned long pa)
3746 {
3747     if (!(dd->flags & QIB_PRESENT))
3748         return;
3749     if (pa != dd->tidinvalid) {
3750         u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3751 
3752         /* paranoia checks */
3753         if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3754             qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3755                     pa);
3756             return;
3757         }
3758         if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3759             qib_dev_err(dd,
3760                 "Physical page address 0x%lx larger than supported\n",
3761                 pa);
3762             return;
3763         }
3764 
3765         if (type == RCVHQ_RCV_TYPE_EAGER)
3766             chippa |= dd->tidtemplate;
3767         else /* for now, always full 4KB page */
3768             chippa |= IBA7322_TID_SZ_4K;
3769         pa = chippa;
3770     }
3771     writeq(pa, tidptr);
3772 }
3773 
3774 /**
3775  * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3776  * @dd: the qlogic_ib device
3777  * @rcd: the ctxt
3778  *
3779  * clear all TID entries for a ctxt, expected and eager.
3780  * Used from qib_close().
3781  */
3782 static void qib_7322_clear_tids(struct qib_devdata *dd,
3783                 struct qib_ctxtdata *rcd)
3784 {
3785     u64 __iomem *tidbase;
3786     unsigned long tidinv;
3787     u32 ctxt;
3788     int i;
3789 
3790     if (!dd->kregbase || !rcd)
3791         return;
3792 
3793     ctxt = rcd->ctxt;
3794 
3795     tidinv = dd->tidinvalid;
3796     tidbase = (u64 __iomem *)
3797         ((char __iomem *) dd->kregbase +
3798          dd->rcvtidbase +
3799          ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3800 
3801     for (i = 0; i < dd->rcvtidcnt; i++)
3802         qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3803                  tidinv);
3804 
3805     tidbase = (u64 __iomem *)
3806         ((char __iomem *) dd->kregbase +
3807          dd->rcvegrbase +
3808          rcd->rcvegr_tid_base * sizeof(*tidbase));
3809 
3810     for (i = 0; i < rcd->rcvegrcnt; i++)
3811         qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3812                  tidinv);
3813 }
3814 
3815 /**
3816  * qib_7322_tidtemplate - setup constants for TID updates
3817  * @dd: the qlogic_ib device
3818  *
3819  * We set up values we use a lot, to avoid recalculating them each time
3820  */
3821 static void qib_7322_tidtemplate(struct qib_devdata *dd)
3822 {
3823     /*
3824      * For now, we always allocate 4KB buffers (at init) so we can
3825      * receive max size packets.  We may want a module parameter to
3826      * specify 2KB or 4KB and/or make it per port instead of per device
3827      * for those who want to reduce memory footprint.  Note that the
3828      * rcvhdrentsize size must be large enough to hold the largest
3829      * IB header (currently 96 bytes) that we expect to handle (plus of
3830      * course the 2 dwords of RHF).
3831      */
3832     if (dd->rcvegrbufsize == 2048)
3833         dd->tidtemplate = IBA7322_TID_SZ_2K;
3834     else if (dd->rcvegrbufsize == 4096)
3835         dd->tidtemplate = IBA7322_TID_SZ_4K;
3836     dd->tidinvalid = 0;
3837 }
3838 
3839 /**
3840  * qib_7322_get_base_info - set chip-specific flags for user code
3841  * @rcd: the qlogic_ib ctxt
3842  * @kinfo: qib_base_info pointer
3843  *
3844  * We set the PCIE flag because the lower bandwidth on PCIe vs
3845  * HyperTransport can affect some user packet algorithms.
3846  */
3847 
3848 static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3849                   struct qib_base_info *kinfo)
3850 {
3851     kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3852         QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3853         QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3854     if (rcd->dd->cspec->r1)
3855         kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3856     if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3857         kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3858 
3859     return 0;
3860 }
3861 
3862 static struct qib_message_header *
3863 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3864 {
3865     u32 offset = qib_hdrget_offset(rhf_addr);
3866 
3867     return (struct qib_message_header *)
3868         (rhf_addr - dd->rhf_offset + offset);
3869 }
3870 
3871 /*
3872  * Configure number of contexts.
3873  */
3874 static void qib_7322_config_ctxts(struct qib_devdata *dd)
3875 {
3876     unsigned long flags;
3877     u32 nchipctxts;
3878 
3879     nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3880     dd->cspec->numctxts = nchipctxts;
3881     if (qib_n_krcv_queues > 1 && dd->num_pports) {
3882         dd->first_user_ctxt = NUM_IB_PORTS +
3883             (qib_n_krcv_queues - 1) * dd->num_pports;
3884         if (dd->first_user_ctxt > nchipctxts)
3885             dd->first_user_ctxt = nchipctxts;
3886         dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3887     } else {
3888         dd->first_user_ctxt = NUM_IB_PORTS;
3889         dd->n_krcv_queues = 1;
3890     }
3891 
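         /*
          * If qib_cfgctxts is not set, size the context count to the
          * kernel contexts plus one per online CPU, rounded up to the
          * chip's supported configurations (6, 10, or all contexts).
          */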
3892     if (!qib_cfgctxts) {
3893         int nctxts = dd->first_user_ctxt + num_online_cpus();
3894 
3895         if (nctxts <= 6)
3896             dd->ctxtcnt = 6;
3897         else if (nctxts <= 10)
3898             dd->ctxtcnt = 10;
3899         else if (nctxts <= nchipctxts)
3900             dd->ctxtcnt = nchipctxts;
3901     } else if (qib_cfgctxts < dd->num_pports)
3902         dd->ctxtcnt = dd->num_pports;
3903     else if (qib_cfgctxts <= nchipctxts)
3904         dd->ctxtcnt = qib_cfgctxts;
3905     if (!dd->ctxtcnt) /* none of the above, set to max */
3906         dd->ctxtcnt = nchipctxts;
3907 
3908     /*
3909      * Chip can be configured for 6, 10, or 18 ctxts, and choice
3910      * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3911      * Lock to be paranoid about later motion, etc.
3912      */
3913     spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3914     if (dd->ctxtcnt > 10)
3915         dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3916     else if (dd->ctxtcnt > 6)
3917         dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3918     /* else configure for default 6 receive ctxts */
3919 
3920     /* The XRC opcode is 5. */
3921     dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3922 
3923     /*
3924      * RcvCtrl *must* be written here so that the
3925      * chip understands how to change rcvegrcnt below.
3926      */
3927     qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3928     spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3929 
3930     /* kr_rcvegrcnt changes based on the number of contexts enabled */
3931     dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3932     if (qib_rcvhdrcnt)
3933         dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3934     else
3935         dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3936                     dd->num_pports > 1 ? 1024U : 2048U);
3937 }
3938 
3939 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3940 {
3941 
3942     int lsb, ret = 0;
3943     u64 maskr; /* right-justified mask */
3944 
3945     switch (which) {
3946 
3947     case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3948         ret = ppd->link_width_enabled;
3949         goto done;
3950 
3951     case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3952         ret = ppd->link_width_active;
3953         goto done;
3954 
3955     case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3956         ret = ppd->link_speed_enabled;
3957         goto done;
3958 
3959     case QIB_IB_CFG_SPD: /* Get current Link spd */
3960         ret = ppd->link_speed_active;
3961         goto done;
3962 
3963     case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3964         lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3965         maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3966         break;
3967 
3968     case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3969         lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3970         maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3971         break;
3972 
3973     case QIB_IB_CFG_LINKLATENCY:
3974         ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3975             SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3976         goto done;
3977 
3978     case QIB_IB_CFG_OP_VLS:
3979         ret = ppd->vls_operational;
3980         goto done;
3981 
3982     case QIB_IB_CFG_VL_HIGH_CAP:
3983         ret = 16;
3984         goto done;
3985 
3986     case QIB_IB_CFG_VL_LOW_CAP:
3987         ret = 16;
3988         goto done;
3989 
3990     case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3991         ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3992                 OverrunThreshold);
3993         goto done;
3994 
3995     case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3996         ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3997                 PhyerrThreshold);
3998         goto done;
3999 
4000     case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4001         /* will only take effect when the link state changes */
4002         ret = (ppd->cpspec->ibcctrl_a &
4003                SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4004             IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4005         goto done;
4006 
4007     case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4008         lsb = IBA7322_IBC_HRTBT_LSB;
4009         maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4010         break;
4011 
4012     case QIB_IB_CFG_PMA_TICKS:
4013         /*
4014          * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbps
4015          * Since the clock is always 250MHz, the value is 3, 1 or 0.
4016          */
4017         if (ppd->link_speed_active == QIB_IB_QDR)
4018             ret = 3;
4019         else if (ppd->link_speed_active == QIB_IB_DDR)
4020             ret = 1;
4021         else
4022             ret = 0;
4023         goto done;
4024 
4025     default:
4026         ret = -EINVAL;
4027         goto done;
4028     }
4029     ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4030 done:
4031     return ret;
4032 }
4033 
4034 /*
4035  * Below again cribbed liberally from older version. Do not lean
4036  * heavily on it.
4037  */
4038 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4039 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4040     | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4041 
4042 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4043 {
4044     struct qib_devdata *dd = ppd->dd;
4045     u64 maskr; /* right-justified mask */
4046     int lsb, ret = 0;
4047     u16 lcmd, licmd;
4048     unsigned long flags;
4049 
4050     switch (which) {
4051     case QIB_IB_CFG_LIDLMC:
4052         /*
4053          * Set LID and LMC. Combined to avoid possible hazard
4054          * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4055          */
4056         lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4057         maskr = IBA7322_IBC_DLIDLMC_MASK;
4058         /*
4059          * For header-checking, the SLID in the packet will
4060          * be masked with SendIBSLMCMask, and compared
4061          * with SendIBSLIDAssignMask. Make sure we do not
4062          * set any bits not covered by the mask, or we get
4063          * false-positives.
4064          */
4065         qib_write_kreg_port(ppd, krp_sendslid,
4066                     val & (val >> 16) & SendIBSLIDAssignMask);
4067         qib_write_kreg_port(ppd, krp_sendslidmask,
4068                     (val >> 16) & SendIBSLMCMask);
4069         break;
4070 
4071     case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4072         ppd->link_width_enabled = val;
4073         /* convert IB value to chip register value */
4074         if (val == IB_WIDTH_1X)
4075             val = 0;
4076         else if (val == IB_WIDTH_4X)
4077             val = 1;
4078         else
4079             val = 3;
4080         maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4081         lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4082         break;
4083 
4084     case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4085         /*
4086          * As with width, only write the actual register if the
4087          * link is currently down, otherwise takes effect on next
4088          * link change.  Since setting is being explicitly requested
4089          * (via MAD or sysfs), clear autoneg failure status if speed
4090          * autoneg is enabled.
4091          */
4092         ppd->link_speed_enabled = val;
4093         val <<= IBA7322_IBC_SPEED_LSB;
4094         maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4095             IBA7322_IBC_MAX_SPEED_MASK;
4096         if (val & (val - 1)) {
4097             /* Multiple speeds enabled */
4098             val |= IBA7322_IBC_IBTA_1_2_MASK |
4099                 IBA7322_IBC_MAX_SPEED_MASK;
4100             spin_lock_irqsave(&ppd->lflags_lock, flags);
4101             ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4102             spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4103         } else if (val & IBA7322_IBC_SPEED_QDR)
4104             val |= IBA7322_IBC_IBTA_1_2_MASK;
4105         /* IBTA 1.2 mode + min/max + speed bits are contiguous */
4106         lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4107         break;
4108 
4109     case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4110         lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4111         maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4112         break;
4113 
4114     case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4115         lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4116         maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4117         break;
4118 
4119     case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4120         maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4121                   OverrunThreshold);
4122         if (maskr != val) {
4123             ppd->cpspec->ibcctrl_a &=
4124                 ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4125             ppd->cpspec->ibcctrl_a |= (u64) val <<
4126                 SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4127             qib_write_kreg_port(ppd, krp_ibcctrl_a,
4128                         ppd->cpspec->ibcctrl_a);
4129             qib_write_kreg(dd, kr_scratch, 0ULL);
4130         }
4131         goto bail;
4132 
4133     case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4134         maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4135                   PhyerrThreshold);
4136         if (maskr != val) {
4137             ppd->cpspec->ibcctrl_a &=
4138                 ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4139             ppd->cpspec->ibcctrl_a |= (u64) val <<
4140                 SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4141             qib_write_kreg_port(ppd, krp_ibcctrl_a,
4142                         ppd->cpspec->ibcctrl_a);
4143             qib_write_kreg(dd, kr_scratch, 0ULL);
4144         }
4145         goto bail;
4146 
4147     case QIB_IB_CFG_PKEYS: /* update pkeys */
4148         maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4149             ((u64) ppd->pkeys[2] << 32) |
4150             ((u64) ppd->pkeys[3] << 48);
4151         qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4152         goto bail;
4153 
4154     case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4155         /* will only take effect when the link state changes */
4156         if (val == IB_LINKINITCMD_POLL)
4157             ppd->cpspec->ibcctrl_a &=
4158                 ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4159         else /* SLEEP */
4160             ppd->cpspec->ibcctrl_a |=
4161                 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4162         qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4163         qib_write_kreg(dd, kr_scratch, 0ULL);
4164         goto bail;
4165 
4166     case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4167         /*
4168          * Update our housekeeping variables, and set IBC max
4169          * size, same as init code; max IBC is max we allow in
4170          * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4171          * Set even if it's unchanged, print debug message only
4172          * on changes.
4173          */
4174         val = (ppd->ibmaxlen >> 2) + 1;
4175         ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4176         ppd->cpspec->ibcctrl_a |= (u64)val <<
4177             SYM_LSB(IBCCtrlA_0, MaxPktLen);
4178         qib_write_kreg_port(ppd, krp_ibcctrl_a,
4179                     ppd->cpspec->ibcctrl_a);
4180         qib_write_kreg(dd, kr_scratch, 0ULL);
4181         goto bail;
4182 
4183     case QIB_IB_CFG_LSTATE: /* set the IB link state */
4184         switch (val & 0xffff0000) {
4185         case IB_LINKCMD_DOWN:
4186             lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4187             ppd->cpspec->ibmalfusesnap = 1;
4188             ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4189                 crp_errlink);
4190             if (!ppd->cpspec->ibdeltainprog &&
4191                 qib_compat_ddr_negotiate) {
4192                 ppd->cpspec->ibdeltainprog = 1;
4193                 ppd->cpspec->ibsymsnap =
4194                     read_7322_creg32_port(ppd,
4195                                   crp_ibsymbolerr);
4196                 ppd->cpspec->iblnkerrsnap =
4197                     read_7322_creg32_port(ppd,
4198                               crp_iblinkerrrecov);
4199             }
4200             break;
4201 
4202         case IB_LINKCMD_ARMED:
4203             lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4204             if (ppd->cpspec->ibmalfusesnap) {
4205                 ppd->cpspec->ibmalfusesnap = 0;
4206                 ppd->cpspec->ibmalfdelta +=
4207                     read_7322_creg32_port(ppd,
4208                                   crp_errlink) -
4209                     ppd->cpspec->ibmalfsnap;
4210             }
4211             break;
4212 
4213         case IB_LINKCMD_ACTIVE:
4214             lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4215             break;
4216 
4217         default:
4218             ret = -EINVAL;
4219             qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4220             goto bail;
4221         }
4222         switch (val & 0xffff) {
4223         case IB_LINKINITCMD_NOP:
4224             licmd = 0;
4225             break;
4226 
4227         case IB_LINKINITCMD_POLL:
4228             licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4229             break;
4230 
4231         case IB_LINKINITCMD_SLEEP:
4232             licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4233             break;
4234 
4235         case IB_LINKINITCMD_DISABLE:
4236             licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4237             ppd->cpspec->chase_end = 0;
4238             /*
4239              * stop the state chase counter and timer, if running;
4240              * wait for a pending timer, but don't clear .data (ppd)!
4241              */
4242             if (ppd->cpspec->chase_timer.expires) {
4243                 del_timer_sync(&ppd->cpspec->chase_timer);
4244                 ppd->cpspec->chase_timer.expires = 0;
4245             }
4246             break;
4247 
4248         default:
4249             ret = -EINVAL;
4250             qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4251                     val & 0xffff);
4252             goto bail;
4253         }
4254         qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4255         goto bail;
4256 
4257     case QIB_IB_CFG_OP_VLS:
4258         if (ppd->vls_operational != val) {
4259             ppd->vls_operational = val;
4260             set_vls(ppd);
4261         }
4262         goto bail;
4263 
4264     case QIB_IB_CFG_VL_HIGH_LIMIT:
4265         qib_write_kreg_port(ppd, krp_highprio_limit, val);
4266         goto bail;
4267 
4268     case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4269         if (val > 3) {
4270             ret = -EINVAL;
4271             goto bail;
4272         }
4273         lsb = IBA7322_IBC_HRTBT_LSB;
4274         maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4275         break;
4276 
4277     case QIB_IB_CFG_PORT:
4278         /* val is the port number of the switch we are connected to. */
4279         if (ppd->dd->cspec->r1) {
4280             cancel_delayed_work(&ppd->cpspec->ipg_work);
4281             ppd->cpspec->ipg_tries = 0;
4282         }
4283         goto bail;
4284 
4285     default:
4286         ret = -EINVAL;
4287         goto bail;
4288     }
4289     ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4290     ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4291     qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4292     qib_write_kreg(dd, kr_scratch, 0);
4293 bail:
4294     return ret;
4295 }
4296 
4297 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4298 {
4299     int ret = 0;
4300     u64 val, ctrlb;
4301 
4302     /* only IBC loopback, may add serdes and xgxs loopbacks later */
4303     if (!strncmp(what, "ibc", 3)) {
4304         ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4305                                Loopback);
4306         val = 0; /* disable heart beat, so link will come up */
4307         qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4308              ppd->dd->unit, ppd->port);
4309     } else if (!strncmp(what, "off", 3)) {
4310         ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4311                             Loopback);
4312         /* enable heart beat again */
4313         val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4314         qib_devinfo(ppd->dd->pcidev,
4315             "Disabling IB%u:%u IBC loopback (normal)\n",
4316             ppd->dd->unit, ppd->port);
4317     } else
4318         ret = -EINVAL;
4319     if (!ret) {
4320         qib_write_kreg_port(ppd, krp_ibcctrl_a,
4321                     ppd->cpspec->ibcctrl_a);
4322         ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4323                          << IBA7322_IBC_HRTBT_LSB);
4324         ppd->cpspec->ibcctrl_b = ctrlb | val;
4325         qib_write_kreg_port(ppd, krp_ibcctrl_b,
4326                     ppd->cpspec->ibcctrl_b);
4327         qib_write_kreg(ppd->dd, kr_scratch, 0);
4328     }
4329     return ret;
4330 }
4331 
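     /*
      * Read back the 16-entry VL arbitration table starting at register
      * 'regno'; each register holds one {VL, weight} pair in the
      * LowPriority0_0 field layout.
      */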
4332 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4333                struct ib_vl_weight_elem *vl)
4334 {
4335     unsigned i;
4336 
4337     for (i = 0; i < 16; i++, regno++, vl++) {
4338         u32 val = qib_read_kreg_port(ppd, regno);
4339 
4340         vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4341             SYM_RMASK(LowPriority0_0, VirtualLane);
4342         vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4343             SYM_RMASK(LowPriority0_0, Weight);
4344     }
4345 }
4346 
4347 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4348                struct ib_vl_weight_elem *vl)
4349 {
4350     unsigned i;
4351 
4352     for (i = 0; i < 16; i++, regno++, vl++) {
4353         u64 val;
4354 
4355         val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4356             SYM_LSB(LowPriority0_0, VirtualLane)) |
4357               ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4358             SYM_LSB(LowPriority0_0, Weight));
4359         qib_write_kreg_port(ppd, regno, val);
4360     }
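         /* Make sure the IB VL arbiter is enabled once a weight table has been written. */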
4361     if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4362         struct qib_devdata *dd = ppd->dd;
4363         unsigned long flags;
4364 
4365         spin_lock_irqsave(&dd->sendctrl_lock, flags);
4366         ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4367         qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4368         qib_write_kreg(dd, kr_scratch, 0);
4369         spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4370     }
4371 }
4372 
4373 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4374 {
4375     switch (which) {
4376     case QIB_IB_TBL_VL_HIGH_ARB:
4377         get_vl_weights(ppd, krp_highprio_0, t);
4378         break;
4379 
4380     case QIB_IB_TBL_VL_LOW_ARB:
4381         get_vl_weights(ppd, krp_lowprio_0, t);
4382         break;
4383 
4384     default:
4385         return -EINVAL;
4386     }
4387     return 0;
4388 }
4389 
4390 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4391 {
4392     switch (which) {
4393     case QIB_IB_TBL_VL_HIGH_ARB:
4394         set_vl_weights(ppd, krp_highprio_0, t);
4395         break;
4396 
4397     case QIB_IB_TBL_VL_LOW_ARB:
4398         set_vl_weights(ppd, krp_lowprio_0, t);
4399         break;
4400 
4401     default:
4402         return -EINVAL;
4403     }
4404     return 0;
4405 }
4406 
4407 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4408                     u32 updegr, u32 egrhd, u32 npkts)
4409 {
4410     /*
4411      * Need to write timeout register before updating rcvhdrhead to ensure
4412      * that the timer is enabled on reception of a packet.
4413      */
4414     if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4415         adjust_rcv_timeout(rcd, npkts);
4416     if (updegr)
4417         qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4418     qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4419     qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4420 }
4421 
4422 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4423 {
4424     u32 head, tail;
4425 
4426     head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4427     if (rcd->rcvhdrtail_kvaddr)
4428         tail = qib_get_rcvhdrtail(rcd);
4429     else
4430         tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4431     return head == tail;
4432 }
4433 
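     /*
      * Operation bits that touch the device-common RcvCtrl register vs. the
      * per-port RcvCtrl_0 register; rcvctrl_7322_mod() below uses these to
      * decide which register(s) need to be written back.
      */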
4434 #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4435     QIB_RCVCTRL_CTXT_DIS | \
4436     QIB_RCVCTRL_TIDFLOW_ENB | \
4437     QIB_RCVCTRL_TIDFLOW_DIS | \
4438     QIB_RCVCTRL_TAILUPD_ENB | \
4439     QIB_RCVCTRL_TAILUPD_DIS | \
4440     QIB_RCVCTRL_INTRAVAIL_ENB | \
4441     QIB_RCVCTRL_INTRAVAIL_DIS | \
4442     QIB_RCVCTRL_BP_ENB | \
4443     QIB_RCVCTRL_BP_DIS)
4444 
4445 #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4446     QIB_RCVCTRL_CTXT_DIS | \
4447     QIB_RCVCTRL_PKEY_DIS | \
4448     QIB_RCVCTRL_PKEY_ENB)
4449 
4450 /*
4451  * Modify the RCVCTRL register in a chip-specific way. This
4452  * is a function because bit positions and (future) register
4453  * locations are chip-specific, but the needed operations are
4454  * generic. <op> is a bit-mask because we often want to
4455  * do multiple modifications.
4456  */
4457 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4458                  int ctxt)
4459 {
4460     struct qib_devdata *dd = ppd->dd;
4461     struct qib_ctxtdata *rcd;
4462     u64 mask, val;
4463     unsigned long flags;
4464 
4465     spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4466 
4467     if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4468         dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4469     if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4470         dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4471     if (op & QIB_RCVCTRL_TAILUPD_ENB)
4472         dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4473     if (op & QIB_RCVCTRL_TAILUPD_DIS)
4474         dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4475     if (op & QIB_RCVCTRL_PKEY_ENB)
4476         ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4477     if (op & QIB_RCVCTRL_PKEY_DIS)
4478         ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
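         /* ctxt < 0 means operate on all contexts; otherwise only on context 'ctxt'. */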
4479     if (ctxt < 0) {
4480         mask = (1ULL << dd->ctxtcnt) - 1;
4481         rcd = NULL;
4482     } else {
4483         mask = (1ULL << ctxt);
4484         rcd = dd->rcd[ctxt];
4485     }
4486     if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4487         ppd->p_rcvctrl |=
4488             (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4489         if (!(dd->flags & QIB_NODMA_RTAIL)) {
4490             op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4491             dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4492         }
4493         /* Write these registers before the context is enabled. */
4494         qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4495                     rcd->rcvhdrqtailaddr_phys);
4496         qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4497                     rcd->rcvhdrq_phys);
4498         rcd->seq_cnt = 1;
4499     }
4500     if (op & QIB_RCVCTRL_CTXT_DIS)
4501         ppd->p_rcvctrl &=
4502             ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4503     if (op & QIB_RCVCTRL_BP_ENB)
4504         dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4505     if (op & QIB_RCVCTRL_BP_DIS)
4506         dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4507     if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4508         dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4509     if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4510         dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4511     /*
4512      * Decide which registers to write depending on the ops enabled.
4513      * Special case is "flush" (no bits set at all)
4514      * which needs to write both.
4515      */
4516     if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4517         qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4518     if (op == 0 || (op & RCVCTRL_PORT_MODS))
4519         qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4520     if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4521         /*
4522          * Init the context registers also; if we were
4523          * disabled, tail and head should both be zero
4524          * already from the enable, but since we don't
4525          * know, we have to do it explicitly.
4526          */
4527         val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4528         qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4529 
4530         /* be sure enabling write seen; hd/tl should be 0 */
4531         (void) qib_read_kreg32(dd, kr_scratch);
4532         val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4533         dd->rcd[ctxt]->head = val;
4534         /* If kctxt, interrupt on next receive. */
4535         if (ctxt < dd->first_user_ctxt)
4536             val |= dd->rhdrhead_intr_off;
4537         qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4538     } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4539         dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4540         /* arm rcv interrupt */
4541         val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4542         qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4543     }
4544     if (op & QIB_RCVCTRL_CTXT_DIS) {
4545         unsigned f;
4546 
4547         /* Now that the context is disabled, clear these registers. */
4548         if (ctxt >= 0) {
4549             qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4550             qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4551             for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4552                 qib_write_ureg(dd, ur_rcvflowtable + f,
4553                            TIDFLOW_ERRBITS, ctxt);
4554         } else {
4555             unsigned i;
4556 
4557             for (i = 0; i < dd->cfgctxts; i++) {
4558                 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4559                             i, 0);
4560                 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4561                 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4562                     qib_write_ureg(dd, ur_rcvflowtable + f,
4563                                TIDFLOW_ERRBITS, i);
4564             }
4565         }
4566     }
4567     spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4568 }
4569 
4570 /*
4571  * Modify the SENDCTRL register in a chip-specific way. This
4572  * is a function because there are multiple such registers with
4573  * slightly different layouts.
4574  * The chip doesn't allow back-to-back sendctrl writes, so write
4575  * the scratch register after writing sendctrl.
4576  *
4577  * Which register is written depends on the operation.
4578  * Most operate on the common register, while
4579  * SEND_ENB and SEND_DIS operate on the per-port ones.
4580  * SEND_ENB is included in common because it can change SPCL_TRIG
4581  */
4582 #define SENDCTRL_COMMON_MODS (\
4583     QIB_SENDCTRL_CLEAR | \
4584     QIB_SENDCTRL_AVAIL_DIS | \
4585     QIB_SENDCTRL_AVAIL_ENB | \
4586     QIB_SENDCTRL_AVAIL_BLIP | \
4587     QIB_SENDCTRL_DISARM | \
4588     QIB_SENDCTRL_DISARM_ALL | \
4589     QIB_SENDCTRL_SEND_ENB)
4590 
4591 #define SENDCTRL_PORT_MODS (\
4592     QIB_SENDCTRL_CLEAR | \
4593     QIB_SENDCTRL_SEND_ENB | \
4594     QIB_SENDCTRL_SEND_DIS | \
4595     QIB_SENDCTRL_FLUSH)
4596 
4597 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4598 {
4599     struct qib_devdata *dd = ppd->dd;
4600     u64 tmp_dd_sendctrl;
4601     unsigned long flags;
4602 
4603     spin_lock_irqsave(&dd->sendctrl_lock, flags);
4604 
4605     /* First the dd ones that are "sticky", saved in shadow */
4606     if (op & QIB_SENDCTRL_CLEAR)
4607         dd->sendctrl = 0;
4608     if (op & QIB_SENDCTRL_AVAIL_DIS)
4609         dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4610     else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4611         dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4612         if (dd->flags & QIB_USE_SPCL_TRIG)
4613             dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4614     }
4615 
4616     /* Then the ppd ones that are "sticky", saved in shadow */
4617     if (op & QIB_SENDCTRL_SEND_DIS)
4618         ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4619     else if (op & QIB_SENDCTRL_SEND_ENB)
4620         ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4621 
4622     if (op & QIB_SENDCTRL_DISARM_ALL) {
4623         u32 i, last;
4624 
4625         tmp_dd_sendctrl = dd->sendctrl;
4626         last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4627         /*
4628          * Disarm any buffers that are not yet launched,
4629          * disabling updates until done.
4630          */
4631         tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4632         for (i = 0; i < last; i++) {
4633             qib_write_kreg(dd, kr_sendctrl,
4634                        tmp_dd_sendctrl |
4635                        SYM_MASK(SendCtrl, Disarm) | i);
4636             qib_write_kreg(dd, kr_scratch, 0);
4637         }
4638     }
4639 
4640     if (op & QIB_SENDCTRL_FLUSH) {
4641         u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4642 
4643         /*
4644          * Now drain all the fifos.  The Abort bit should never be
4645          * needed, so for now, at least, we don't use it.
4646          */
4647         tmp_ppd_sendctrl |=
4648             SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4649             SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4650             SYM_MASK(SendCtrl_0, TxeBypassIbc);
4651         qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4652         qib_write_kreg(dd, kr_scratch, 0);
4653     }
4654 
4655     tmp_dd_sendctrl = dd->sendctrl;
4656 
4657     if (op & QIB_SENDCTRL_DISARM)
4658         tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4659             ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4660              SYM_LSB(SendCtrl, DisarmSendBuf));
4661     if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4662         (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4663         tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4664 
4665     if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4666         qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4667         qib_write_kreg(dd, kr_scratch, 0);
4668     }
4669 
4670     if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4671         qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4672         qib_write_kreg(dd, kr_scratch, 0);
4673     }
4674 
4675     if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4676         qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4677         qib_write_kreg(dd, kr_scratch, 0);
4678     }
4679 
4680     spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4681 
4682     if (op & QIB_SENDCTRL_FLUSH) {
4683         u32 v;
4684         /*
4685          * ensure writes have hit chip, then do a few
4686          * more reads, to allow DMA of pioavail registers
4687          * to occur, so in-memory copy is in sync with
4688          * the chip.  Not always safe to sleep.
4689          */
4690         v = qib_read_kreg32(dd, kr_scratch);
4691         qib_write_kreg(dd, kr_scratch, v);
4692         v = qib_read_kreg32(dd, kr_scratch);
4693         qib_write_kreg(dd, kr_scratch, v);
4694         qib_read_kreg32(dd, kr_scratch);
4695     }
4696 }
4697 
4698 #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4699 #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4700 #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4701 
4702 /**
4703  * qib_portcntr_7322 - read a per-port chip counter
4704  * @ppd: the qlogic_ib pport
4705  * @reg: the counter to read (not a chip offset)
4706  */
4707 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4708 {
4709     struct qib_devdata *dd = ppd->dd;
4710     u64 ret = 0ULL;
4711     u16 creg;
4712     /* 0xffff for unimplemented or synthesized counters */
4713     static const u32 xlator[] = {
4714         [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4715         [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4716         [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4717         [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4718         [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4719         [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4720         [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4721         [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4722         [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4723         [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4724         [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4725         [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4726         [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4727         [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4728         [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4729         [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4730         [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4731         [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4732         [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4733         [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4734         [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4735         [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4736         [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4737         [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4738         [QIBPORTCNTR_ERRLINK] = crp_errlink,
4739         [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4740         [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4741         [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4742         [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4743         [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4744         /*
4745          * the next 3 aren't really counters, but were implemented
4746          * as counters in older chips, so still get accessed as
4747          * though they were counters from this code.
4748          */
4749         [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4750         [QIBPORTCNTR_PSSTART] = krp_psstart,
4751         [QIBPORTCNTR_PSSTAT] = krp_psstat,
4752         /* pseudo-counter, summed for all ports */
4753         [QIBPORTCNTR_KHDROVFL] = 0xffff,
4754     };
4755 
4756     if (reg >= ARRAY_SIZE(xlator)) {
4757         qib_devinfo(ppd->dd->pcidev,
4758              "Unimplemented portcounter %u\n", reg);
4759         goto done;
4760     }
4761     creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4762 
4763     /* handle non-counters and special cases first */
4764     if (reg == QIBPORTCNTR_KHDROVFL) {
4765         int i;
4766 
4767         /* sum over all kernel contexts (skip if mini_init) */
4768         for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4769             struct qib_ctxtdata *rcd = dd->rcd[i];
4770 
4771             if (!rcd || rcd->ppd != ppd)
4772                 continue;
4773             ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4774         }
4775         goto done;
4776     } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4777         /*
4778          * Used as part of the synthesis of port_rcv_errors
4779          * in the verbs code for IBTA counters.  Not needed for 7322,
4780          * because all the errors are already counted by other cntrs.
4781          */
4782         goto done;
4783     } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4784            reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4785         /* were counters in older chips, now per-port kernel regs */
4786         ret = qib_read_kreg_port(ppd, creg);
4787         goto done;
4788     }
4789 
4790     /*
4791      * Only fast increment counters are 64 bits; use 32 bit reads to
4792      * avoid two independent reads when on Opteron.
4793      */
4794     if (xlator[reg] & _PORT_64BIT_FLAG)
4795         ret = read_7322_creg_port(ppd, creg);
4796     else
4797         ret = read_7322_creg32_port(ppd, creg);
4798     if (creg == crp_ibsymbolerr) {
4799         if (ppd->cpspec->ibdeltainprog)
4800             ret -= ret - ppd->cpspec->ibsymsnap;
4801         ret -= ppd->cpspec->ibsymdelta;
4802     } else if (creg == crp_iblinkerrrecov) {
4803         if (ppd->cpspec->ibdeltainprog)
4804             ret -= ret - ppd->cpspec->iblnkerrsnap;
4805         ret -= ppd->cpspec->iblnkerrdelta;
4806     } else if (creg == crp_errlink)
4807         ret -= ppd->cpspec->ibmalfdelta;
4808     else if (creg == crp_iblinkdown)
4809         ret += ppd->cpspec->iblnkdowndelta;
4810 done:
4811     return ret;
4812 }
4813 
4814 /*
4815  * Device counter names (not port-specific), one line per stat,
4816  * single string.  Used by utilities like ipathstats to print the stats
4817  * in a way which works for different versions of drivers, without changing
4818  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4819  * display by the utility.
4820  * Non-error counters are first.
4821  * The start of the "error" counters is indicated by a leading "E " on the
4822  * first "error" counter; the prefix doesn't count in the label length.
4823  * The EgrOvfl list needs to be last so we can truncate it at the configured
4824  * context count for the device.
4825  * cntr7322indices contains the corresponding register indices.
4826  */
4827 static const char cntr7322names[] =
4828     "Interrupts\n"
4829     "HostBusStall\n"
4830     "E RxTIDFull\n"
4831     "RxTIDInvalid\n"
4832     "RxTIDFloDrop\n" /* 7322 only */
4833     "Ctxt0EgrOvfl\n"
4834     "Ctxt1EgrOvfl\n"
4835     "Ctxt2EgrOvfl\n"
4836     "Ctxt3EgrOvfl\n"
4837     "Ctxt4EgrOvfl\n"
4838     "Ctxt5EgrOvfl\n"
4839     "Ctxt6EgrOvfl\n"
4840     "Ctxt7EgrOvfl\n"
4841     "Ctxt8EgrOvfl\n"
4842     "Ctxt9EgrOvfl\n"
4843     "Ctx10EgrOvfl\n"
4844     "Ctx11EgrOvfl\n"
4845     "Ctx12EgrOvfl\n"
4846     "Ctx13EgrOvfl\n"
4847     "Ctx14EgrOvfl\n"
4848     "Ctx15EgrOvfl\n"
4849     "Ctx16EgrOvfl\n"
4850     "Ctx17EgrOvfl\n"
4851     ;
4852 
4853 static const u32 cntr7322indices[] = {
4854     cr_lbint | _PORT_64BIT_FLAG,
4855     cr_lbstall | _PORT_64BIT_FLAG,
4856     cr_tidfull,
4857     cr_tidinvalid,
4858     cr_rxtidflowdrop,
4859     cr_base_egrovfl + 0,
4860     cr_base_egrovfl + 1,
4861     cr_base_egrovfl + 2,
4862     cr_base_egrovfl + 3,
4863     cr_base_egrovfl + 4,
4864     cr_base_egrovfl + 5,
4865     cr_base_egrovfl + 6,
4866     cr_base_egrovfl + 7,
4867     cr_base_egrovfl + 8,
4868     cr_base_egrovfl + 9,
4869     cr_base_egrovfl + 10,
4870     cr_base_egrovfl + 11,
4871     cr_base_egrovfl + 12,
4872     cr_base_egrovfl + 13,
4873     cr_base_egrovfl + 14,
4874     cr_base_egrovfl + 15,
4875     cr_base_egrovfl + 16,
4876     cr_base_egrovfl + 17,
4877 };
4878 
4879 /*
4880  * same as cntr7322names and cntr7322indices, but for port-specific counters.
4881  * portcntr7322indices is somewhat complicated by some registers needing
4882  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4883  */
4884 static const char portcntr7322names[] =
4885     "TxPkt\n"
4886     "TxFlowPkt\n"
4887     "TxWords\n"
4888     "RxPkt\n"
4889     "RxFlowPkt\n"
4890     "RxWords\n"
4891     "TxFlowStall\n"
4892     "TxDmaDesc\n"  /* 7220 and 7322-only */
4893     "E RxDlidFltr\n"  /* 7220 and 7322-only */
4894     "IBStatusChng\n"
4895     "IBLinkDown\n"
4896     "IBLnkRecov\n"
4897     "IBRxLinkErr\n"
4898     "IBSymbolErr\n"
4899     "RxLLIErr\n"
4900     "RxBadFormat\n"
4901     "RxBadLen\n"
4902     "RxBufOvrfl\n"
4903     "RxEBP\n"
4904     "RxFlowCtlErr\n"
4905     "RxICRCerr\n"
4906     "RxLPCRCerr\n"
4907     "RxVCRCerr\n"
4908     "RxInvalLen\n"
4909     "RxInvalPKey\n"
4910     "RxPktDropped\n"
4911     "TxBadLength\n"
4912     "TxDropped\n"
4913     "TxInvalLen\n"
4914     "TxUnderrun\n"
4915     "TxUnsupVL\n"
4916     "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4917     "RxVL15Drop\n"
4918     "RxVlErr\n"
4919     "XcessBufOvfl\n"
4920     "RxQPBadCtxt\n" /* 7322-only from here down */
4921     "TXBadHeader\n"
4922     ;
4923 
4924 static const u32 portcntr7322indices[] = {
4925     QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4926     crp_pktsendflow,
4927     QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4928     QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4929     crp_pktrcvflowctrl,
4930     QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4931     QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4932     crp_txsdmadesc | _PORT_64BIT_FLAG,
4933     crp_rxdlidfltr,
4934     crp_ibstatuschange,
4935     QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4936     QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4937     QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4938     QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4939     QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4940     QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4941     QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4942     QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4943     QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4944     crp_rcvflowctrlviol,
4945     QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4946     QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4947     QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4948     QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4949     QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4950     QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4951     crp_txminmaxlenerr,
4952     crp_txdroppedpkt,
4953     crp_txlenerr,
4954     crp_txunderrun,
4955     crp_txunsupvl,
4956     QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4957     QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4958     QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4959     QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4960     crp_rxqpinvalidctxt,
4961     crp_txhdrerr,
4962 };
4963 
4964 /* do all the setup to make the counter reads efficient later */
4965 static void init_7322_cntrnames(struct qib_devdata *dd)
4966 {
4967     int i, j = 0;
4968     char *s;
4969 
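         /*
          * Count the device counter names, truncating the per-context
          * EgrOvfl entries at the number of configured contexts.
          */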
4970     for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4971          i++) {
4972         /* we always have at least one counter before the egrovfl */
4973         if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4974             j = 1;
4975         s = strchr(s + 1, '\n');
4976         if (s && j)
4977             j++;
4978     }
4979     dd->cspec->ncntrs = i;
4980     if (!s)
4981         /* full list; size is without terminating null */
4982         dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
4983     else
4984         dd->cspec->cntrnamelen = 1 + s - cntr7322names;
4985     dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
4986                      GFP_KERNEL);
4987 
4988     for (i = 0, s = (char *)portcntr7322names; s; i++)
4989         s = strchr(s + 1, '\n');
4990     dd->cspec->nportcntrs = i - 1;
4991     dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
4992     for (i = 0; i < dd->num_pports; ++i) {
4993         dd->pport[i].cpspec->portcntrs =
4994             kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
4995                       GFP_KERNEL);
4996     }
4997 }
4998 
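     /*
      * Return either the device counter name string (when namep is non-NULL)
      * or a snapshot of the device counter values (via cntrp), for the
      * counter read interface used by utilities such as ipathstats.
      */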
4999 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5000                   u64 **cntrp)
5001 {
5002     u32 ret;
5003 
5004     if (namep) {
5005         ret = dd->cspec->cntrnamelen;
5006         if (pos >= ret)
5007             ret = 0; /* final read after getting everything */
5008         else
5009             *namep = (char *) cntr7322names;
5010     } else {
5011         u64 *cntr = dd->cspec->cntrs;
5012         int i;
5013 
5014         ret = dd->cspec->ncntrs * sizeof(u64);
5015         if (!cntr || pos >= ret) {
5016             /* everything read, or couldn't get memory */
5017             ret = 0;
5018             goto done;
5019         }
5020         *cntrp = cntr;
5021         for (i = 0; i < dd->cspec->ncntrs; i++)
5022             if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5023                 *cntr++ = read_7322_creg(dd,
5024                              cntr7322indices[i] &
5025                              _PORT_CNTR_IDXMASK);
5026             else
5027                 *cntr++ = read_7322_creg32(dd,
5028                                cntr7322indices[i]);
5029     }
5030 done:
5031     return ret;
5032 }
5033 
5034 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5035                   char **namep, u64 **cntrp)
5036 {
5037     u32 ret;
5038 
5039     if (namep) {
5040         ret = dd->cspec->portcntrnamelen;
5041         if (pos >= ret)
5042             ret = 0; /* final read after getting everything */
5043         else
5044             *namep = (char *)portcntr7322names;
5045     } else {
5046         struct qib_pportdata *ppd = &dd->pport[port];
5047         u64 *cntr = ppd->cpspec->portcntrs;
5048         int i;
5049 
5050         ret = dd->cspec->nportcntrs * sizeof(u64);
5051         if (!cntr || pos >= ret) {
5052             /* everything read, or couldn't get memory */
5053             ret = 0;
5054             goto done;
5055         }
5056         *cntrp = cntr;
5057         for (i = 0; i < dd->cspec->nportcntrs; i++) {
5058             if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5059                 *cntr++ = qib_portcntr_7322(ppd,
5060                     portcntr7322indices[i] &
5061                     _PORT_CNTR_IDXMASK);
5062             else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5063                 *cntr++ = read_7322_creg_port(ppd,
5064                        portcntr7322indices[i] &
5065                         _PORT_CNTR_IDXMASK);
5066             else
5067                 *cntr++ = read_7322_creg32_port(ppd,
5068                        portcntr7322indices[i]);
5069         }
5070     }
5071 done:
5072     return ret;
5073 }
5074 
5075 /**
5076  * qib_get_7322_faststats - get word counters from chip before they overflow
5077  * @t: contains a pointer to the qlogic_ib device qib_devdata
5078  *
5079  * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
5080  * real purpose of this function is to maintain the notion of
5081  * "active time", which in turn is only logged into the eeprom,
5082  * which we don't have, yet, for 7322-based boards.
5083  *
5084  * called from add_timer
5085  */
5086 static void qib_get_7322_faststats(struct timer_list *t)
5087 {
5088     struct qib_devdata *dd = from_timer(dd, t, stats_timer);
5089     struct qib_pportdata *ppd;
5090     unsigned long flags;
5091     u64 traffic_wds;
5092     int pidx;
5093 
5094     for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5095         ppd = dd->pport + pidx;
5096 
5097         /*
5098          * If the port isn't enabled or isn't operational, or if
5099          * diags is running (which can cause memory diags to fail),
5100          * skip this port this time.
5101          */
5102         if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5103             || dd->diag_client)
5104             continue;
5105 
5106         /*
5107          * Maintain an activity timer, based on traffic
5108          * exceeding a threshold, so we need to check the word-counts
5109          * even if they are 64-bit.
5110          */
5111         traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5112             qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5113         spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5114         traffic_wds -= ppd->dd->traffic_wds;
5115         ppd->dd->traffic_wds += traffic_wds;
5116         spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
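             /*
              * Once the link has been active past qdr_dfe_time, turn
              * qdr_dfe_on off and write the initial static-adaptation
              * value (r1 variant if applicable).
              */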
5117         if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5118                         QIB_IB_QDR) &&
5119             (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5120                     QIBL_LINKACTIVE)) &&
5121             ppd->cpspec->qdr_dfe_time &&
5122             time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5123             ppd->cpspec->qdr_dfe_on = 0;
5124 
5125             qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5126                         ppd->dd->cspec->r1 ?
5127                         QDR_STATIC_ADAPT_INIT_R1 :
5128                         QDR_STATIC_ADAPT_INIT);
5129             force_h1(ppd);
5130         }
5131     }
5132     mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5133 }
5134 
5135 /*
5136  * If we were using MSIx, try to fall back to INTx.
5137  */
5138 static int qib_7322_intr_fallback(struct qib_devdata *dd)
5139 {
5140     if (!dd->cspec->num_msix_entries)
5141         return 0; /* already using INTx */
5142 
5143     qib_devinfo(dd->pcidev,
5144         "MSIx interrupt not detected, trying INTx interrupts\n");
5145     qib_7322_free_irq(dd);
5146     if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
5147         qib_dev_err(dd, "Failed to enable INTx\n");
5148     qib_setup_7322_interrupt(dd, 0);
5149     return 1;
5150 }
5151 
5152 /*
5153  * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5154  * than resetting the IBC or external link state, and useful in some
5155  * cases to cause some retraining.  To do this right, we reset IBC
5156  * as well, then return to the previous state (which may still be in reset).
5157  * NOTE: some callers of this "know" that this writes the current value
5158  * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5159  * check all callers.
5160  */
5161 static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5162 {
5163     u64 val;
5164     struct qib_devdata *dd = ppd->dd;
5165     const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5166         SYM_MASK(IBPCSConfig_0, xcv_treset) |
5167         SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5168 
5169     val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5170     qib_write_kreg(dd, kr_hwerrmask,
5171                dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5172     qib_write_kreg_port(ppd, krp_ibcctrl_a,
5173                 ppd->cpspec->ibcctrl_a &
5174                 ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5175 
5176     qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5177     qib_read_kreg32(dd, kr_scratch);
5178     qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5179     qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5180     qib_write_kreg(dd, kr_scratch, 0ULL);
5181     qib_write_kreg(dd, kr_hwerrclear,
5182                SYM_MASK(HwErrClear, statusValidNoEopClear));
5183     qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5184 }
5185 
5186 /*
5187  * This code for non-IBTA-compliant IB speed negotiation is only known to
5188  * work for the SDR to DDR transition, and only between an HCA and a switch
5189  * with recent firmware.  It is based on observed heuristics, rather than
5190  * actual knowledge of the non-compliant speed negotiation.
5191  * It has a number of hard-coded fields, since the hope is to rewrite this
5192  * when a spec is available on how the negoation is intended to work.
5193  */
5194 static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5195                  u32 dcnt, u32 *data)
5196 {
5197     int i;
5198     u64 pbc;
5199     u32 __iomem *piobuf;
5200     u32 pnum, control, len;
5201     struct qib_devdata *dd = ppd->dd;
5202 
5203     i = 0;
5204     len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
5205     control = qib_7322_setpbc_control(ppd, len, 0, 15);
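     /* PBC word: control flags in the upper 32 bits, packet length in dwords in the lower 32. */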
5206     pbc = ((u64) control << 32) | len;
5207     while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5208         if (i++ > 15)
5209             return;
5210         udelay(2);
5211     }
5212     /* disable header check on this packet, since it can't be valid */
5213     dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5214     writeq(pbc, piobuf);
5215     qib_flush_wc();
5216     qib_pio_copy(piobuf + 2, hdr, 7);
5217     qib_pio_copy(piobuf + 9, data, dcnt);
5218     if (dd->flags & QIB_USE_SPCL_TRIG) {
5219         u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5220 
5221         qib_flush_wc();
5222         __raw_writel(0xaebecede, piobuf + spcl_off);
5223     }
5224     qib_flush_wc();
5225     qib_sendbuf_done(dd, pnum);
5226     /* and re-enable hdr check */
5227     dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5228 }
5229 
5230 /*
5231  * _start packet gets sent twice at start, _done gets sent twice at end
5232  */
5233 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5234 {
5235     struct qib_devdata *dd = ppd->dd;
5236     static u32 swapped;
5237     u32 dw, i, hcnt, dcnt, *data;
5238     static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5239     static u32 madpayload_start[0x40] = {
5240         0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5241         0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5242         0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5243         };
5244     static u32 madpayload_done[0x40] = {
5245         0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5246         0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5247         0x40000001, 0x1388, 0x15e, /* rest 0's */
5248         };
5249 
5250     dcnt = ARRAY_SIZE(madpayload_start);
5251     hcnt = ARRAY_SIZE(hdr);
5252     if (!swapped) {
5253         /* for maintainability, do it at runtime */
5254         for (i = 0; i < hcnt; i++) {
5255             dw = (__force u32) cpu_to_be32(hdr[i]);
5256             hdr[i] = dw;
5257         }
5258         for (i = 0; i < dcnt; i++) {
5259             dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5260             madpayload_start[i] = dw;
5261             dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5262             madpayload_done[i] = dw;
5263         }
5264         swapped = 1;
5265     }
5266 
5267     data = which ? madpayload_done : madpayload_start;
5268 
5269     autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5270     qib_read_kreg64(dd, kr_scratch);
5271     udelay(2);
5272     autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5273     qib_read_kreg64(dd, kr_scratch);
5274     udelay(2);
5275 }
5276 
5277 /*
5278  * Do the absolute minimum to cause an IB speed change, and make it
5279  * ready, but don't actually trigger the change.   The caller will
5280  * do that when ready (if link is in Polling training state, it will
5281  * happen immediately, otherwise when link next goes down)
5282  *
5283  * This routine should only be used as part of the DDR autonegotiation
5284  * code for devices that are not compliant with IB 1.2 (or code that
5285  * fixes things up for same).
5286  *
5287  * When the link has gone down and autoneg is enabled, or autoneg has
5288  * failed and we give up until next time, we set both speeds, and
5289  * then we want IBTA enabled as well as "use max enabled speed".
5290  */
5291 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5292 {
5293     u64 newctrlb;
5294 
5295     newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5296                     IBA7322_IBC_IBTA_1_2_MASK |
5297                     IBA7322_IBC_MAX_SPEED_MASK);
5298 
5299     if (speed & (speed - 1)) /* multiple speeds */
5300         newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5301                     IBA7322_IBC_IBTA_1_2_MASK |
5302                     IBA7322_IBC_MAX_SPEED_MASK;
5303     else
5304         newctrlb |= speed == QIB_IB_QDR ?
5305             IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5306             ((speed == QIB_IB_DDR ?
5307               IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5308 
5309     if (newctrlb == ppd->cpspec->ibcctrl_b)
5310         return;
5311 
5312     ppd->cpspec->ibcctrl_b = newctrlb;
5313     qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5314     qib_write_kreg(ppd->dd, kr_scratch, 0);
5315 }
5316 
5317 /*
5318  * This routine is only used when we are not talking to another
5319  * IB 1.2-compliant device that we think can do DDR.
5320  * (This includes all existing switch chips as of Oct 2007.)
5321  * 1.2-compliant devices go directly to DDR prior to reaching INIT
5322  */
5323 static void try_7322_autoneg(struct qib_pportdata *ppd)
5324 {
5325     unsigned long flags;
5326 
5327     spin_lock_irqsave(&ppd->lflags_lock, flags);
5328     ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5329     spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5330     qib_autoneg_7322_send(ppd, 0);
5331     set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5332     qib_7322_mini_pcs_reset(ppd);
5333     /* 2 msec is minimum length of a poll cycle */
5334     queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5335                msecs_to_jiffies(2));
5336 }
5337 
5338 /*
5339  * Handle the empirically determined mechanism for auto-negotiation
5340  * of DDR speed with switches.
5341  */
5342 static void autoneg_7322_work(struct work_struct *work)
5343 {
5344     struct qib_pportdata *ppd;
5345     u32 i;
5346     unsigned long flags;
5347 
5348     ppd = container_of(work, struct qib_chippport_specific,
5349                 autoneg_work.work)->ppd;
5350 
5351     /*
5352      * Busy-wait for this first part; it should take at most a
5353      * few hundred usec, since we scheduled ourselves for 2 msec.
5354      */
5355     for (i = 0; i < 25; i++) {
5356         if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5357              == IB_7322_LT_STATE_POLLQUIET) {
5358             qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5359             break;
5360         }
5361         udelay(100);
5362     }
5363 
5364     if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5365         goto done; /* we got there early or told to stop */
5366 
5367     /* we expect this to timeout */
5368     if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5369                    !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5370                    msecs_to_jiffies(90)))
5371         goto done;
5372     qib_7322_mini_pcs_reset(ppd);
5373 
5374     /* we expect this to timeout */
5375     if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5376                    !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5377                    msecs_to_jiffies(1700)))
5378         goto done;
5379     qib_7322_mini_pcs_reset(ppd);
5380 
5381     set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5382 
5383     /*
5384      * Wait up to 250 msec for link to train and get to INIT at DDR;
5385      * this should terminate early.
5386      */
5387     wait_event_timeout(ppd->cpspec->autoneg_wait,
5388         !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5389         msecs_to_jiffies(250));
5390 done:
5391     if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5392         spin_lock_irqsave(&ppd->lflags_lock, flags);
5393         ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5394         if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5395             ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5396             ppd->cpspec->autoneg_tries = 0;
5397         }
5398         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5399         set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5400     }
5401 }
5402 
5403 /*
5404  * This routine is used to request IPG set in the QLogic switch.
5405  * Only called if r1.
5406  */
5407 static void try_7322_ipg(struct qib_pportdata *ppd)
5408 {
5409     struct qib_ibport *ibp = &ppd->ibport_data;
5410     struct ib_mad_send_buf *send_buf;
5411     struct ib_mad_agent *agent;
5412     struct ib_smp *smp;
5413     unsigned delay;
5414     int ret;
5415 
5416     agent = ibp->rvp.send_agent;
5417     if (!agent)
5418         goto retry;
5419 
5420     send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5421                       IB_MGMT_MAD_DATA, GFP_ATOMIC,
5422                       IB_MGMT_BASE_VERSION);
5423     if (IS_ERR(send_buf))
5424         goto retry;
5425 
5426     if (!ibp->smi_ah) {
5427         struct ib_ah *ah;
5428 
5429         ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5430         if (IS_ERR(ah))
5431             ret = PTR_ERR(ah);
5432         else {
5433             send_buf->ah = ah;
5434             ibp->smi_ah = ibah_to_rvtah(ah);
5435             ret = 0;
5436         }
5437     } else {
5438         send_buf->ah = &ibp->smi_ah->ibah;
5439         ret = 0;
5440     }
5441 
5442     smp = send_buf->mad;
5443     smp->base_version = IB_MGMT_BASE_VERSION;
5444     smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5445     smp->class_version = 1;
5446     smp->method = IB_MGMT_METHOD_SEND;
5447     smp->hop_cnt = 1;
5448     smp->attr_id = QIB_VENDOR_IPG;
5449     smp->attr_mod = 0;
5450 
5451     if (!ret)
5452         ret = ib_post_send_mad(send_buf, NULL);
5453     if (ret)
5454         ib_free_send_mad(send_buf);
5455 retry:
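         /* Exponential backoff between attempts: 2, 4, 8, ... msec. */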
5456     delay = 2 << ppd->cpspec->ipg_tries;
5457     queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5458                msecs_to_jiffies(delay));
5459 }
5460 
5461 /*
5462  * Timeout handler for setting IPG.
5463  * Only called if r1.
5464  */
5465 static void ipg_7322_work(struct work_struct *work)
5466 {
5467     struct qib_pportdata *ppd;
5468 
5469     ppd = container_of(work, struct qib_chippport_specific,
5470                ipg_work.work)->ppd;
5471     if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5472         && ++ppd->cpspec->ipg_tries <= 10)
5473         try_7322_ipg(ppd);
5474 }
5475 
5476 static u32 qib_7322_iblink_state(u64 ibcs)
5477 {
5478     u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5479 
5480     switch (state) {
5481     case IB_7322_L_STATE_INIT:
5482         state = IB_PORT_INIT;
5483         break;
5484     case IB_7322_L_STATE_ARM:
5485         state = IB_PORT_ARMED;
5486         break;
5487     case IB_7322_L_STATE_ACTIVE:
5488     case IB_7322_L_STATE_ACT_DEFER:
5489         state = IB_PORT_ACTIVE;
5490         break;
5491     default:
5492         fallthrough;
5493     case IB_7322_L_STATE_DOWN:
5494         state = IB_PORT_DOWN;
5495         break;
5496     }
5497     return state;
5498 }
5499 
5500 /* returns the IBTA port state, rather than the IBC link training state */
5501 static u8 qib_7322_phys_portstate(u64 ibcs)
5502 {
5503     u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5504     return qib_7322_physportstate[state];
5505 }
5506 
5507 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5508 {
5509     int ret = 0, symadj = 0;
5510     unsigned long flags;
5511     int mult;
5512 
5513     spin_lock_irqsave(&ppd->lflags_lock, flags);
5514     ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5515     spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5516 
5517     /* Update our picture of width and speed from chip */
5518     if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5519         ppd->link_speed_active = QIB_IB_QDR;
5520         mult = 4;
5521     } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5522         ppd->link_speed_active = QIB_IB_DDR;
5523         mult = 2;
5524     } else {
5525         ppd->link_speed_active = QIB_IB_SDR;
5526         mult = 1;
5527     }
5528     if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5529         ppd->link_width_active = IB_WIDTH_4X;
5530         mult *= 4;
5531     } else
5532         ppd->link_width_active = IB_WIDTH_1X;
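     /* Map the combined speed x width multiple to an IB rate, then to a delay multiplier. */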
5533     ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5534 
5535     if (!ibup) {
5536         u64 clr;
5537 
5538         /* Link went down. */
5539         /* do IPG MAD again after linkdown, even if last time failed */
5540         ppd->cpspec->ipg_tries = 0;
5541         clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5542             (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5543              SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5544         if (clr)
5545             qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5546         if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5547                      QIBL_IB_AUTONEG_INPROG)))
5548             set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5549         if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5550             struct qib_qsfp_data *qd =
5551                 &ppd->cpspec->qsfp_data;
5552             /* unlock the Tx settings, speed may change */
5553             qib_write_kreg_port(ppd, krp_tx_deemph_override,
5554                 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5555                 reset_tx_deemphasis_override));
5556             qib_cancel_sends(ppd);
5557             /* on link down, ensure sane pcs state */
5558             qib_7322_mini_pcs_reset(ppd);
5559             /* schedule the qsfp refresh which should turn the link
5560                off */
5561             if (ppd->dd->flags & QIB_HAS_QSFP) {
5562                 qd->t_insert = jiffies;
5563                 queue_work(ib_wq, &qd->work);
5564             }
5565             spin_lock_irqsave(&ppd->sdma_lock, flags);
5566             if (__qib_sdma_running(ppd))
5567                 __qib_sdma_process_event(ppd,
5568                     qib_sdma_event_e70_go_idle);
5569             spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5570         }
5571         clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5572         if (clr == ppd->cpspec->iblnkdownsnap)
5573             ppd->cpspec->iblnkdowndelta++;
5574     } else {
5575         if (qib_compat_ddr_negotiate &&
5576             !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5577                      QIBL_IB_AUTONEG_INPROG)) &&
5578             ppd->link_speed_active == QIB_IB_SDR &&
5579             (ppd->link_speed_enabled & QIB_IB_DDR)
5580             && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5581             /* we are SDR, and auto-negotiation enabled */
5582             ++ppd->cpspec->autoneg_tries;
5583             if (!ppd->cpspec->ibdeltainprog) {
5584                 ppd->cpspec->ibdeltainprog = 1;
5585                 ppd->cpspec->ibsymdelta +=
5586                     read_7322_creg32_port(ppd,
5587                         crp_ibsymbolerr) -
5588                         ppd->cpspec->ibsymsnap;
5589                 ppd->cpspec->iblnkerrdelta +=
5590                     read_7322_creg32_port(ppd,
5591                         crp_iblinkerrrecov) -
5592                         ppd->cpspec->iblnkerrsnap;
5593             }
5594             try_7322_autoneg(ppd);
5595             ret = 1; /* no other IB status change processing */
5596         } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5597                ppd->link_speed_active == QIB_IB_SDR) {
5598             qib_autoneg_7322_send(ppd, 1);
5599             set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5600             qib_7322_mini_pcs_reset(ppd);
5601             udelay(2);
5602             ret = 1; /* no other IB status change processing */
5603         } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5604                (ppd->link_speed_active & QIB_IB_DDR)) {
5605             spin_lock_irqsave(&ppd->lflags_lock, flags);
5606             ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5607                      QIBL_IB_AUTONEG_FAILED);
5608             spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5609             ppd->cpspec->autoneg_tries = 0;
5610             /* re-enable SDR, for next link down */
5611             set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5612             wake_up(&ppd->cpspec->autoneg_wait);
5613             symadj = 1;
5614         } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5615             /*
5616              * Clear autoneg failure flag, and do setup
5617              * so we'll try next time link goes down and
5618              * back to INIT (possibly connected to a
5619              * different device).
5620              */
5621             spin_lock_irqsave(&ppd->lflags_lock, flags);
5622             ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5623             spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5624             ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5625             symadj = 1;
5626         }
5627         if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5628             symadj = 1;
5629             if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5630                 try_7322_ipg(ppd);
5631             if (!ppd->cpspec->recovery_init)
5632                 setup_7322_link_recovery(ppd, 0);
5633             ppd->cpspec->qdr_dfe_time = jiffies +
5634                 msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5635         }
5636         ppd->cpspec->ibmalfusesnap = 0;
5637         ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5638             crp_errlink);
5639     }
5640     if (symadj) {
5641         ppd->cpspec->iblnkdownsnap =
5642             read_7322_creg32_port(ppd, crp_iblinkdown);
5643         if (ppd->cpspec->ibdeltainprog) {
5644             ppd->cpspec->ibdeltainprog = 0;
5645             ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5646                 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5647             ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5648                 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5649         }
5650     } else if (!ibup && qib_compat_ddr_negotiate &&
5651            !ppd->cpspec->ibdeltainprog &&
5652             !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5653         ppd->cpspec->ibdeltainprog = 1;
5654         ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5655             crp_ibsymbolerr);
5656         ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5657             crp_iblinkerrrecov);
5658     }
5659 
5660     if (!ret)
5661         qib_setup_7322_setextled(ppd, ibup);
5662     return ret;
5663 }
5664 
5665 /*
5666  * Does read/modify/write to appropriate registers to
5667  * set output and direction bits selected by mask.
5668  * These are in their canonical positions (e.g. the lsb of
5669  * dir will end up in D48 of extctrl on existing chips).
5670  * Returns the contents of the GP Inputs.
5671  */
5672 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5673 {
5674     u64 read_val, new_out;
5675     unsigned long flags;
5676 
5677     if (mask) {
5678         /* some bits being written, lock access to GPIO */
5679         dir &= mask;
5680         out &= mask;
5681         spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5682         dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5683         dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5684         new_out = (dd->cspec->gpio_out & ~mask) | out;
5685 
5686         qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5687         qib_write_kreg(dd, kr_gpio_out, new_out);
5688         dd->cspec->gpio_out = new_out;
5689         spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5690     }
5691     /*
5692      * It is unlikely that a read at this time would get valid
5693      * data on a pin whose direction line was set in the same
5694      * call to this function. We include the read here because
5695      * that allows us to potentially combine a change on one pin with
5696      * a read on another, and because the old code did something like
5697      * this.
5698      */
5699     read_val = qib_read_kreg64(dd, kr_extstatus);
5700     return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5701 }
5702 
5703 /* Enable writes to config EEPROM, if possible. Returns previous state */
5704 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5705 {
5706     int prev_wen;
5707     u32 mask;
5708 
5709     mask = 1 << QIB_EEPROM_WEN_NUM;
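    /*
     * The EEPROM write-enable GPIO (QIB_EEPROM_WEN_NUM) is treated as
     * active-low here: the previous state is the inverse of the pin as
     * read back, and enabling writes drives the pin to 0.
     */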
5710     prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5711     gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5712 
5713     return prev_wen & 1;
5714 }
5715 
5716 /*
5717  * Read fundamental info we need to use the chip.  These are
5718  * the registers that describe chip capabilities, and are
5719  * saved in shadow registers.
5720  */
5721 static void get_7322_chip_params(struct qib_devdata *dd)
5722 {
5723     u64 val;
5724     u32 piobufs;
5725     int mtu;
5726 
5727     dd->palign = qib_read_kreg32(dd, kr_pagealign);
5728 
5729     dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5730 
5731     dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5732     dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5733     dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5734     dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5735     dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5736 
5737     val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5738     dd->piobcnt2k = val & ~0U;
5739     dd->piobcnt4k = val >> 32;
5740     val = qib_read_kreg64(dd, kr_sendpiosize);
5741     dd->piosize2k = val & ~0U;
5742     dd->piosize4k = val >> 32;
5743 
5744     mtu = ib_mtu_enum_to_int(qib_ibmtu);
5745     if (mtu == -1)
5746         mtu = QIB_DEFAULT_MTU;
5747     dd->pport[0].ibmtu = (u32)mtu;
5748     dd->pport[1].ibmtu = (u32)mtu;
5749 
5750     /* these may be adjusted in init_chip_wc_pat() */
5751     dd->pio2kbase = (u32 __iomem *)
5752         ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5753     dd->pio4kbase = (u32 __iomem *)
5754         ((char __iomem *) dd->kregbase +
5755          (dd->piobufbase >> 32));
5756     /*
5757      * 4K buffers take 2 pages; we use roundup just to be
5758      * paranoid; we calculate it once here, rather than on
5759      * every buf allocation
5760      */
5761     dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5762 
5763     piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5764 
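    /*
     * Each 64-bit pioavail register holds status for 32 send buffers
     * (2 bits per buffer), so sizeof(u64) * BITS_PER_BYTE / 2 == 32 and
     * pioavregs is simply piobufs rounded up to a multiple of 32,
     * divided by 32.
     */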
5765     dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5766         (sizeof(u64) * BITS_PER_BYTE / 2);
5767 }
5768 
5769 /*
5770  * The chip base addresses in cspec and cpspec have to be set
5771  * after possible init_chip_wc_pat(), rather than in
5772  * get_7322_chip_params(), so split out as separate function
5773  */
5774 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5775 {
5776     u32 cregbase;
5777 
5778     cregbase = qib_read_kreg32(dd, kr_counterregbase);
5779 
5780     dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5781         (char __iomem *)dd->kregbase);
5782 
5783     dd->egrtidbase = (u64 __iomem *)
5784         ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5785 
5786     /* port registers are defined as relative to base of chip */
5787     dd->pport[0].cpspec->kpregbase =
5788         (u64 __iomem *)((char __iomem *)dd->kregbase);
5789     dd->pport[1].cpspec->kpregbase =
5790         (u64 __iomem *)(dd->palign +
5791         (char __iomem *)dd->kregbase);
5792     dd->pport[0].cpspec->cpregbase =
5793         (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5794         kr_counterregbase) + (char __iomem *)dd->kregbase);
5795     dd->pport[1].cpspec->cpregbase =
5796         (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5797         kr_counterregbase) + (char __iomem *)dd->kregbase);
5798 }
5799 
5800 /*
5801  * This is a fairly special-purpose observer, so we only support
5802  * the port-specific parts of SendCtrl
5803  */
5804 
5805 #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |       \
5806                SYM_MASK(SendCtrl_0, SDmaEnable) |       \
5807                SYM_MASK(SendCtrl_0, SDmaIntEnable) |    \
5808                SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5809                SYM_MASK(SendCtrl_0, SDmaHalt) |     \
5810                SYM_MASK(SendCtrl_0, IBVLArbiterEn) |    \
5811                SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5812 
5813 static int sendctrl_hook(struct qib_devdata *dd,
5814              const struct diag_observer *op, u32 offs,
5815              u64 *data, u64 mask, int only_32)
5816 {
5817     unsigned long flags;
5818     unsigned idx;
5819     unsigned pidx;
5820     struct qib_pportdata *ppd = NULL;
5821     u64 local_data, all_bits;
5822 
5823     /*
5824      * The fixed correspondence between Physical ports and pports is
5825      * severed. We need to hunt for the ppd that corresponds
5826      * to the offset we got. And we have to do that without admitting
5827      * we know the stride, apparently.
5828      */
5829     for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5830         u64 __iomem *psptr;
5831         u32 psoffs;
5832 
5833         ppd = dd->pport + pidx;
5834         if (!ppd->cpspec->kpregbase)
5835             continue;
5836 
5837         psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5838         psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5839         if (psoffs == offs)
5840             break;
5841     }
5842 
5843     /* If pport is not being managed by driver, just avoid shadows. */
5844     if (pidx >= dd->num_pports)
5845         ppd = NULL;
5846 
5847     /* In any case, "idx" is flat index in kreg space */
5848     idx = offs / sizeof(u64);
5849 
5850     all_bits = ~0ULL;
5851     if (only_32)
5852         all_bits >>= 32;
5853 
5854     spin_lock_irqsave(&dd->sendctrl_lock, flags);
5855     if (!ppd || (mask & all_bits) != all_bits) {
5856         /*
5857          * At least some mask bits are zero, so we need
5858          * to read. The judgement call is whether from
5859          * reg or shadow. First-cut: read reg, and complain
5860          * if any bits which should be shadowed are different
5861          * from their shadowed value.
5862          */
5863         if (only_32)
5864             local_data = (u64)qib_read_kreg32(dd, idx);
5865         else
5866             local_data = qib_read_kreg64(dd, idx);
5867         *data = (local_data & ~mask) | (*data & mask);
5868     }
5869     if (mask) {
5870         /*
5871          * At least some mask bits are one, so we need
5872          * to write, but only shadow some bits.
5873          */
5874         u64 sval, tval; /* Shadowed, transient */
5875 
5876         /*
5877          * New shadow val is bits we don't want to touch,
5878          * ORed with bits we do, that are intended for shadow.
5879          */
5880         if (ppd) {
5881             sval = ppd->p_sendctrl & ~mask;
5882             sval |= *data & SENDCTRL_SHADOWED & mask;
5883             ppd->p_sendctrl = sval;
5884         } else
5885             sval = *data & SENDCTRL_SHADOWED & mask;
5886         tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5887         qib_write_kreg(dd, idx, tval);
5888         qib_write_kreg(dd, kr_scratch, 0ULL);
5889     }
5890     spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5891     return only_32 ? 4 : 8;
5892 }
5893 
5894 static const struct diag_observer sendctrl_0_observer = {
5895     sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5896     KREG_IDX(SendCtrl_0) * sizeof(u64)
5897 };
5898 
5899 static const struct diag_observer sendctrl_1_observer = {
5900     sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5901     KREG_IDX(SendCtrl_1) * sizeof(u64)
5902 };
5903 
5904 static ushort sdma_fetch_prio = 8;
5905 module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5906 MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5907 
5908 /* Besides logging QSFP events, we set appropriate TxDDS values */
5909 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5910 
5911 static void qsfp_7322_event(struct work_struct *work)
5912 {
5913     struct qib_qsfp_data *qd;
5914     struct qib_pportdata *ppd;
5915     unsigned long pwrup;
5916     unsigned long flags;
5917     int ret;
5918     u32 le2;
5919 
5920     qd = container_of(work, struct qib_qsfp_data, work);
5921     ppd = qd->ppd;
5922     pwrup = qd->t_insert +
5923         msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5924 
5925     /* Delay for 20 msecs to allow ModPrs resistor to setup */
5926     mdelay(QSFP_MODPRS_LAG_MSEC);
5927 
5928     if (!qib_qsfp_mod_present(ppd)) {
5929         ppd->cpspec->qsfp_data.modpresent = 0;
5930         /* Set the physical link to disabled */
5931         qib_set_ib_7322_lstate(ppd, 0,
5932                        QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5933         spin_lock_irqsave(&ppd->lflags_lock, flags);
5934         ppd->lflags &= ~QIBL_LINKV;
5935         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5936     } else {
5937         /*
5938          * Some QSFPs not only do not respond until the full power-up
5939          * time, but may behave badly if we try. So hold off responding
5940          * to insertion.
5941          */
5942         while (1) {
5943             if (time_is_before_jiffies(pwrup))
5944                 break;
5945             msleep(20);
5946         }
5947 
5948         ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5949 
5950         /*
5951          * Need to change LE2 back to defaults if we couldn't
5952          * read the cable type (to handle cable swaps), so do this
5953          * even on failure to read cable information.  We don't
5954          * get here for QME, so IS_QME check not needed here.
5955          */
5956         if (!ret && !ppd->dd->cspec->r1) {
5957             if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5958                 le2 = LE2_QME;
5959             else if (qd->cache.atten[1] >= qib_long_atten &&
5960                  QSFP_IS_CU(qd->cache.tech))
5961                 le2 = LE2_5m;
5962             else
5963                 le2 = LE2_DEFAULT;
5964         } else
5965             le2 = LE2_DEFAULT;
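        /*
         * LE2 is a per-cable SerDes tuning value: active/far-end cables
         * get the QME setting, long copper runs get the 5m setting, and
         * everything else the default; it lands in bits 9:7 of IB
         * SerDes register 13 on all channels below.
         */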
5966         ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5967         /*
5968          * We always change parameters, since we can choose
5969          * values for cables without eeproms, and the cable may have
5970          * changed from a cable with full or partial eeprom content
5971          * to one with partial or no content.
5972          */
5973         init_txdds_table(ppd, 0);
5974         /* The physical link is being re-enabled only when the
5975          * previous state was DISABLED and the VALID bit is not
5976          * set. This should only happen when the cable has been
5977          * physically pulled. */
5978         if (!ppd->cpspec->qsfp_data.modpresent &&
5979             (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
5980             ppd->cpspec->qsfp_data.modpresent = 1;
5981             qib_set_ib_7322_lstate(ppd, 0,
5982                 QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5983             spin_lock_irqsave(&ppd->lflags_lock, flags);
5984             ppd->lflags |= QIBL_LINKV;
5985             spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5986         }
5987     }
5988 }
5989 
5990 /*
5991  * There is little we can do but complain to the user if QSFP
5992  * initialization fails.
5993  */
5994 static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5995 {
5996     unsigned long flags;
5997     struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5998     struct qib_devdata *dd = ppd->dd;
5999     u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6000 
6001     mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
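    /*
     * Port 2's ModPrsN pin sits QSFP_GPIO_PORT2_SHIFT bits above port
     * 1's.  Inverting the pin in EXTCtrl and adding it to gpio_mask
     * below arms a GPIO interrupt on module insertion/removal.
     */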
6002     qd->ppd = ppd;
6003     qib_qsfp_init(qd, qsfp_7322_event);
6004     spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6005     dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6006     dd->cspec->gpio_mask |= mod_prs_bit;
6007     qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6008     qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6009     spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6010 }
6011 
6012 /*
6013  * called at device initialization time, and also if the txselect
6014  * module parameter is changed.  This is used for cables that don't
6015  * have valid QSFP EEPROMs (not present, or attenuation is zero).
6016  * We initialize to the default, then if there is a specific
6017  * unit,port match, we use that (and set it immediately, for the
6018  * current speed, if the link is at INIT or better).
6019  * String format is "default# unit#,port#=# ... u,p=#"; separators must
6020  * be SPACE characters.  A newline terminates.  The u,p=# tuples may
6021  * optionally have "u,p=#,#", where the final # is the H1 value.
6022  * The last specific match is used (actually, all are used, but last
6023  * one is the one that winds up set); if none at all, fall back on default.
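 * Example (hypothetical values): "2 0,1=11 0,2=12,7" sets the default
 * index to 2, unit 0 port 1 to index 11, and unit 0 port 2 to index 12
 * with an H1 value of 7.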
6024  */
6025 static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6026 {
6027     char *nxt, *str;
6028     u32 pidx, unit, port, deflt, h1;
6029     unsigned long val;
6030     int any = 0, seth1;
6031     int txdds_size;
6032 
6033     str = txselect_list;
6034 
6035     /* default number is validated in setup_txselect() */
6036     deflt = simple_strtoul(str, &nxt, 0);
6037     for (pidx = 0; pidx < dd->num_pports; ++pidx)
6038         dd->pport[pidx].cpspec->no_eep = deflt;
6039 
6040     txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6041     if (IS_QME(dd) || IS_QMH(dd))
6042         txdds_size += TXDDS_MFG_SZ;
6043 
6044     while (*nxt && nxt[1]) {
6045         str = ++nxt;
6046         unit = simple_strtoul(str, &nxt, 0);
6047         if (nxt == str || !*nxt || *nxt != ',') {
6048             while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6049                 ;
6050             continue;
6051         }
6052         str = ++nxt;
6053         port = simple_strtoul(str, &nxt, 0);
6054         if (nxt == str || *nxt != '=') {
6055             while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6056                 ;
6057             continue;
6058         }
6059         str = ++nxt;
6060         val = simple_strtoul(str, &nxt, 0);
6061         if (nxt == str) {
6062             while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6063                 ;
6064             continue;
6065         }
6066         if (val >= txdds_size)
6067             continue;
6068         seth1 = 0;
6069         h1 = 0; /* gcc thinks it might be used uninitted */
6070         if (*nxt == ',' && nxt[1]) {
6071             str = ++nxt;
6072             h1 = (u32)simple_strtoul(str, &nxt, 0);
6073             if (nxt == str)
6074                 while (*nxt && *nxt++ != ' ') /* skip */
6075                     ;
6076             else
6077                 seth1 = 1;
6078         }
6079         for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6080              ++pidx) {
6081             struct qib_pportdata *ppd = &dd->pport[pidx];
6082 
6083             if (ppd->port != port || !ppd->link_speed_supported)
6084                 continue;
6085             ppd->cpspec->no_eep = val;
6086             if (seth1)
6087                 ppd->cpspec->h1_val = h1;
6088             /* now change the IBC and serdes, overriding generic */
6089             init_txdds_table(ppd, 1);
6090             /* Re-enable the physical state machine on mezz boards
6091              * now that the correct settings have been set.
6092              * QSFP boards are handled by the QSFP event handler */
6093             if (IS_QMH(dd) || IS_QME(dd))
6094                 qib_set_ib_7322_lstate(ppd, 0,
6095                         QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6096             any++;
6097         }
6098         if (*nxt == '\n')
6099             break; /* done */
6100     }
6101     if (change && !any) {
6102         /* no specific setting, use the default.
6103          * Change the IBC and serdes, but since it's
6104          * general, don't override specific settings.
6105          */
6106         for (pidx = 0; pidx < dd->num_pports; ++pidx)
6107             if (dd->pport[pidx].link_speed_supported)
6108                 init_txdds_table(&dd->pport[pidx], 0);
6109     }
6110 }
6111 
6112 /* handle the txselect parameter changing */
6113 static int setup_txselect(const char *str, const struct kernel_param *kp)
6114 {
6115     struct qib_devdata *dd;
6116     unsigned long index, val;
6117     char *n;
6118 
6119     if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
6120         pr_info("txselect_values string too long\n");
6121         return -ENOSPC;
6122     }
6123     val = simple_strtoul(str, &n, 0);
6124     if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6125                 TXDDS_MFG_SZ)) {
6126         pr_info("txselect_values must start with a number < %d\n",
6127             TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6128         return -EINVAL;
6129     }
6130     strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
6131 
6132     xa_for_each(&qib_dev_table, index, dd)
6133         if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6134             set_no_qsfp_atten(dd, 1);
6135     return 0;
6136 }
6137 
6138 /*
6139  * Write the final few registers that depend on some of the
6140  * init setup.  Done late in init, just before bringing up
6141  * the serdes.
6142  */
6143 static int qib_late_7322_initreg(struct qib_devdata *dd)
6144 {
6145     int ret = 0, n;
6146     u64 val;
6147 
6148     qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6149     qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6150     qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6151     qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6152     val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6153     if (val != dd->pioavailregs_phys) {
6154         qib_dev_err(dd,
6155             "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6156             (unsigned long) dd->pioavailregs_phys,
6157             (unsigned long long) val);
6158         ret = -EINVAL;
6159     }
6160 
6161     n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6162     qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6163     /* the driver sends get pkey, lid, etc., so enable checking there too, to catch bugs */
6164     qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6165 
6166     qib_register_observer(dd, &sendctrl_0_observer);
6167     qib_register_observer(dd, &sendctrl_1_observer);
6168 
6169     dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6170     qib_write_kreg(dd, kr_control, dd->control);
6171     /*
6172      * Set SendDmaFetchPriority and init Tx params, including
6173      * QSFP handler on boards that have QSFP.
6174      * First set our default attenuation entry for cables that
6175      * don't have valid attenuation.
6176      */
6177     set_no_qsfp_atten(dd, 0);
6178     for (n = 0; n < dd->num_pports; ++n) {
6179         struct qib_pportdata *ppd = dd->pport + n;
6180 
6181         qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6182                     sdma_fetch_prio & 0xf);
6183         /* Initialize qsfp if present on board. */
6184         if (dd->flags & QIB_HAS_QSFP)
6185             qib_init_7322_qsfp(ppd);
6186     }
6187     dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6188     qib_write_kreg(dd, kr_control, dd->control);
6189 
6190     return ret;
6191 }
6192 
6193 /* per IB port errors.  */
6194 #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6195     MASK_ACROSS(8, 15))
6196 #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6197 #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6198     MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6199     MASK_ACROSS(0, 11))
6200 
6201 /*
6202  * Write the initialization per-port registers that need to be done at
6203  * driver load and after reset completes (i.e., that aren't done as part
6204  * of other init procedures called from qib_init.c).
6205  * Some of these should be redundant on reset, but play safe.
6206  */
6207 static void write_7322_init_portregs(struct qib_pportdata *ppd)
6208 {
6209     u64 val;
6210     int i;
6211 
6212     if (!ppd->link_speed_supported) {
6213         /* no buffer credits for this port */
6214         for (i = 1; i < 8; i++)
6215             qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6216         qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6217         qib_write_kreg(ppd->dd, kr_scratch, 0);
6218         return;
6219     }
6220 
6221     /*
6222      * Set the number of supported virtual lanes in IBC,
6223      * for flow control packet handling on unsupported VLs
6224      */
6225     val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6226     val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6227     val |= (u64)(ppd->vls_supported - 1) <<
6228         SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6229     qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6230 
6231     qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6232 
6233     /* enable tx header checking */
6234     qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6235                 IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6236                 IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6237 
6238     qib_write_kreg_port(ppd, krp_ncmodectrl,
6239         SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6240 
6241     /*
6242      * Unconditionally clear the bufmask bits.  If SDMA is
6243      * enabled, we'll set them appropriately later.
6244      */
6245     qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6246     qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6247     qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6248     if (ppd->dd->cspec->r1)
6249         ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6250 }
6251 
6252 /*
6253  * Write the initialization per-device registers that need to be done at
6254  * driver load and after reset completes (i.e., that aren't done as part
6255  * of other init procedures called from qib_init.c).  Also write per-port
6256  * registers that are affected by overall device config, such as QP mapping.
6257  * Some of these should be redundant on reset, but play safe.
6258  */
6259 static void write_7322_initregs(struct qib_devdata *dd)
6260 {
6261     struct qib_pportdata *ppd;
6262     int i, pidx;
6263     u64 val;
6264 
6265     /* Set Multicast QPs received by port 2 to map to context one. */
6266     qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6267 
6268     for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6269         unsigned n, regno;
6270         unsigned long flags;
6271 
6272         if (dd->n_krcv_queues < 2 ||
6273             !dd->pport[pidx].link_speed_supported)
6274             continue;
6275 
6276         ppd = &dd->pport[pidx];
6277 
6278         /* be paranoid against later code motion, etc. */
6279         spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6280         ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6281         spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6282 
6283         /* Initialize QP to context mapping */
6284         regno = krp_rcvqpmaptable;
6285         val = 0;
6286         if (dd->num_pports > 1)
6287             n = dd->first_user_ctxt / dd->num_pports;
6288         else
6289             n = dd->first_user_ctxt - 1;
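        /*
         * Each RcvQPMapTable register packs six 5-bit context numbers,
         * so the 32-entry map below spans six registers (the last one
         * only partially filled).
         */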
6290         for (i = 0; i < 32; ) {
6291             unsigned ctxt;
6292 
6293             if (dd->num_pports > 1)
6294                 ctxt = (i % n) * dd->num_pports + pidx;
6295             else if (i % n)
6296                 ctxt = (i % n) + 1;
6297             else
6298                 ctxt = ppd->hw_pidx;
6299             val |= ctxt << (5 * (i % 6));
6300             i++;
6301             if (i % 6 == 0) {
6302                 qib_write_kreg_port(ppd, regno, val);
6303                 val = 0;
6304                 regno++;
6305             }
6306         }
6307         qib_write_kreg_port(ppd, regno, val);
6308     }
6309 
6310     /*
6311      * Set up interrupt mitigation for kernel contexts, but
6312      * not user contexts (user contexts use interrupts when
6313      * stalled waiting for any packet, so want those interrupts
6314      * right away).
6315      */
6316     for (i = 0; i < dd->first_user_ctxt; i++) {
6317         dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6318         qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6319     }
6320 
6321     /*
6322      * Initialize all rcvflow tables as disabled.  Application code
6323      * will set up each flow as it uses the flow.
6324      * Doesn't clear any of the error bits that might be set.
6325      */
6326     val = TIDFLOW_ERRBITS; /* these are W1C */
6327     for (i = 0; i < dd->cfgctxts; i++) {
6328         int flow;
6329 
6330         for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6331             qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6332     }
6333 
6334     /*
6335      * Dual-port cards init to dual-port recovery, single-port cards
6336      * to the one port.  Dual-port cards may later adjust to 1 port,
6337      * and then back to dual port if both ports are connected.
6338      */
6339     if (dd->num_pports)
6340         setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6341 }
6342 
6343 static int qib_init_7322_variables(struct qib_devdata *dd)
6344 {
6345     struct qib_pportdata *ppd;
6346     unsigned features, pidx, sbufcnt;
6347     int ret, mtu;
6348     u32 sbufs, updthresh;
6349     resource_size_t vl15off;
6350 
6351     /* pport structs are contiguous, allocated after devdata */
6352     ppd = (struct qib_pportdata *)(dd + 1);
6353     dd->pport = ppd;
6354     ppd[0].dd = dd;
6355     ppd[1].dd = dd;
6356 
6357     dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6358 
6359     ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6360     ppd[1].cpspec = &ppd[0].cpspec[1];
6361     ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6362     ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6363 
6364     spin_lock_init(&dd->cspec->rcvmod_lock);
6365     spin_lock_init(&dd->cspec->gpio_lock);
6366 
6367     /* we haven't yet set QIB_PRESENT, so use read directly */
6368     dd->revision = readq(&dd->kregbase[kr_revision]);
6369 
6370     if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6371         qib_dev_err(dd,
6372             "Revision register read failure, giving up initialization\n");
6373         ret = -ENODEV;
6374         goto bail;
6375     }
6376     dd->flags |= QIB_PRESENT;  /* now register routines work */
6377 
6378     dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6379     dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6380     dd->cspec->r1 = dd->minrev == 1;
6381 
6382     get_7322_chip_params(dd);
6383     features = qib_7322_boardname(dd);
6384 
6385     /* now that piobcnt2k and 4k set, we can allocate these */
6386     sbufcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6387 
6388     dd->cspec->sendchkenable = bitmap_zalloc(sbufcnt, GFP_KERNEL);
6389     dd->cspec->sendgrhchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
6390     dd->cspec->sendibchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
6391     if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6392         !dd->cspec->sendibchk) {
6393         ret = -ENOMEM;
6394         goto bail;
6395     }
6396 
6397     ppd = dd->pport;
6398 
6399     /*
6400      * GPIO bits for TWSI data and clock,
6401      * used for serial EEPROM.
6402      */
6403     dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6404     dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6405     dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6406 
6407     dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6408         QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6409         QIB_HAS_THRESH_UPDATE |
6410         (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6411     dd->flags |= qib_special_trigger ?
6412         QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6413 
6414     /*
6415      * Setup initial values.  These may change when PAT is enabled, but
6416      * we need these to do initial chip register accesses.
6417      */
6418     qib_7322_set_baseaddrs(dd);
6419 
6420     mtu = ib_mtu_enum_to_int(qib_ibmtu);
6421     if (mtu == -1)
6422         mtu = QIB_DEFAULT_MTU;
6423 
6424     dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6425     /* all hwerrors become interrupts, unless special purposed */
6426     dd->cspec->hwerrmask = ~0ULL;
6427     /*  link_recovery setup causes these errors, so ignore them,
6428      *  other than clearing them when they occur */
6429     dd->cspec->hwerrmask &=
6430         ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6431           SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6432           HWE_MASK(LATriggered));
6433 
6434     for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6435         struct qib_chippport_specific *cp = ppd->cpspec;
6436 
6437         ppd->link_speed_supported = features & PORT_SPD_CAP;
6438         features >>=  PORT_SPD_CAP_SHIFT;
6439         if (!ppd->link_speed_supported) {
6440             /* single port mode (7340, or configured) */
6441             dd->skip_kctxt_mask |= 1 << pidx;
6442             if (pidx == 0) {
6443                 /* Make sure port is disabled. */
6444                 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6445                 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6446                 ppd[0] = ppd[1];
6447                 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6448                           IBSerdesPClkNotDetectMask_0)
6449                           | SYM_MASK(HwErrMask,
6450                           SDmaMemReadErrMask_0));
6451                 dd->cspec->int_enable_mask &= ~(
6452                      SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6453                      SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6454                      SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6455                      SYM_MASK(IntMask, SDmaIntMask_0) |
6456                      SYM_MASK(IntMask, ErrIntMask_0) |
6457                      SYM_MASK(IntMask, SendDoneIntMask_0));
6458             } else {
6459                 /* Make sure port is disabled. */
6460                 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6461                 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6462                 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6463                           IBSerdesPClkNotDetectMask_1)
6464                           | SYM_MASK(HwErrMask,
6465                           SDmaMemReadErrMask_1));
6466                 dd->cspec->int_enable_mask &= ~(
6467                      SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6468                      SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6469                      SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6470                      SYM_MASK(IntMask, SDmaIntMask_1) |
6471                      SYM_MASK(IntMask, ErrIntMask_1) |
6472                      SYM_MASK(IntMask, SendDoneIntMask_1));
6473             }
6474             continue;
6475         }
6476 
6477         dd->num_pports++;
6478         ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6479         if (ret) {
6480             dd->num_pports--;
6481             goto bail;
6482         }
6483 
6484         ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6485         ppd->link_width_enabled = IB_WIDTH_4X;
6486         ppd->link_speed_enabled = ppd->link_speed_supported;
6487         /*
6488          * Set the initial values to reasonable default, will be set
6489          * for real when link is up.
6490          */
6491         ppd->link_width_active = IB_WIDTH_4X;
6492         ppd->link_speed_active = QIB_IB_SDR;
6493         ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6494         switch (qib_num_cfg_vls) {
6495         case 1:
6496             ppd->vls_supported = IB_VL_VL0;
6497             break;
6498         case 2:
6499             ppd->vls_supported = IB_VL_VL0_1;
6500             break;
6501         default:
6502             qib_devinfo(dd->pcidev,
6503                     "Invalid num_vls %u, using 4 VLs\n",
6504                     qib_num_cfg_vls);
6505             qib_num_cfg_vls = 4;
6506             fallthrough;
6507         case 4:
6508             ppd->vls_supported = IB_VL_VL0_3;
6509             break;
6510         case 8:
6511             if (mtu <= 2048)
6512                 ppd->vls_supported = IB_VL_VL0_7;
6513             else {
6514                 qib_devinfo(dd->pcidev,
6515                         "Invalid num_vls %u for MTU %d , using 4 VLs\n",
6516                         qib_num_cfg_vls, mtu);
6517                 ppd->vls_supported = IB_VL_VL0_3;
6518                 qib_num_cfg_vls = 4;
6519             }
6520             break;
6521         }
6522         ppd->vls_operational = ppd->vls_supported;
6523 
6524         init_waitqueue_head(&cp->autoneg_wait);
6525         INIT_DELAYED_WORK(&cp->autoneg_work,
6526                   autoneg_7322_work);
6527         if (ppd->dd->cspec->r1)
6528             INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6529 
6530         /*
6531          * For Mez and similar cards, no qsfp info, so do
6532          * the "cable info" setup here.  Can be overridden
6533          * in adapter-specific routines.
6534          */
6535         if (!(dd->flags & QIB_HAS_QSFP)) {
6536             if (!IS_QMH(dd) && !IS_QME(dd))
6537                 qib_devinfo(dd->pcidev,
6538                     "IB%u:%u: Unknown mezzanine card type\n",
6539                     dd->unit, ppd->port);
6540             cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6541             /*
6542              * Choose center value as default tx serdes setting
6543              * until changed through module parameter.
6544              */
6545             ppd->cpspec->no_eep = IS_QMH(dd) ?
6546                 TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6547         } else
6548             cp->h1_val = H1_FORCE_VAL;
6549 
6550         /* Avoid writes to chip for mini_init */
6551         if (!qib_mini_init)
6552             write_7322_init_portregs(ppd);
6553 
6554         timer_setup(&cp->chase_timer, reenable_chase, 0);
6555 
6556         ppd++;
6557     }
6558 
6559     dd->rcvhdrentsize = qib_rcvhdrentsize ?
6560         qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6561     dd->rcvhdrsize = qib_rcvhdrsize ?
6562         qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
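    /*
     * On this chip the last two dwords (one u64) of each rcvhdrq entry
     * hold the receive header flags (RHF), hence entsize minus 2.
     */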
6563     dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6564 
6565     /* we always allocate at least 2048 bytes for eager buffers */
6566     dd->rcvegrbufsize = max(mtu, 2048);
6567     dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6568 
6569     qib_7322_tidtemplate(dd);
6570 
6571     /*
6572      * We can request a receive interrupt for 1 or
6573      * more packets from current offset.
6574      */
6575     dd->rhdrhead_intr_off =
6576         (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6577 
6578     /* setup the stats timer; the add_timer is done at end of init */
6579     timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
6580 
6581     dd->ureg_align = 0x10000;  /* 64KB alignment */
6582 
6583     dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6584 
6585     qib_7322_config_ctxts(dd);
6586     qib_set_ctxtcnt(dd);
6587 
6588     /*
6589      * We do not set WC on the VL15 buffers to avoid
6590      * a rare problem with unaligned writes from
6591      * interrupt-flushed store buffers, so we need
6592      * to map those separately here.  We can't solve
6593      * this for the rarely used mtrr case.
6594      */
6595     ret = init_chip_wc_pat(dd, 0);
6596     if (ret)
6597         goto bail;
6598 
6599     /* vl15 buffers start just after the 4k buffers */
6600     vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6601           dd->piobcnt4k * dd->align4k;
6602     dd->piovl15base = ioremap(vl15off,
6603                       NUM_VL15_BUFS * dd->align4k);
6604     if (!dd->piovl15base) {
6605         ret = -ENOMEM;
6606         goto bail;
6607     }
6608 
6609     qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6610 
6611     ret = 0;
6612     if (qib_mini_init)
6613         goto bail;
6614     if (!dd->num_pports) {
6615         qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6616         goto bail; /* no error, so can still figure out why err */
6617     }
6618 
6619     write_7322_initregs(dd);
6620     ret = qib_create_ctxts(dd);
6621     init_7322_cntrnames(dd);
6622 
6623     updthresh = 8U; /* update threshold */
6624 
6625     /* Use all 4KB buffers for kernel SDMA; zero if !SDMA.
6626      * Reserve the update-threshold amount (or 3, whichever is
6627      * greater) for other kernel use, such as sending SMI, MAD, and
6628      * ACKs, unless we aren't enabling SDMA, in which case all the
6629      * 4k bufs go to the kernel.
6630      * If this were less than the update threshold, we could wait
6631      * a long time for an update.  Coded this way because we
6632      * sometimes change the update threshold for various reasons,
6633      * and we want this to remain robust.
6634      */
6635     if (dd->flags & QIB_HAS_SEND_DMA) {
6636         dd->cspec->sdmabufcnt = dd->piobcnt4k;
6637         sbufs = updthresh > 3 ? updthresh : 3;
6638     } else {
6639         dd->cspec->sdmabufcnt = 0;
6640         sbufs = dd->piobcnt4k;
6641     }
6642     dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6643         dd->cspec->sdmabufcnt;
6644     dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6645     dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6646     dd->last_pio = dd->cspec->lastbuf_for_pio;
6647     dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6648         dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
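    /*
     * Rough buffer layout after this: the trailing sdmabufcnt 4k
     * buffers belong to SDMA, the sbufs just below them are reserved
     * for kernel PIO (SMI, MAD, ACKs), and the first lastctxt_piobuf
     * buffers are shared out among user contexts (pbufsctxt each).
     */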
6649 
6650     /*
6651      * If we have 16 user contexts, we will have 7 sbufs
6652      * per context, so reduce the update threshold to match.  We
6653      * want to update before we actually run out, at low pbufs/ctxt
6654      * so give ourselves some margin.
6655      */
6656     if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6657         updthresh = dd->pbufsctxt - 2;
6658     dd->cspec->updthresh_dflt = updthresh;
6659     dd->cspec->updthresh = updthresh;
6660 
6661     /* before full enable, no interrupts, no locking needed */
6662     dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6663                  << SYM_LSB(SendCtrl, AvailUpdThld)) |
6664             SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6665 
6666     dd->psxmitwait_supported = 1;
6667     dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6668 bail:
6669     if (!dd->ctxtcnt)
6670         dd->ctxtcnt = 1; /* for other initialization code */
6671 
6672     return ret;
6673 }
6674 
6675 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6676                     u32 *pbufnum)
6677 {
6678     u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6679     struct qib_devdata *dd = ppd->dd;
6680 
6681     /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6682     if (pbc & PBC_7322_VL15_SEND) {
6683         first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6684         last = first;
6685     } else {
6686         if ((plen + 1) > dd->piosize2kmax_dwords)
6687             first = dd->piobcnt2k;
6688         else
6689             first = 0;
6690         last = dd->cspec->lastbuf_for_pio;
6691     }
6692     return qib_getsendbuf_range(dd, pbufnum, first, last);
6693 }
6694 
6695 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6696                      u32 start)
6697 {
6698     qib_write_kreg_port(ppd, krp_psinterval, intv);
6699     qib_write_kreg_port(ppd, krp_psstart, start);
6700 }
6701 
6702 /*
6703  * Must be called with sdma_lock held, or before init finished.
6704  */
6705 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6706 {
6707     qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6708 }
6709 
6710 /*
6711  * sdma_lock should be acquired before calling this routine
6712  */
6713 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6714 {
6715     u64 reg, reg1, reg2;
6716 
6717     reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6718     qib_dev_porterr(ppd->dd, ppd->port,
6719         "SDMA senddmastatus: 0x%016llx\n", reg);
6720 
6721     reg = qib_read_kreg_port(ppd, krp_sendctrl);
6722     qib_dev_porterr(ppd->dd, ppd->port,
6723         "SDMA sendctrl: 0x%016llx\n", reg);
6724 
6725     reg = qib_read_kreg_port(ppd, krp_senddmabase);
6726     qib_dev_porterr(ppd->dd, ppd->port,
6727         "SDMA senddmabase: 0x%016llx\n", reg);
6728 
6729     reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6730     reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6731     reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6732     qib_dev_porterr(ppd->dd, ppd->port,
6733         "SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6734          reg, reg1, reg2);
6735 
6736     /* get bufuse bits, clear them, and print them again if non-zero */
6737     reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6738     qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6739     reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6740     qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
6741     reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6742     qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6743     /* 0 and 1 should always be zero, so print as short form */
6744     qib_dev_porterr(ppd->dd, ppd->port,
6745          "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6746          reg, reg1, reg2);
6747     reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6748     reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6749     reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6750     /* 0 and 1 should always be zero, so print as short form */
6751     qib_dev_porterr(ppd->dd, ppd->port,
6752          "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6753          reg, reg1, reg2);
6754 
6755     reg = qib_read_kreg_port(ppd, krp_senddmatail);
6756     qib_dev_porterr(ppd->dd, ppd->port,
6757         "SDMA senddmatail: 0x%016llx\n", reg);
6758 
6759     reg = qib_read_kreg_port(ppd, krp_senddmahead);
6760     qib_dev_porterr(ppd->dd, ppd->port,
6761         "SDMA senddmahead: 0x%016llx\n", reg);
6762 
6763     reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6764     qib_dev_porterr(ppd->dd, ppd->port,
6765         "SDMA senddmaheadaddr: 0x%016llx\n", reg);
6766 
6767     reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6768     qib_dev_porterr(ppd->dd, ppd->port,
6769         "SDMA senddmalengen: 0x%016llx\n", reg);
6770 
6771     reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6772     qib_dev_porterr(ppd->dd, ppd->port,
6773         "SDMA senddmadesccnt: 0x%016llx\n", reg);
6774 
6775     reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6776     qib_dev_porterr(ppd->dd, ppd->port,
6777         "SDMA senddmaidlecnt: 0x%016llx\n", reg);
6778 
6779     reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6780     qib_dev_porterr(ppd->dd, ppd->port,
6781         "SDMA senddmapriorityhld: 0x%016llx\n", reg);
6782 
6783     reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6784     qib_dev_porterr(ppd->dd, ppd->port,
6785         "SDMA senddmareloadcnt: 0x%016llx\n", reg);
6786 
6787     dump_sdma_state(ppd);
6788 }
6789 
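/*
 * Per-state knob settings for the SDMA state machine: each entry says
 * whether the engine is enabled, interrupts are enabled, halt is
 * asserted, and whether a drain is requested in that state.
 */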
6790 static struct sdma_set_state_action sdma_7322_action_table[] = {
6791     [qib_sdma_state_s00_hw_down] = {
6792         .go_s99_running_tofalse = 1,
6793         .op_enable = 0,
6794         .op_intenable = 0,
6795         .op_halt = 0,
6796         .op_drain = 0,
6797     },
6798     [qib_sdma_state_s10_hw_start_up_wait] = {
6799         .op_enable = 0,
6800         .op_intenable = 1,
6801         .op_halt = 1,
6802         .op_drain = 0,
6803     },
6804     [qib_sdma_state_s20_idle] = {
6805         .op_enable = 1,
6806         .op_intenable = 1,
6807         .op_halt = 1,
6808         .op_drain = 0,
6809     },
6810     [qib_sdma_state_s30_sw_clean_up_wait] = {
6811         .op_enable = 0,
6812         .op_intenable = 1,
6813         .op_halt = 1,
6814         .op_drain = 0,
6815     },
6816     [qib_sdma_state_s40_hw_clean_up_wait] = {
6817         .op_enable = 1,
6818         .op_intenable = 1,
6819         .op_halt = 1,
6820         .op_drain = 0,
6821     },
6822     [qib_sdma_state_s50_hw_halt_wait] = {
6823         .op_enable = 1,
6824         .op_intenable = 1,
6825         .op_halt = 1,
6826         .op_drain = 1,
6827     },
6828     [qib_sdma_state_s99_running] = {
6829         .op_enable = 1,
6830         .op_intenable = 1,
6831         .op_halt = 0,
6832         .op_drain = 0,
6833         .go_s99_running_totrue = 1,
6834     },
6835 };
6836 
6837 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6838 {
6839     ppd->sdma_state.set_state_action = sdma_7322_action_table;
6840 }
6841 
6842 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6843 {
6844     struct qib_devdata *dd = ppd->dd;
6845     unsigned lastbuf, erstbuf;
6846     u64 senddmabufmask[3] = { 0 };
6847     int n;
6848 
6849     qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6850     qib_sdma_7322_setlengen(ppd);
6851     qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6852     qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6853     qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6854     qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6855 
6856     if (dd->num_pports)
6857         n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6858     else
6859         n = dd->cspec->sdmabufcnt; /* failsafe for init */
6860     erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6861         ((dd->num_pports == 1 || ppd->port == 2) ? n :
6862         dd->cspec->sdmabufcnt);
6863     lastbuf = erstbuf + n;
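    /*
     * On a dual-port card, port 1 takes the first half of the SDMA
     * buffer range and port 2 the second half; a single-port card takes
     * it all.  The chosen range is then encoded into the three 64-bit
     * bufmask registers below.
     */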
6864 
6865     ppd->sdma_state.first_sendbuf = erstbuf;
6866     ppd->sdma_state.last_sendbuf = lastbuf;
6867     for (; erstbuf < lastbuf; ++erstbuf) {
6868         unsigned word = erstbuf / BITS_PER_LONG;
6869         unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6870 
6871         senddmabufmask[word] |= 1ULL << bit;
6872     }
6873     qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6874     qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6875     qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6876     return 0;
6877 }
6878 
6879 /* sdma_lock must be held */
6880 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6881 {
6882     struct qib_devdata *dd = ppd->dd;
6883     int sane;
6884     int use_dmahead;
6885     u16 swhead;
6886     u16 swtail;
6887     u16 cnt;
6888     u16 hwhead;
6889 
6890     use_dmahead = __qib_sdma_running(ppd) &&
6891         (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6892 retry:
6893     hwhead = use_dmahead ?
6894         (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6895         (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6896 
6897     swhead = ppd->sdma_descq_head;
6898     swtail = ppd->sdma_descq_tail;
6899     cnt = ppd->sdma_descq_cnt;
6900 
6901     if (swhead < swtail)
6902         /* not wrapped */
6903         sane = (hwhead >= swhead) && (hwhead <= swtail);
6904     else if (swhead > swtail)
6905         /* wrapped around */
6906         sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6907             (hwhead <= swtail);
6908     else
6909         /* empty */
6910         sane = (hwhead == swhead);
6911 
6912     if (unlikely(!sane)) {
6913         if (use_dmahead) {
6914             /* try one more time, directly from the register */
6915             use_dmahead = 0;
6916             goto retry;
6917         }
6918         /* proceed as if no progress */
6919         hwhead = swhead;
6920     }
6921 
6922     return hwhead;
6923 }
6924 
6925 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6926 {
6927     u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6928 
6929     return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6930            (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6931            !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6932            !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6933 }
6934 
6935 /*
6936  * Compute the amount of delay before sending the next packet if the
6937  * port's send rate differs from the static rate set for the QP.
6938  * The delay affects the next packet and the amount of the delay is
6939  * based on the length of this packet.
6940  */
6941 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6942                    u8 srate, u8 vl)
6943 {
6944     u8 snd_mult = ppd->delay_mult;
6945     u8 rcv_mult = ib_rate_to_delay[srate];
6946     u32 ret;
6947 
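    /*
     * A delay is needed only when the QP's static rate (rcv_mult) is
     * slower than the port's rate (snd_mult): roughly half the packet
     * length in dwords, scaled by the port's delay multiplier.
     */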
6948     ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6949 
6950     /* Indicate VL15, else set the VL in the control word */
6951     if (vl == 15)
6952         ret |= PBC_7322_VL15_SEND_CTRL;
6953     else
6954         ret |= vl << PBC_VL_NUM_LSB;
6955     ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6956 
6957     return ret;
6958 }
6959 
6960 /*
6961  * Enable the per-port VL15 send buffers for use.
6962  * They follow the rest of the buffers, without a config parameter.
6963  * This was in initregs, but that is done before the shadow
6964  * is set up, and this has to be done after the shadow is
6965  * set up.
6966  */
6967 static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6968 {
6969     unsigned vl15bufs;
6970 
6971     vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6972     qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6973                    TXCHK_CHG_TYPE_KERN, NULL);
6974 }
6975 
6976 static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6977 {
6978     if (rcd->ctxt < NUM_IB_PORTS) {
6979         if (rcd->dd->num_pports > 1) {
6980             rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6981             rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6982         } else {
6983             rcd->rcvegrcnt = KCTXT0_EGRCNT;
6984             rcd->rcvegr_tid_base = 0;
6985         }
6986     } else {
6987         rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6988         rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6989             (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6990     }
6991 }
6992 
6993 #define QTXSLEEPS 5000
6994 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6995                   u32 len, u32 which, struct qib_ctxtdata *rcd)
6996 {
6997     int i;
6998     const int last = start + len - 1;
6999     const int lastr = last / BITS_PER_LONG;
7000     u32 sleeps = 0;
7001     int wait = rcd != NULL;
7002     unsigned long flags;
7003 
7004     while (wait) {
7005         unsigned long shadow = 0;
7006         int cstart, previ = -1;
7007 
7008         /*
7009          * When flipping from kernel to user, we can't change
7010          * the checking type if the buffer is allocated to the
7011          * driver.  It's OK in the other direction, because it's
7012          * from close, and we have just disarmed all the
7013          * buffers.  All the kernel-to-kernel changes are also
7014          * OK.
7015          */
7016         for (cstart = start; cstart <= last; cstart++) {
7017             i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7018                 / BITS_PER_LONG;
7019             if (i != previ) {
7020                 shadow = (unsigned long)
7021                     le64_to_cpu(dd->pioavailregs_dma[i]);
7022                 previ = i;
7023             }
7024             if (test_bit(((2 * cstart) +
7025                       QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7026                      % BITS_PER_LONG, &shadow))
7027                 break;
7028         }
7029 
7030         if (cstart > last)
7031             break;
7032 
7033         if (sleeps == QTXSLEEPS)
7034             break;
7035         /* make sure we see an updated copy next time around */
7036         sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7037         sleeps++;
7038         msleep(20);
7039     }
7040 
7041     switch (which) {
7042     case TXCHK_CHG_TYPE_DIS1:
7043         /*
7044          * disable checking on a range; used by diags; just
7045          * one buffer, but still written generically
7046          */
7047         for (i = start; i <= last; i++)
7048             clear_bit(i, dd->cspec->sendchkenable);
7049         break;
7050 
7051     case TXCHK_CHG_TYPE_ENAB1:
7052         /*
7053          * (re)enable checking on a range; used by diags; just
7054          * one buffer, but still written generically; read
7055          * scratch to be sure buffer actually triggered, not
7056          * just flushed from processor.
7057          */
7058         qib_read_kreg32(dd, kr_scratch);
7059         for (i = start; i <= last; i++)
7060             set_bit(i, dd->cspec->sendchkenable);
7061         break;
7062 
7063     case TXCHK_CHG_TYPE_KERN:
7064         /* usable by kernel */
7065         for (i = start; i <= last; i++) {
7066             set_bit(i, dd->cspec->sendibchk);
7067             clear_bit(i, dd->cspec->sendgrhchk);
7068         }
7069         spin_lock_irqsave(&dd->uctxt_lock, flags);
7070         /* see if we need to raise avail update threshold */
7071         for (i = dd->first_user_ctxt;
7072              dd->cspec->updthresh != dd->cspec->updthresh_dflt
7073              && i < dd->cfgctxts; i++)
7074             if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7075                ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7076                < dd->cspec->updthresh_dflt)
7077                 break;
7078         spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7079         if (i == dd->cfgctxts) {
7080             spin_lock_irqsave(&dd->sendctrl_lock, flags);
7081             dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7082             dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7083             dd->sendctrl |= (dd->cspec->updthresh &
7084                      SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7085                        SYM_LSB(SendCtrl, AvailUpdThld);
7086             spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7087             sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7088         }
7089         break;
7090 
7091     case TXCHK_CHG_TYPE_USER:
7092         /* for user process */
7093         for (i = start; i <= last; i++) {
7094             clear_bit(i, dd->cspec->sendibchk);
7095             set_bit(i, dd->cspec->sendgrhchk);
7096         }
7097         spin_lock_irqsave(&dd->sendctrl_lock, flags);
7098         if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7099             / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7100             dd->cspec->updthresh = (rcd->piocnt /
7101                         rcd->subctxt_cnt) - 1;
7102             dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7103             dd->sendctrl |= (dd->cspec->updthresh &
7104                     SYM_RMASK(SendCtrl, AvailUpdThld))
7105                     << SYM_LSB(SendCtrl, AvailUpdThld);
7106             spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7107             sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7108         } else
7109             spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7110         break;
7111 
7112     default:
7113         break;
7114     }
7115 
7116     for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7117         qib_write_kreg(dd, kr_sendcheckmask + i,
7118                    dd->cspec->sendchkenable[i]);
7119 
7120     for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7121         qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7122                    dd->cspec->sendgrhchk[i]);
7123         qib_write_kreg(dd, kr_sendibpktmask + i,
7124                    dd->cspec->sendibchk[i]);
7125     }
7126 
7127     /*
7128      * Be sure whatever we did was seen by the chip and acted upon,
7129      * before we return.  Mostly important for which >= 2.
7130      */
7131     qib_read_kreg32(dd, kr_scratch);
7132 }
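
/*
 * Illustrative note (not part of the original driver source): the case
 * bodies above pair with the flush loops that follow them.  The
 * TXCHK_CHG_TYPE_DIS1/ENAB1 cases only modify the sendchkenable bitmap,
 * which is written back to kr_sendcheckmask when which >= 2, while the
 * KERN/USER cases only modify sendibchk/sendgrhchk, which are written back
 * to kr_sendibpktmask/kr_sendgrhcheckmask when which < 2; the type
 * constants are evidently ordered so that each case hits its matching
 * flush loop.
 */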
7133 
7134 
7135 /* useful for trigger analyzers, etc. */
7136 static void writescratch(struct qib_devdata *dd, u32 val)
7137 {
7138     qib_write_kreg(dd, kr_scratch, val);
7139 }
7140 
7141 /* Dummy for now, use chip regs soon */
7142 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7143 {
7144     return -ENXIO;
7145 }
7146 
7147 /**
7148  * qib_init_iba7322_funcs - set up the chip-specific function pointers
7149  * @pdev: the pci_dev for the qlogic_ib device
7150  * @ent: pci_device_id struct for this dev
7151  *
7152  * Also allocates, inits, and returns the devdata struct for this
7153  * device instance
7154  *
7155  * This is global, and is called directly at init to set up the
7156  * chip-specific function pointers for later use.
7157  */
7158 struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7159                        const struct pci_device_id *ent)
7160 {
7161     struct qib_devdata *dd;
7162     int ret, i;
7163     u32 tabsize, actual_cnt = 0;
7164 
7165     dd = qib_alloc_devdata(pdev,
7166         NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7167         sizeof(struct qib_chip_specific) +
7168         NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7169     if (IS_ERR(dd))
7170         goto bail;
7171 
7172     dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7173     dd->f_cleanup           = qib_setup_7322_cleanup;
7174     dd->f_clear_tids        = qib_7322_clear_tids;
7175     dd->f_free_irq          = qib_7322_free_irq;
7176     dd->f_get_base_info     = qib_7322_get_base_info;
7177     dd->f_get_msgheader     = qib_7322_get_msgheader;
7178     dd->f_getsendbuf        = qib_7322_getsendbuf;
7179     dd->f_gpio_mod          = gpio_7322_mod;
7180     dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7181     dd->f_hdrqempty         = qib_7322_hdrqempty;
7182     dd->f_ib_updown         = qib_7322_ib_updown;
7183     dd->f_init_ctxt         = qib_7322_init_ctxt;
7184     dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7185     dd->f_intr_fallback     = qib_7322_intr_fallback;
7186     dd->f_late_initreg      = qib_late_7322_initreg;
7187     dd->f_setpbc_control    = qib_7322_setpbc_control;
7188     dd->f_portcntr          = qib_portcntr_7322;
7189     dd->f_put_tid           = qib_7322_put_tid;
7190     dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7191     dd->f_rcvctrl           = rcvctrl_7322_mod;
7192     dd->f_read_cntrs        = qib_read_7322cntrs;
7193     dd->f_read_portcntrs    = qib_read_7322portcntrs;
7194     dd->f_reset             = qib_do_7322_reset;
7195     dd->f_init_sdma_regs    = init_sdma_7322_regs;
7196     dd->f_sdma_busy         = qib_sdma_7322_busy;
7197     dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7198     dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7199     dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7200     dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7201     dd->f_sendctrl          = sendctrl_7322_mod;
7202     dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7203     dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7204     dd->f_iblink_state      = qib_7322_iblink_state;
7205     dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7206     dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7207     dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7208     dd->f_set_ib_loopback   = qib_7322_set_loopback;
7209     dd->f_get_ib_table      = qib_7322_get_ib_table;
7210     dd->f_set_ib_table      = qib_7322_set_ib_table;
7211     dd->f_set_intr_state    = qib_7322_set_intr_state;
7212     dd->f_setextled         = qib_setup_7322_setextled;
7213     dd->f_txchk_change      = qib_7322_txchk_change;
7214     dd->f_update_usrhead    = qib_update_7322_usrhead;
7215     dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7216     dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7217     dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7218     dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7219     dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7220     dd->f_writescratch      = writescratch;
7221     dd->f_tempsense_rd  = qib_7322_tempsense_rd;
7222 #ifdef CONFIG_INFINIBAND_QIB_DCA
7223     dd->f_notify_dca    = qib_7322_notify_dca;
7224 #endif
7225     /*
7226      * Do remaining PCIe setup and save PCIe values in dd.
7227      * Any error printing is already done by the init code.
7228      * On return, we have the chip mapped, but chip registers
7229      * are not set up until start of qib_init_7322_variables.
7230      */
7231     ret = qib_pcie_ddinit(dd, pdev, ent);
7232     if (ret < 0)
7233         goto bail_free;
7234 
7235     /* initialize chip-specific variables */
7236     ret = qib_init_7322_variables(dd);
7237     if (ret)
7238         goto bail_cleanup;
7239 
7240     if (qib_mini_init || !dd->num_pports)
7241         goto bail;
7242 
7243     /*
7244      * Determine number of vectors we want; depends on port count
7245      * and number of configured kernel receive queues actually used.
7246      * Should also depend on whether sdma is enabled or not, but
7247      * that's such a rare testing case it's not worth worrying about.
7248      */
7249     tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7250     for (i = 0; i < tabsize; i++)
7251         if ((i < ARRAY_SIZE(irq_table) &&
7252              irq_table[i].port <= dd->num_pports) ||
7253             (i >= ARRAY_SIZE(irq_table) &&
7254              dd->rcd[i - ARRAY_SIZE(irq_table)]))
7255             actual_cnt++;
7256     /* reduce by the krcvq01 contexts (ctxts < 2) that do not use MSI-X */
7257     if (qib_krcvq01_no_msi)
7258         actual_cnt -= dd->num_pports;
7259 
7260     tabsize = actual_cnt;
7261     dd->cspec->msix_entries = kcalloc(tabsize,
7262                       sizeof(struct qib_msix_entry),
7263                       GFP_KERNEL);
7264     if (!dd->cspec->msix_entries)
7265         tabsize = 0;
7266 
7267     if (qib_pcie_params(dd, 8, &tabsize))
7268         qib_dev_err(dd,
7269             "Failed to setup PCIe or interrupts; continuing anyway\n");
7270     /* may be less than we wanted, if not enough available */
7271     dd->cspec->num_msix_entries = tabsize;
7272 
7273     /* setup interrupt handler */
7274     qib_setup_7322_interrupt(dd, 1);
7275 
7276     /* clear diagctrl register, in case diags were running and crashed */
7277     qib_write_kreg(dd, kr_hwdiagctrl, 0);
7278 #ifdef CONFIG_INFINIBAND_QIB_DCA
7279     if (!dca_add_requester(&pdev->dev)) {
7280         qib_devinfo(dd->pcidev, "DCA enabled\n");
7281         dd->flags |= QIB_DCA_ENABLED;
7282         qib_setup_dca(dd);
7283     }
7284 #endif
7285     goto bail;
7286 
7287 bail_cleanup:
7288     qib_pcie_ddcleanup(dd);
7289 bail_free:
7290     qib_free_devdata(dd);
7291     dd = ERR_PTR(ret);
7292 bail:
7293     return dd;
7294 }
7295 
7296 /*
7297  * Set the table entry at the specified index from the table specified.
7298  * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7299  * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7300  * 'idx' below addresses the correct entry, while its 4 LSBs select the
7301  * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7302  */
7303 #define DDS_ENT_AMP_LSB 14
7304 #define DDS_ENT_MAIN_LSB 9
7305 #define DDS_ENT_POST_LSB 5
7306 #define DDS_ENT_PRE_XTRA_LSB 3
7307 #define DDS_ENT_PRE_LSB 0
7308 
7309 /*
7310  * Set one entry in the TxDDS table for spec'd port
7311  * ridx picks one of the entries, while tp points
7312  * to the appropriate table entry.
7313  */
7314 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7315               const struct txdds_ent *tp)
7316 {
7317     struct qib_devdata *dd = ppd->dd;
7318     u32 pack_ent;
7319     int regidx;
7320 
7321     /* Get correct offset in chip-space, and in source table */
7322     regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7323     /*
7324      * We do not use qib_write_kreg_port() because it was intended
7325      * only for registers in the lower "port specific" pages.
7326      * So do index calculation by hand.
7327      */
7328     if (ppd->hw_pidx)
7329         regidx += (dd->palign / sizeof(u64));
7330 
7331     pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7332     pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7333     pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7334     pack_ent |= tp->post << DDS_ENT_POST_LSB;
7335     qib_write_kreg(dd, regidx, pack_ent);
7336     /* Prevent back-to-back writes by hitting scratch */
7337     qib_write_kreg(ppd->dd, kr_scratch, 0);
7338 }
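
/*
 * Illustrative example (not part of the original driver source): packing a
 * txdds_ent with the DDS_ENT_*_LSB positions above, assuming the field
 * order noted in the tables below (amp, pre, main, post).  For the
 * loopback entry { 2, 2, 15, 6 }:
 *
 *	pack_ent = (2 << 14) | (15 << 9) | (6 << 5) | (2 << 0) = 0x9ec2
 *
 * i.e. amp lands at bit 14 and above, main in bits [13:9], post in [8:5]
 * and pre in the low bits.
 */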
7339 
7340 static const struct vendor_txdds_ent vendor_txdds[] = {
7341     { /* Amphenol 1m 30awg NoEq */
7342         { 0x41, 0x50, 0x48 }, "584470002       ",
7343         { 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7344     },
7345     { /* Amphenol 3m 28awg NoEq */
7346         { 0x41, 0x50, 0x48 }, "584470004       ",
7347         {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7348     },
7349     { /* Finisar 3m OM2 Optical */
7350         { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7351         {  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7352     },
7353     { /* Finisar 30m OM2 Optical */
7354         { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7355         {  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7356     },
7357     { /* Finisar Default OM2 Optical */
7358         { 0x00, 0x90, 0x65 }, NULL,
7359         {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7360     },
7361     { /* Gore 1m 30awg NoEq */
7362         { 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7363         {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7364     },
7365     { /* Gore 2m 30awg NoEq */
7366         { 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7367         {  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7368     },
7369     { /* Gore 1m 28awg NoEq */
7370         { 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7371         {  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7372     },
7373     { /* Gore 3m 28awg NoEq */
7374         { 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7375         {  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7376     },
7377     { /* Gore 5m 24awg Eq */
7378         { 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7379         {  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7380     },
7381     { /* Gore 7m 24awg Eq */
7382         { 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7383         {  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7384     },
7385     { /* Gore 5m 26awg Eq */
7386         { 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7387         {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7388     },
7389     { /* Gore 7m 26awg Eq */
7390         { 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7391         {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7392     },
7393     { /* Intersil 12m 24awg Active */
7394         { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7395         {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7396     },
7397     { /* Intersil 10m 28awg Active */
7398         { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7399         {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7400     },
7401     { /* Intersil 7m 30awg Active */
7402         { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7403         {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7404     },
7405     { /* Intersil 5m 32awg Active */
7406         { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7407         {  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7408     },
7409     { /* Intersil Default Active */
7410         { 0x00, 0x30, 0xB4 }, NULL,
7411         {  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7412     },
7413     { /* Luxtera 20m Active Optical */
7414         { 0x00, 0x25, 0x63 }, NULL,
7415         {  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7416     },
7417     { /* Molex 1M Cu loopback */
7418         { 0x00, 0x09, 0x3A }, "74763-0025      ",
7419         {  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7420     },
7421     { /* Molex 2m 28awg NoEq */
7422         { 0x00, 0x09, 0x3A }, "74757-2201      ",
7423         {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7424     },
7425 };
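
/*
 * Illustrative note (not part of the original driver source):
 * find_best_ent() below matches a cable against this table by QSFP OUI
 * and, when partnum is non-NULL, by part number as well; a NULL partnum
 * acts as a per-vendor default.  For example, a cable reporting OUI
 * 00:21:77 and part number "QSN3300-1       " selects the Gore 1m 30awg
 * entry, while one reporting OUI 00:90:65 with an unlisted part number
 * falls through to the Finisar default entry.
 */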
7426 
7427 static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7428     /* amp, pre, main, post */
7429     {  2, 2, 15,  6 },  /* Loopback */
7430     {  0, 0,  0,  1 },  /*  2 dB */
7431     {  0, 0,  0,  2 },  /*  3 dB */
7432     {  0, 0,  0,  3 },  /*  4 dB */
7433     {  0, 0,  0,  4 },  /*  5 dB */
7434     {  0, 0,  0,  5 },  /*  6 dB */
7435     {  0, 0,  0,  6 },  /*  7 dB */
7436     {  0, 0,  0,  7 },  /*  8 dB */
7437     {  0, 0,  0,  8 },  /*  9 dB */
7438     {  0, 0,  0,  9 },  /* 10 dB */
7439     {  0, 0,  0, 10 },  /* 11 dB */
7440     {  0, 0,  0, 11 },  /* 12 dB */
7441     {  0, 0,  0, 12 },  /* 13 dB */
7442     {  0, 0,  0, 13 },  /* 14 dB */
7443     {  0, 0,  0, 14 },  /* 15 dB */
7444     {  0, 0,  0, 15 },  /* 16 dB */
7445 };
7446 
7447 static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7448     /* amp, pre, main, post */
7449     {  2, 2, 15,  6 },  /* Loopback */
7450     {  0, 0,  0,  8 },  /*  2 dB */
7451     {  0, 0,  0,  8 },  /*  3 dB */
7452     {  0, 0,  0,  9 },  /*  4 dB */
7453     {  0, 0,  0,  9 },  /*  5 dB */
7454     {  0, 0,  0, 10 },  /*  6 dB */
7455     {  0, 0,  0, 10 },  /*  7 dB */
7456     {  0, 0,  0, 11 },  /*  8 dB */
7457     {  0, 0,  0, 11 },  /*  9 dB */
7458     {  0, 0,  0, 12 },  /* 10 dB */
7459     {  0, 0,  0, 12 },  /* 11 dB */
7460     {  0, 0,  0, 13 },  /* 12 dB */
7461     {  0, 0,  0, 13 },  /* 13 dB */
7462     {  0, 0,  0, 14 },  /* 14 dB */
7463     {  0, 0,  0, 14 },  /* 15 dB */
7464     {  0, 0,  0, 15 },  /* 16 dB */
7465 };
7466 
7467 static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7468     /* amp, pre, main, post */
7469     {  2, 2, 15,  6 },  /* Loopback */
7470     {  0, 1,  0,  7 },  /*  2 dB (also QMH7342) */
7471     {  0, 1,  0,  9 },  /*  3 dB (also QMH7342) */
7472     {  0, 1,  0, 11 },  /*  4 dB */
7473     {  0, 1,  0, 13 },  /*  5 dB */
7474     {  0, 1,  0, 15 },  /*  6 dB */
7475     {  0, 1,  3, 15 },  /*  7 dB */
7476     {  0, 1,  7, 15 },  /*  8 dB */
7477     {  0, 1,  7, 15 },  /*  9 dB */
7478     {  0, 1,  8, 15 },  /* 10 dB */
7479     {  0, 1,  9, 15 },  /* 11 dB */
7480     {  0, 1, 10, 15 },  /* 12 dB */
7481     {  0, 2,  6, 15 },  /* 13 dB */
7482     {  0, 2,  7, 15 },  /* 14 dB */
7483     {  0, 2,  8, 15 },  /* 15 dB */
7484     {  0, 2,  9, 15 },  /* 16 dB */
7485 };
7486 
7487 /*
7488  * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7489  * These are mostly used for mez cards going through connectors
7490  * and backplane traces, but can be used to add other "unusual"
7491  * table values as well.
7492  */
7493 static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7494     /* amp, pre, main, post */
7495     {  0, 0, 0,  1 },   /* QMH7342 backplane settings */
7496     {  0, 0, 0,  1 },   /* QMH7342 backplane settings */
7497     {  0, 0, 0,  2 },   /* QMH7342 backplane settings */
7498     {  0, 0, 0,  2 },   /* QMH7342 backplane settings */
7499     {  0, 0, 0,  3 },   /* QMH7342 backplane settings */
7500     {  0, 0, 0,  4 },   /* QMH7342 backplane settings */
7501     {  0, 1, 4, 15 },   /* QME7342 backplane settings 1.0 */
7502     {  0, 1, 3, 15 },   /* QME7342 backplane settings 1.0 */
7503     {  0, 1, 0, 12 },   /* QME7342 backplane settings 1.0 */
7504     {  0, 1, 0, 11 },   /* QME7342 backplane settings 1.0 */
7505     {  0, 1, 0,  9 },   /* QME7342 backplane settings 1.0 */
7506     {  0, 1, 0, 14 },   /* QME7342 backplane settings 1.0 */
7507     {  0, 1, 2, 15 },   /* QME7342 backplane settings 1.0 */
7508     {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7509     {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7510     {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7511     {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7512     {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7513 };
7514 
7515 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7516     /* amp, pre, main, post */
7517     {  0, 0, 0,  7 },   /* QMH7342 backplane settings */
7518     {  0, 0, 0,  7 },   /* QMH7342 backplane settings */
7519     {  0, 0, 0,  8 },   /* QMH7342 backplane settings */
7520     {  0, 0, 0,  8 },   /* QMH7342 backplane settings */
7521     {  0, 0, 0,  9 },   /* QMH7342 backplane settings */
7522     {  0, 0, 0, 10 },   /* QMH7342 backplane settings */
7523     {  0, 1, 4, 15 },   /* QME7342 backplane settings 1.0 */
7524     {  0, 1, 3, 15 },   /* QME7342 backplane settings 1.0 */
7525     {  0, 1, 0, 12 },   /* QME7342 backplane settings 1.0 */
7526     {  0, 1, 0, 11 },   /* QME7342 backplane settings 1.0 */
7527     {  0, 1, 0,  9 },   /* QME7342 backplane settings 1.0 */
7528     {  0, 1, 0, 14 },   /* QME7342 backplane settings 1.0 */
7529     {  0, 1, 2, 15 },   /* QME7342 backplane settings 1.0 */
7530     {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7531     {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7532     {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7533     {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7534     {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7535 };
7536 
7537 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7538     /* amp, pre, main, post */
7539     {  0, 1,  0,  4 },  /* QMH7342 backplane settings */
7540     {  0, 1,  0,  5 },  /* QMH7342 backplane settings */
7541     {  0, 1,  0,  6 },  /* QMH7342 backplane settings */
7542     {  0, 1,  0,  8 },  /* QMH7342 backplane settings */
7543     {  0, 1,  0, 10 },  /* QMH7342 backplane settings */
7544     {  0, 1,  0, 12 },  /* QMH7342 backplane settings */
7545     {  0, 1,  4, 15 },  /* QME7342 backplane settings 1.0 */
7546     {  0, 1,  3, 15 },  /* QME7342 backplane settings 1.0 */
7547     {  0, 1,  0, 12 },  /* QME7342 backplane settings 1.0 */
7548     {  0, 1,  0, 11 },  /* QME7342 backplane settings 1.0 */
7549     {  0, 1,  0,  9 },  /* QME7342 backplane settings 1.0 */
7550     {  0, 1,  0, 14 },  /* QME7342 backplane settings 1.0 */
7551     {  0, 1,  2, 15 },  /* QME7342 backplane settings 1.0 */
7552     {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7553     {  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7554     {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7555     {  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7556     {  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7557 };
7558 
7559 static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7560     /* amp, pre, main, post */
7561     { 0, 0, 0, 0 },         /* QME7342 mfg settings */
7562     { 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7563 };
7564 
7565 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7566                            unsigned atten)
7567 {
7568     /*
7569      * The attenuation table starts at 2dB for entry 1,
7570      * with entry 0 being the loopback entry.
7571      */
7572     if (atten <= 2)
7573         atten = 1;
7574     else if (atten > TXDDS_TABLE_SZ)
7575         atten = TXDDS_TABLE_SZ - 1;
7576     else
7577         atten--;
7578     return txdds + atten;
7579 }
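
/*
 * Illustrative example (not part of the original driver source), assuming
 * TXDDS_TABLE_SZ == 16 as implied by the tables above: an advertised
 * attenuation of 5 dB maps to index 4 (the "5 dB" row), 1 or 2 dB map to
 * index 1 (the "2 dB" row), and anything above 16 dB is clamped to index
 * 15 (the "16 dB" row).
 */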
7580 
7581 /*
7582  * if override is set, the module parameter txselect has a value
7583  * for this specific port, so use it, rather than our normal mechanism.
7584  */
7585 static void find_best_ent(struct qib_pportdata *ppd,
7586               const struct txdds_ent **sdr_dds,
7587               const struct txdds_ent **ddr_dds,
7588               const struct txdds_ent **qdr_dds, int override)
7589 {
7590     struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7591     int idx;
7592 
7593     /* Search table of known cables */
7594     for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7595         const struct vendor_txdds_ent *v = vendor_txdds + idx;
7596 
7597         if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7598             (!v->partnum ||
7599              !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7600             *sdr_dds = &v->sdr;
7601             *ddr_dds = &v->ddr;
7602             *qdr_dds = &v->qdr;
7603             return;
7604         }
7605     }
7606 
7607     /* Active cables don't have attenuation so we only set SERDES
7608      * settings to account for the attenuation of the board traces. */
7609     if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7610         *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7611         *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7612         *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7613         return;
7614     }
7615 
7616     if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7617                               qd->atten[1])) {
7618         *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7619         *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7620         *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7621         return;
7622     } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7623         /*
7624          * If we have no (or incomplete) data from the cable
7625          * EEPROM, or no QSFP, or override is set, use the
7626          * module parameter value to index into the attenuation
7627          * table.
7628          */
7629         idx = ppd->cpspec->no_eep;
7630         *sdr_dds = &txdds_sdr[idx];
7631         *ddr_dds = &txdds_ddr[idx];
7632         *qdr_dds = &txdds_qdr[idx];
7633     } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7634         /* similar to above, but index into the "extra" table. */
7635         idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7636         *sdr_dds = &txdds_extra_sdr[idx];
7637         *ddr_dds = &txdds_extra_ddr[idx];
7638         *qdr_dds = &txdds_extra_qdr[idx];
7639     } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7640            ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7641                       TXDDS_MFG_SZ)) {
7642         idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7643         pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7644             ppd->dd->unit, ppd->port, idx);
7645         *sdr_dds = &txdds_extra_mfg[idx];
7646         *ddr_dds = &txdds_extra_mfg[idx];
7647         *qdr_dds = &txdds_extra_mfg[idx];
7648     } else {
7649         /* this shouldn't happen, it's range checked */
7650         *sdr_dds = txdds_sdr + qib_long_atten;
7651         *ddr_dds = txdds_ddr + qib_long_atten;
7652         *qdr_dds = txdds_qdr + qib_long_atten;
7653     }
7654 }
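
/*
 * Illustrative note (not part of the original driver source): assuming
 * TXDDS_TABLE_SZ == 16, TXDDS_EXTRA_SZ == 18 and TXDDS_MFG_SZ == 2 (the
 * sizes implied by the initializers above), the txselect/no_eep values
 * handled here break down as 0-15 for the standard attenuation tables,
 * 16-33 for the "extra" backplane tables, and 34-35 for the QME/QMH
 * manufacturing entries.
 */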
7655 
7656 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7657 {
7658     const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7659     struct txdds_ent *dds;
7660     int idx;
7661     int single_ent = 0;
7662 
7663     find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7664 
7665     /* for mez cards or override, use the selected value for all entries */
7666     if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7667         single_ent = 1;
7668 
7669     /* Fill in the first entry with the best entry found. */
7670     set_txdds(ppd, 0, sdr_dds);
7671     set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7672     set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7673     if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7674         QIBL_LINKACTIVE)) {
7675         dds = (struct txdds_ent *)(ppd->link_speed_active ==
7676                        QIB_IB_QDR ?  qdr_dds :
7677                        (ppd->link_speed_active ==
7678                         QIB_IB_DDR ? ddr_dds : sdr_dds));
7679         write_tx_serdes_param(ppd, dds);
7680     }
7681 
7682     /* Fill in the remaining entries with the default table values. */
7683     for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7684         set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7685         set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7686               single_ent ? ddr_dds : txdds_ddr + idx);
7687         set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7688               single_ent ? qdr_dds : txdds_qdr + idx);
7689     }
7690 }
7691 
7692 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7693 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7694 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7695 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7696 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7697 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7698 #define AHB_TRANS_TRIES 10
7699 
7700 /*
7701  * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
7702  * 5=subsystem, which is why most calls pass "chan + (chan >> 1)"
7703  * for the channel argument.
7704  */
7705 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7706             u32 data, u32 mask)
7707 {
7708     u32 rd_data, wr_data, sz_mask;
7709     u64 trans, acc, prev_acc;
7710     u32 ret = 0xBAD0BAD;
7711     int tries;
7712 
7713     prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7714     /* From this point on, make sure we return access */
7715     acc = (quad << 1) | 1;
7716     qib_write_kreg(dd, KR_AHB_ACC, acc);
7717 
7718     for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7719         trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7720         if (trans & AHB_TRANS_RDY)
7721             break;
7722     }
7723     if (tries >= AHB_TRANS_TRIES) {
7724         qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7725         goto bail;
7726     }
7727 
7728     /* If mask is not all 1s, we need to read, but different SerDes
7729      * entities have different sizes
7730      */
7731     sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7732     wr_data = data & mask & sz_mask;
7733     if ((~mask & sz_mask) != 0) {
7734         trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7735         qib_write_kreg(dd, KR_AHB_TRANS, trans);
7736 
7737         for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7738             trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7739             if (trans & AHB_TRANS_RDY)
7740                 break;
7741         }
7742         if (tries >= AHB_TRANS_TRIES) {
7743             qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7744                     AHB_TRANS_TRIES);
7745             goto bail;
7746         }
7747         /* Re-read in case host split reads and read data first */
7748         trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7749         rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7750         wr_data |= (rd_data & ~mask & sz_mask);
7751     }
7752 
7753     /* If mask is not zero, we need to write. */
7754     if (mask & sz_mask) {
7755         trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7756         trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7757         trans |= AHB_WR;
7758         qib_write_kreg(dd, KR_AHB_TRANS, trans);
7759 
7760         for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7761             trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7762             if (trans & AHB_TRANS_RDY)
7763                 break;
7764         }
7765         if (tries >= AHB_TRANS_TRIES) {
7766             qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7767                     AHB_TRANS_TRIES);
7768             goto bail;
7769         }
7770     }
7771     ret = wr_data;
7772 bail:
7773     qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7774     return ret;
7775 }
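
/*
 * Illustrative note (not part of the original driver source): ahb_mod() is
 * a read-modify-write helper, so the mask selects the behaviour.  A call
 * such as
 *
 *	cur = ahb_mod(dd, IBSD(ppd->hw_pidx), chan, addr, 0, 0);
 *
 * writes nothing and returns the current register contents (this is how
 * serdes_7322_init_new() polls the rx calibration status), while a mask of
 * all ones within the entity size skips the read and writes 'data' as-is.
 */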
7776 
7777 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7778                  unsigned mask)
7779 {
7780     struct qib_devdata *dd = ppd->dd;
7781     int chan;
7782 
7783     for (chan = 0; chan < SERDES_CHANS; ++chan) {
7784         ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7785             data, mask);
7786         ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7787             0, 0);
7788     }
7789 }
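
/*
 * Illustrative example (not part of the original driver source): the
 * chan + (chan >> 1) expression maps SerDes data channels 0, 1, 2, 3 to
 * AHB channel addresses 0, 1, 3, 4, skipping address 2 (the PLL) as
 * described in the channel map above ahb_mod().
 */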
7790 
7791 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7792 {
7793     u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7794     u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7795 
7796     if (enable && !state) {
7797         pr_info("IB%u:%u Turning LOS on\n",
7798             ppd->dd->unit, ppd->port);
7799         data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7800     } else if (!enable && state) {
7801         pr_info("IB%u:%u Turning LOS off\n",
7802             ppd->dd->unit, ppd->port);
7803         data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7804     }
7805     qib_write_kreg_port(ppd, krp_serdesctrl, data);
7806 }
7807 
7808 static int serdes_7322_init(struct qib_pportdata *ppd)
7809 {
7810     int ret = 0;
7811 
7812     if (ppd->dd->cspec->r1)
7813         ret = serdes_7322_init_old(ppd);
7814     else
7815         ret = serdes_7322_init_new(ppd);
7816     return ret;
7817 }
7818 
7819 static int serdes_7322_init_old(struct qib_pportdata *ppd)
7820 {
7821     u32 le_val;
7822 
7823     /*
7824      * Initialize the Tx DDS tables.  Also done every QSFP event,
7825      * for adapters with QSFP
7826      */
7827     init_txdds_table(ppd, 0);
7828 
7829     /* ensure no tx overrides from earlier driver loads */
7830     qib_write_kreg_port(ppd, krp_tx_deemph_override,
7831         SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7832         reset_tx_deemphasis_override));
7833 
7834     /* Patch some SerDes defaults to "Better for IB" */
7835     /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7836     ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7837 
7838     /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7839     ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7840     /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7841     ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7842 
7843     /* May be overridden in qsfp_7322_event */
7844     le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7845     ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7846 
7847     /* enable LE1 adaptation for all but QME, which is disabled */
7848     le_val = IS_QME(ppd->dd) ? 0 : 1;
7849     ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7850 
7851     /* Clear cmode-override, may be set from older driver */
7852     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7853 
7854     /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7855     ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7856 
7857     /* setup LoS params; these are subsystem, so chan == 5 */
7858     /* LoS filter threshold_count on, ch 0-3, set to 8 */
7859     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7860     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7861     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7862     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7863 
7864     /* LoS filter threshold_count off, ch 0-3, set to 4 */
7865     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7866     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7867     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7868     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7869 
7870     /* LoS filter select enabled */
7871     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7872 
7873     /* LoS target data:  SDR=4, DDR=2, QDR=1 */
7874     ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7875     ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7876     ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7877 
7878     serdes_7322_los_enable(ppd, 1);
7879 
7880     /* rxbistena; set to 0 to avoid effects of it switching later */
7881     ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7882 
7883     /* Configure 4 DFE taps, and only they adapt */
7884     ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7885 
7886     /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7887     le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7888     ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7889 
7890     /*
7891      * Set receive adaptation mode.  SDR and DDR adaptation are
7892      * always on, and QDR is initially enabled; later disabled.
7893      */
7894     qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7895     qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7896     qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7897                 ppd->dd->cspec->r1 ?
7898                 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7899     ppd->cpspec->qdr_dfe_on = 1;
7900 
7901     /* FLoop LOS gate: PPM filter  enabled */
7902     ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7903 
7904     /* rx offset center enabled */
7905     ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7906 
7907     if (!ppd->dd->cspec->r1) {
7908         ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7909         ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7910     }
7911 
7912     /* Set the frequency loop bandwidth to 15 */
7913     ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7914 
7915     return 0;
7916 }
7917 
7918 static int serdes_7322_init_new(struct qib_pportdata *ppd)
7919 {
7920     unsigned long tend;
7921     u32 le_val, rxcaldone;
7922     int chan, chan_done = (1 << SERDES_CHANS) - 1;
7923 
7924     /* Clear cmode-override, may be set from older driver */
7925     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7926 
7927     /* ensure no tx overrides from earlier driver loads */
7928     qib_write_kreg_port(ppd, krp_tx_deemph_override,
7929         SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7930         reset_tx_deemphasis_override));
7931 
7932     /* START OF LSI SUGGESTED SERDES BRINGUP */
7933     /* Reset - Calibration Setup */
7934     /*       Stop DFE adaptation */
7935     ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7936     /*       Disable LE1 */
7937     ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7938     /*       Disable autoadapt for LE1 */
7939     ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7940     /*       Disable LE2 */
7941     ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7942     /*       Disable VGA */
7943     ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7944     /*       Disable AFE Offset Cancel */
7945     ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7946     /*       Disable Timing Loop */
7947     ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7948     /*       Disable Frequency Loop */
7949     ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7950     /*       Disable Baseline Wander Correction */
7951     ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7952     /*       Disable RX Calibration */
7953     ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7954     /*       Disable RX Offset Calibration */
7955     ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7956     /*       Select BB CDR */
7957     ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7958     /*       CDR Step Size */
7959     ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7960     /*       Enable phase Calibration */
7961     ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7962     /*       DFE Bandwidth [2:14-12] */
7963     ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7964     /*       DFE Config (4 taps only) */
7965     ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7966     /*       Gain Loop Bandwidth */
7967     if (!ppd->dd->cspec->r1) {
7968         ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7969         ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7970     } else {
7971         ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7972     }
7973     /*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
7974     /*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
7975     /*       Data Rate Select [5:7-6] (leave as default) */
7976     /*       RX Parallel Word Width [3:10-8] (leave as default) */
7977 
7978     /* RX RESET */
7979     /*       Single- or Multi-channel reset */
7980     /*       RX Analog reset */
7981     /*       RX Digital reset */
7982     ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7983     msleep(20);
7984     /*       RX Analog reset */
7985     ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7986     msleep(20);
7987     /*       RX Digital reset */
7988     ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7989     msleep(20);
7990 
7991     /* setup LoS params; these are subsystem, so chan == 5 */
7992     /* LoS filter threshold_count on, ch 0-3, set to 8 */
7993     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7994     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7995     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7996     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7997 
7998     /* LoS filter threshold_count off, ch 0-3, set to 4 */
7999     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8000     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8001     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8002     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8003 
8004     /* LoS filter select enabled */
8005     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8006 
8007     /* LoS target data:  SDR=4, DDR=2, QDR=1 */
8008     ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8009     ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8010     ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8011 
8012     /* Turn on LOS on initial SERDES init */
8013     serdes_7322_los_enable(ppd, 1);
8014     /* FLoop LOS gate: PPM filter  enabled */
8015     ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8016 
8017     /* RX LATCH CALIBRATION */
8018     /*       Enable Eyefinder Phase Calibration latch */
8019     ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8020     /*       Enable RX Offset Calibration latch */
8021     ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8022     msleep(20);
8023     /*       Start Calibration */
8024     ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8025     tend = jiffies + msecs_to_jiffies(500);
8026     while (chan_done && !time_is_before_jiffies(tend)) {
8027         msleep(20);
8028         for (chan = 0; chan < SERDES_CHANS; ++chan) {
8029             rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8030                         (chan + (chan >> 1)),
8031                         25, 0, 0);
8032             if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8033                 (~chan_done & (1 << chan)) == 0)
8034                 chan_done &= ~(1 << chan);
8035         }
8036     }
8037     if (chan_done) {
8038         pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8039              IBSD(ppd->hw_pidx), chan_done);
8040     } else {
8041         for (chan = 0; chan < SERDES_CHANS; ++chan) {
8042             rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8043                         (chan + (chan >> 1)),
8044                         25, 0, 0);
8045             if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8046                 pr_info("Serdes %d chan %d calibration failed\n",
8047                     IBSD(ppd->hw_pidx), chan);
8048         }
8049     }
8050 
8051     /*       Turn off Calibration */
8052     ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8053     msleep(20);
8054 
8055     /* BRING RX UP */
8056     /*       Set LE2 value (May be overridden in qsfp_7322_event) */
8057     le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8058     ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8059     /*       Set LE2 Loop bandwidth */
8060     ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8061     /*       Enable LE2 */
8062     ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8063     msleep(20);
8064     /*       Enable H0 only */
8065     ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8066     /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8067     le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8068     ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8069     /*       Enable VGA */
8070     ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8071     msleep(20);
8072     /*       Set Frequency Loop Bandwidth */
8073     ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8074     /*       Enable Frequency Loop */
8075     ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8076     /*       Set Timing Loop Bandwidth */
8077     ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8078     /*       Enable Timing Loop */
8079     ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8080     msleep(50);
8081     /*       Enable DFE
8082      *       Set receive adaptation mode.  SDR and DDR adaptation are
8083      *       always on, and QDR is initially enabled; later disabled.
8084      */
8085     qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8086     qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8087     qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8088                 ppd->dd->cspec->r1 ?
8089                 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8090     ppd->cpspec->qdr_dfe_on = 1;
8091     /*       Disable LE1  */
8092     ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8093     /*       Disable auto adapt for LE1 */
8094     ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8095     msleep(20);
8096     /*       Enable AFE Offset Cancel */
8097     ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8098     /*       Enable Baseline Wander Correction */
8099     ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8100     /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8101     ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8102     /* VGA output common mode */
8103     ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8104 
8105     /*
8106      * Initialize the Tx DDS tables.  Also done every QSFP event,
8107      * for adapters with QSFP
8108      */
8109     init_txdds_table(ppd, 0);
8110 
8111     return 0;
8112 }
8113 
8114 /* start adjust QMH serdes parameters */
8115 
8116 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8117 {
8118     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8119         9, code << 9, 0x3f << 9);
8120 }
8121 
8122 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8123     int enable, u32 tapenable)
8124 {
8125     if (enable)
8126         ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8127             1, 3 << 10, 0x1f << 10);
8128     else
8129         ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8130             1, 0, 0x1f << 10);
8131 }
8132 
8133 /* Set clock to 1, 0, 1, 0 */
8134 static void clock_man(struct qib_pportdata *ppd, int chan)
8135 {
8136     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8137         4, 0x4000, 0x4000);
8138     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8139         4, 0, 0x4000);
8140     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8141         4, 0x4000, 0x4000);
8142     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8143         4, 0, 0x4000);
8144 }
8145 
8146 /*
8147  * write the current Tx serdes pre,post,main,amp settings into the serdes.
8148  * The caller must pass the settings appropriate for the current speed,
8149  * or not care if they are correct for the current speed.
8150  */
8151 static void write_tx_serdes_param(struct qib_pportdata *ppd,
8152                   struct txdds_ent *txdds)
8153 {
8154     u64 deemph;
8155 
8156     deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8157     /* field names for amp, main, post, pre, respectively */
8158     deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8159             SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8160             SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8161             SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8162 
8163     deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8164                tx_override_deemphasis_select);
8165     deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8166             txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8167                        txampcntl_d2a);
8168     deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8169              txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8170                    txc0_ena);
8171     deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8172              txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8173                     txcp1_ena);
8174     deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8175              txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8176                     txcn1_ena);
8177     qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8178 }
8179 
8180 /*
8181  * Set the parameters for mez cards on link bounce, so they are
8182  * always exactly what was requested.  Similar logic to init_txdds
8183  * but does just the serdes.
8184  */
8185 static void adj_tx_serdes(struct qib_pportdata *ppd)
8186 {
8187     const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8188     struct txdds_ent *dds;
8189 
8190     find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8191     dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8192         qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8193                 ddr_dds : sdr_dds));
8194     write_tx_serdes_param(ppd, dds);
8195 }
8196 
8197 /* set QDR forced value for H1, if needed */
8198 static void force_h1(struct qib_pportdata *ppd)
8199 {
8200     int chan;
8201 
8202     ppd->cpspec->qdr_reforce = 0;
8203     if (!ppd->dd->cspec->r1)
8204         return;
8205 
8206     for (chan = 0; chan < SERDES_CHANS; chan++) {
8207         set_man_mode_h1(ppd, chan, 1, 0);
8208         set_man_code(ppd, chan, ppd->cpspec->h1_val);
8209         clock_man(ppd, chan);
8210         set_man_mode_h1(ppd, chan, 0, 0);
8211     }
8212 }
8213 
8214 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8215 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8216 
8217 #define R_OPCODE_LSB 3
8218 #define R_OP_NOP 0
8219 #define R_OP_SHIFT 2
8220 #define R_OP_UPDATE 3
8221 #define R_TDI_LSB 2
8222 #define R_TDO_LSB 1
8223 #define R_RDY 1
8224 
8225 static int qib_r_grab(struct qib_devdata *dd)
8226 {
8227     u64 val = SJA_EN;
8228 
8229     qib_write_kreg(dd, kr_r_access, val);
8230     qib_read_kreg32(dd, kr_scratch);
8231     return 0;
8232 }
8233 
8234 /* qib_r_wait_for_rdy() not only waits for the ready bit, it
8235  * returns the current state of R_TDO
8236  */
8237 static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8238 {
8239     u64 val;
8240     int timeout;
8241 
8242     for (timeout = 0; timeout < 100 ; ++timeout) {
8243         val = qib_read_kreg32(dd, kr_r_access);
8244         if (val & R_RDY)
8245             return (val >> R_TDO_LSB) & 1;
8246     }
8247     return -1;
8248 }
8249 
8250 static int qib_r_shift(struct qib_devdata *dd, int bisten,
8251                int len, u8 *inp, u8 *outp)
8252 {
8253     u64 valbase, val;
8254     int ret, pos;
8255 
8256     valbase = SJA_EN | (bisten << BISTEN_LSB) |
8257         (R_OP_SHIFT << R_OPCODE_LSB);
8258     ret = qib_r_wait_for_rdy(dd);
8259     if (ret < 0)
8260         goto bail;
8261     for (pos = 0; pos < len; ++pos) {
8262         val = valbase;
8263         if (outp) {
8264             outp[pos >> 3] &= ~(1 << (pos & 7));
8265             outp[pos >> 3] |= (ret << (pos & 7));
8266         }
8267         if (inp) {
8268             int tdi = inp[pos >> 3] >> (pos & 7);
8269 
8270             val |= ((tdi & 1) << R_TDI_LSB);
8271         }
8272         qib_write_kreg(dd, kr_r_access, val);
8273         qib_read_kreg32(dd, kr_scratch);
8274         ret = qib_r_wait_for_rdy(dd);
8275         if (ret < 0)
8276             break;
8277     }
8278     /* Restore to NOP between operations. */
8279     val =  SJA_EN | (bisten << BISTEN_LSB);
8280     qib_write_kreg(dd, kr_r_access, val);
8281     qib_read_kreg32(dd, kr_scratch);
8282     ret = qib_r_wait_for_rdy(dd);
8283 
8284     if (ret >= 0)
8285         ret = pos;
8286 bail:
8287     return ret;
8288 }
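
/*
 * Illustrative note (not part of the original driver source): qib_r_shift()
 * moves 'len' bits LSB-first within each byte; on iteration 'pos' it drives
 * bit (pos & 7) of inp[pos >> 3] onto TDI and, when outp is non-NULL,
 * stores the TDO value sampled by the preceding qib_r_wait_for_rdy() into
 * the same bit position of outp.  On success it returns the number of bits
 * shifted.
 */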
8289 
8290 static int qib_r_update(struct qib_devdata *dd, int bisten)
8291 {
8292     u64 val;
8293     int ret;
8294 
8295     val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8296     ret = qib_r_wait_for_rdy(dd);
8297     if (ret >= 0) {
8298         qib_write_kreg(dd, kr_r_access, val);
8299         qib_read_kreg32(dd, kr_scratch);
8300     }
8301     return ret;
8302 }
8303 
8304 #define BISTEN_PORT_SEL 15
8305 #define LEN_PORT_SEL 625
8306 #define BISTEN_AT 17
8307 #define LEN_AT 156
8308 #define BISTEN_ETM 16
8309 #define LEN_ETM 632
8310 
8311 #define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8312 
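/*
 * Illustrative check (not part of the original driver source): BIT2BYTE
 * rounds a bit count up to whole bytes, e.g. BIT2BYTE(LEN_AT) is
 * (156 + 7) / 8 = 20 and BIT2BYTE(LEN_ETM) is (632 + 7) / 8 = 79, matching
 * the sizes of the byte arrays below.
 */
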
8313 /* these are common for all IB port use cases. */
8314 static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8315     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8316     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8317 };
8318 static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8319     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8320     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8321     0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8322     0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8323     0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8324     0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8325     0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8326     0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8327 };
8328 static u8 at[BIT2BYTE(LEN_AT)] = {
8329     0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8330     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8331 };
8332 
8333 /* used for IB1 or IB2, only one in use */
8334 static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8335     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8336     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8337     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8338     0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8339     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8340     0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8341     0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8342     0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8343 };
8344 
8345 /* used when both IB1 and IB2 are in use */
8346 static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8347     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8348     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8349     0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8350     0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8351     0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8352     0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8353     0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8354     0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8355 };
8356 
8357 /* used when only IB1 is in use */
8358 static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8359     0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8360     0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8361     0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8362     0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8363     0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8364     0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8365     0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8366     0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8367 };
8368 
8369 /* used when only IB2 is in use */
8370 static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8371     0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8372     0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8373     0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8374     0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8375     0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8376     0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8377     0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8378     0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8379 };
8380 
8381 /* used when both IB1 and IB2 are in use */
8382 static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8383     0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8384     0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8385     0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8386     0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8387     0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8388     0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8389     0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8390     0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8391 };
8392 
8393 /*
8394  * Do setup to properly handle IB link recovery; if 'both' is set, we
8395  * are initializing to cover both ports; otherwise we are initializing
8396  * to cover a single port card, or the port has reached INIT and we may
8397  * need to switch coverage types.
8398  */
8399 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8400 {
8401     u8 *portsel, *etm;
8402     struct qib_devdata *dd = ppd->dd;
8403 
8404     if (!ppd->dd->cspec->r1)
8405         return;
8406     if (!both) {
8407         dd->cspec->recovery_ports_initted++;
8408         ppd->cpspec->recovery_init = 1;
8409     }
8410     if (!both && dd->cspec->recovery_ports_initted == 1) {
8411         portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8412         etm = atetm_1port;
8413     } else {
8414         portsel = portsel_2port;
8415         etm = atetm_2port;
8416     }
8417 
8418     if (qib_r_grab(dd) < 0 ||
8419         qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8420         qib_r_update(dd, BISTEN_ETM) < 0 ||
8421         qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8422         qib_r_update(dd, BISTEN_AT) < 0 ||
8423         qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8424                 portsel, NULL) < 0 ||
8425         qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8426         qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8427         qib_r_update(dd, BISTEN_AT) < 0 ||
8428         qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8429         qib_r_update(dd, BISTEN_ETM) < 0)
8430         qib_dev_err(dd, "Failed IB link recovery setup\n");
8431 }
8432 
8433 static void check_7322_rxe_status(struct qib_pportdata *ppd)
8434 {
8435     struct qib_devdata *dd = ppd->dd;
8436     u64 fmask;
8437 
8438     if (dd->cspec->recovery_ports_initted != 1)
8439         return; /* rest doesn't apply to dualport */
8440     qib_write_kreg(dd, kr_control, dd->control |
8441                SYM_MASK(Control, FreezeMode));
8442     (void)qib_read_kreg64(dd, kr_scratch);
8443     udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8444     fmask = qib_read_kreg64(dd, kr_act_fmask);
8445     if (!fmask) {
8446         /*
8447          * require a powercycle before we'll work again, and make
8448          * sure we get no more interrupts, and don't turn off
8449          * freeze.
8450          */
8451         ppd->dd->cspec->stay_in_freeze = 1;
8452         qib_7322_set_intr_state(ppd->dd, 0);
8453         qib_write_kreg(dd, kr_fmask, 0ULL);
8454         qib_dev_err(dd, "HCA unusable until powercycled\n");
8455         return; /* eventually reset */
8456     }
8457 
8458     qib_write_kreg(ppd->dd, kr_hwerrclear,
8459         SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8460 
8461     /* don't do the full clear_freeze(), not needed for this */
8462     qib_write_kreg(dd, kr_control, dd->control);
8463     qib_read_kreg32(dd, kr_scratch);
8464     /* take IBC out of reset */
8465     if (ppd->link_speed_supported) {
8466         ppd->cpspec->ibcctrl_a &=
8467             ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8468         qib_write_kreg_port(ppd, krp_ibcctrl_a,
8469                     ppd->cpspec->ibcctrl_a);
8470         qib_read_kreg32(dd, kr_scratch);
8471         if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8472             qib_set_ib_7322_lstate(ppd, 0,
8473                 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8474     }
8475 }