0001 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
0002 /* QLogic qed NIC Driver
0003  * Copyright (c) 2015-2017  QLogic Corporation
0004  * Copyright (c) 2019-2020 Marvell International Ltd.
0005  */
0006 
0007 #include <linux/types.h>
0008 #include <asm/byteorder.h>
0009 #include <linux/io.h>
0010 #include <linux/bitops.h>
0011 #include <linux/delay.h>
0012 #include <linux/dma-mapping.h>
0013 #include <linux/errno.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/kernel.h>
0016 #include <linux/pci.h>
0017 #include <linux/slab.h>
0018 #include <linux/string.h>
0019 #include "qed.h"
0020 #include "qed_hsi.h"
0021 #include "qed_hw.h"
0022 #include "qed_init_ops.h"
0023 #include "qed_int.h"
0024 #include "qed_mcp.h"
0025 #include "qed_reg_addr.h"
0026 #include "qed_sp.h"
0027 #include "qed_sriov.h"
0028 #include "qed_vf.h"
0029 
/* Callback/cookie pair registered for one status-block protocol index (PI) */
struct qed_pi_info {
	qed_int_comp_cb_t	comp_cb;	/* completion handler for this PI */
	void			*cookie;	/* opaque argument passed back to comp_cb */
};
0034 
/* Slowpath status block together with its per-protocol-index callback table */
struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
0041 
/* The two flavors of AEU source an attention bit can represent */
enum qed_attention_type {
	QED_ATTN_TYPE_ATTN,	/* interrupt-type attention */
	QED_ATTN_TYPE_PARITY,	/* parity-error indication */
};
0046 
/* Per-hwfn aligned size of the attention status block */
#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

/* Description of a single AEU (attention engine unit) source */
struct aeu_invert_reg_bit {
	char bit_name[30];

/* Flag: this source reports a parity error (vs. a regular interrupt) */
#define ATTENTION_PARITY                (1 << 0)

/* Number of consecutive AEU bits this entry covers */
#define ATTENTION_LENGTH_MASK           (0x00000ff0)
#define ATTENTION_LENGTH_SHIFT          (4)
#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
/* Two bits: a parity bit followed by an interrupt bit */
#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK           (0x000ff000)
#define ATTENTION_OFFSET_SHIFT          (12)

/* Index into aeu_descs_special[] for sources whose meaning differs on BB
 * adapters (see ATTENTION_BB_DIFFERENT and qed_int_aeu_translate())
 */
#define ATTENTION_BB_MASK               (0x00700000)
#define ATTENTION_BB_SHIFT              (20)
#define ATTENTION_BB(value)             (value << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT          BIT(23)

#define ATTENTION_CLEAR_ENABLE          BIT(28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	int (*cb)(struct qed_hwfn *p_hwfn);

	enum block_id block_index;
};
0081 
/* One AEU "after invert" register: descriptions for its 32 source bits */
struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS           (8)	/* attention groups routed via the IGU */
#define NUM_ATTN_REGS           (9)	/* AEU after-invert registers */
0088 
0089 /* Specific HW attention callbacks */
0090 static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
0091 {
0092     u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
0093 
0094     /* This might occur on certain instances; Log it once then mask it */
0095     DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
0096         tmp);
0097     qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
0098            0xffffffff);
0099 
0100     return 0;
0101 }
0102 
#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
/* Field layout of PSWHST_REG_INCORRECT_ACCESS_DATA */
#define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
/* PSWHST attention: if an incorrect host access was latched, read and log
 * its address, length and decoded details. Informational only - always
 * returns 0.
 */
static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 PSWHST_REG_INCORRECT_ACCESS_VALID);

	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->cdev,
			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_CLIENT),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
			data);
	}

	return 0;
}
0148 
/* GRC timeout attention - layout of ACCESS_VALID and DATA_0 */
#define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
#define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)	/* set => write access */
#define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
/* GRC timeout attention - layout of DATA_1 */
#define QED_GRC_ATTENTION_PF_MASK	(0xf)
#define QED_GRC_ATTENTION_PF_SHIFT	(0)
#define QED_GRC_ATTENTION_VF_MASK	(0xff)
#define QED_GRC_ATTENTION_VF_SHIFT	(4)
#define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
#define QED_GRC_ATTENTION_PRIV_VF	(0)	/* PRIV value meaning "VF access" */
0162 static const char *attn_master_to_str(u8 master)
0163 {
0164     switch (master) {
0165     case 1: return "PXP";
0166     case 2: return "MCP";
0167     case 3: return "MSDM";
0168     case 4: return "PSDM";
0169     case 5: return "YSDM";
0170     case 6: return "USDM";
0171     case 7: return "TSDM";
0172     case 8: return "XSDM";
0173     case 9: return "DBU";
0174     case 10: return "DMAE";
0175     default:
0176         return "Unknown";
0177     }
0178 }
0179 
/* GRC attention: decode and log a latched GRC timeout event (if one is
 * valid), then clear the validity bit so the next event can be captured.
 * Informational only - always returns 0.
 */
static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	/* ADDRESS field is in dwords; << 2 converts it to a byte address */
	DP_INFO(p_hwfn->cdev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

out:
	/* Regardless of anything else, clean the validity bit */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
	return 0;
}
0215 
/* PGLUE_B latched-error registers - validity bits and detail fields */
#define PGLUE_ATTENTION_VALID           (1 << 29)	/* TX_ERR_WR_DETAILS2 */
#define PGLUE_ATTENTION_RD_VALID        (1 << 26)	/* TX_ERR_RD_DETAILS2 */
#define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
#define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
#define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
#define PGLUE_ATTENTION_ICPL_VALID      (1 << 23)	/* TX_ERR_WR_DETAILS_ICPL */
#define PGLUE_ATTENTION_ZLR_VALID       (1 << 25)	/* MASTER_ZLR_ERR_DETAILS */
#define PGLUE_ATTENTION_ILT_VALID       (1 << 23)	/* VF_ILT_ERR_DETAILS2 */
0233 
/**
 * qed_pglueb_rbc_attn_handler(): Handle latched PGLUE_B RBC errors.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 * @hw_init: True when invoked from the HW-init flow; expected leftover
 *           indications are then logged verbosely rather than as notices.
 *
 * Scans the PGLUE_B latched error registers - illegal write, illegal
 * read, ICPL, ZLR and VF ILT errors - logs every valid indication, and
 * finally clears the latched-error state.
 *
 * Return: 0 always.
 */
int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
				bool hw_init)
{
	char msg[256];
	u32 tmp;

	/* Illegal write by the chip */
	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_DETAILS);

		snprintf(msg, sizeof(msg),
			 "Illegal write by chip to [%08x:%08x] blocked.\n"
			 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]",
			 addr_hi, addr_lo, details,
			 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			 !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID),
			 tmp,
			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR),
			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME),
			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN));

		if (hw_init)
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
		else
			DP_NOTICE(p_hwfn, "%s\n", msg);
	}

	/* Illegal read by the chip */
	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_NOTICE(p_hwfn,
			  "Illegal read by chip from [%08x:%08x] blocked.\n"
			  "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			  "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			  GET_FIELD(details,
				    PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			  tmp,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	/* Incorrect-completion (ICPL) error */
	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & PGLUE_ATTENTION_ICPL_VALID) {
		snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp);

		if (hw_init)
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
		else
			DP_NOTICE(p_hwfn, "%s\n", msg);
	}

	/* Zero-length-read (ZLR) error */
	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	/* VF ILT error */
	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));

	return 0;
}
0343 
0344 static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
0345 {
0346     return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
0347 }
0348 
/* Firmware raised general attention 32 (FW assert): notify the HW-error
 * infrastructure and clear the assert indication. Returns -EINVAL so the
 * attention is treated as fatal by the caller.
 */
static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
{
	qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
			  "FW assertion!\n");

	/* Clear assert indications */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_GENERAL_ATTN_32, 0);

	return 0;
}
0359 
/* General attention 35: informational only - log its arrival and return 0. */
static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return 0;
}
0366 
/* Field layout of DORQ doorbell-drop detail registers */
#define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define QED_DORQ_ATTENTION_SIZE_MASK            (0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT           (16)

/* Doorbell-recovery flush: poll count and per-poll delay (usec) */
#define QED_DB_REC_COUNT                        1000
#define QED_DB_REC_INTERVAL                     100
0375 
/* Flush the DORQ doorbell queue for this PF: abort pending (e)dpms, then
 * poll the PF usage counter until it drains. Returns 0 on success, -EBUSY
 * if usage failed to reach zero within QED_DB_REC_COUNT polls of
 * QED_DB_REC_INTERVAL usec each.
 */
static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 count = QED_DB_REC_COUNT;
	u32 usage = 1;

	/* Flush any pending (e)dpms as they may never arrive */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the pci. Possibly, the doorbell drop can happen with
	 * half an EDPM in the queue and other half dropped. Another EDPM
	 * doorbell to the same address (from doorbell recovery mechanism or
	 * from the doorbelling entity) could have first half dropped and second
	 * half interpreted as continuation of the first. To prevent such
	 * malformed doorbells from reaching the device, flush the queue before
	 * releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		udelay(QED_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->cdev,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
		return -EBUSY;
	}

	return 0;
}
0410 
/**
 * qed_db_rec_handler(): Doorbell-recovery periodic handler.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 *
 * Checks both the deferred overflow flag (set by the attention handler)
 * and the current sticky overflow indication. If either is set, flushes
 * the doorbell queue (when eDPM is in use), releases the sticky
 * indication, and replays all registered doorbells.
 *
 * Return: 0 on success (including "nothing to do"), negative errno if
 * the queue flush failed.
 */
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 attn_ovfl, cur_ovfl;
	int rc;

	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
				       &p_hwfn->db_recovery_info.overflow);
	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!cur_ovfl && !attn_ovfl)
		return 0;

	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
		  attn_ovfl, cur_ovfl);

	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	/* Release overflow sticky indication (stop silently dropping everything) */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* Repeat all last doorbells (doorbell drop recovery) */
	qed_db_recovery_execute(p_hwfn);

	return 0;
}
0439 
/* DORQ overflow attention (DPC context): if the PF overflow sticky bit is
 * set, record it for the periodic doorbell-recovery handler, flush the
 * doorbell queue (when eDPM is in use) and release the sticky indication.
 * The periodic handler is scheduled in every case.
 */
static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	u32 overflow;
	int rc;

	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!overflow)
		goto out;

	/* Run PF doorbell recovery in next periodic handler */
	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);

	if (!p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			/* Keep sticky set; recovery retried from periodic handler */
			goto out;
	}

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
out:
	/* Schedule the handler even if overflow was not detected */
	qed_periodic_db_rec_start(p_hwfn);
}
0464 
/* Decode the DORQ interrupt status register. Logs doorbell drop/overflow
 * details when present and acknowledges them; any other indication is
 * considered fatal. Returns 0 when nothing (or only handled drops) was
 * pending, -EINVAL on a fatal DORQ indication.
 */
static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;

	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	/* 0xdeadbeaf is the value the block returns while under reset */
	if (int_sts == 0xdeadbeaf) {
		DP_NOTICE(p_hwfn->cdev,
			  "DORQ is being reset, skipping int_sts handler\n");

		return 0;
	}

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * This PF also requires overflow recovery we will be interrupted again.
	 * The masked almost full indication may also be set. Ignoring.
	 */
	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
		return 0;

	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* Obtain data about db drop/overflow */
		first_drop_reason = qed_rd(p_hwfn, p_ptt,
					   DORQ_REG_DB_DROP_REASON) &
			QED_DORQ_ATTENTION_REASON_MASK;
		details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
		address = qed_rd(p_hwfn, p_ptt,
				 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		all_drops_reason = qed_rd(p_hwfn, p_ptt,
					  DORQ_REG_DB_DROP_DETAILS_REASON);

		/* Log info */
		DP_NOTICE(p_hwfn->cdev,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
			  address,
			  GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason);

		/* Clear the doorbell drop details and prepare for next drop */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* Mark interrupt as handled (note: even if drop was due to a different
		 * reason than overflow we mark as handled)
		 */
		qed_wr(p_hwfn,
		       p_ptt,
		       DORQ_REG_INT_STS_WR,
		       DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* If there are no indications other than drop indications, success */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return 0;
	}

	/* Some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return -EINVAL;
}
0538 
/* DORQ attention callback: skip during device recovery; otherwise mark
 * the attention as seen (so qed_dorq_attn_handler() won't re-invoke us),
 * run overflow handling and then decode the interrupt status.
 */
static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->cdev->recov_in_prog)
		return 0;

	/* Flag must be set before the handlers below run - see
	 * qed_dorq_attn_handler().
	 */
	p_hwfn->db_recovery_info.dorq_attn = true;
	qed_dorq_attn_overflow(p_hwfn);

	return qed_dorq_attn_int_sts(p_hwfn);
}
0549 
0550 static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
0551 {
0552     if (p_hwfn->db_recovery_info.dorq_attn)
0553         goto out;
0554 
0555     /* Call DORQ callback if the attention was missed */
0556     qed_dorq_attn_cb(p_hwfn);
0557 out:
0558     p_hwfn->db_recovery_info.dorq_attn = false;
0559 }
0560 
/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};
0571 
/* Replacement AEU descriptions for the 'special' sources above; selected
 * via ATTENTION_BB() in qed_int_aeu_translate() on BB adapters.
 */
static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};
0579 
/* Notice aeu_invert_reg must be defined in the same order of bits as HW;
 * each entry is {name, flags, callback, block}, where flags encode the
 * bit count/offset and parity/interrupt type (see aeu_invert_reg_bit).
 */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{       /* After Invert 1 */
			{"GPIO0 function%d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{       /* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
					  (1 << ATTENTION_OFFSET_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d",
			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
		}
	},

	{
		{       /* After Invert 3 */
			{"General Attention %d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{       /* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE |
			 ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
			 MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE |
			 ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
			 MAX_BLOCK_ID},
			/* NWS/NWM sources differ on BB - see aeu_descs_special */
			{"NWS Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 NULL, BLOCK_NWS},
			{"NWM Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE,
			 qed_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
		}
	},

	{
		{       /* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
			{"MCM",  ATTENTION_PAR_INT, NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
		}
	},

	{
		{       /* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT,
			 qed_dorq_attn_cb, BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
		}
	},

	{
		{       /* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
		}
	},

	{
		{       /* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT,
			 qed_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT,
			 qed_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
			 NULL, BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},

	{
		{       /* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},
};
0780 
0781 static struct aeu_invert_reg_bit *
0782 qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
0783               struct aeu_invert_reg_bit *p_bit)
0784 {
0785     if (!QED_IS_BB(p_hwfn->cdev))
0786         return p_bit;
0787 
0788     if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
0789         return p_bit;
0790 
0791     return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
0792                   ATTENTION_BB_SHIFT];
0793 }
0794 
0795 static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
0796                    struct aeu_invert_reg_bit *p_bit)
0797 {
0798     return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
0799            ATTENTION_PARITY);
0800 }
0801 
#define ATTN_STATE_BITS         (0xfff)
#define ATTN_BITS_MASKABLE      (0x3ff)
/* Driver-side bookkeeping for the attention status block */
struct qed_sb_attn_info {
    /* Virtual & Physical address of the SB */
    struct atten_status_block       *sb_attn;
    dma_addr_t          sb_phys;

    /* Last seen running index */
    u16             index;

    /* A mask of the AEU bits resulting in a parity error */
    u32             parity_mask[NUM_ATTN_REGS];

    /* A pointer to the attention description structure */
    struct aeu_invert_reg       *p_aeu_desc;

    /* Previously asserted attentions, which are still unasserted */
    u16             known_attn;

    /* Cleanup address for the link's general hw attention */
    u32             mfw_attn_addr;
};
0824 
0825 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
0826                       struct qed_sb_attn_info *p_sb_desc)
0827 {
0828     u16 rc = 0, index;
0829 
0830     index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
0831     if (p_sb_desc->index != index) {
0832         p_sb_desc->index    = index;
0833         rc            = QED_SB_ATT_IDX;
0834     }
0835 
0836     return rc;
0837 }
0838 
0839 /**
0840  * qed_int_assertion() - Handle asserted attention bits.
0841  *
0842  * @p_hwfn: HW device data.
0843  * @asserted_bits: Newly asserted bits.
0844  *
0845  * Return: Zero value.
0846  */
0847 static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
0848 {
0849     struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
0850     u32 igu_mask;
0851 
0852     /* Mask the source of the attention in the IGU */
0853     igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
0854     DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
0855            igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
0856     igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
0857     qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
0858 
0859     DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
0860            "inner known ATTN state: 0x%04x --> 0x%04x\n",
0861            sb_attn_sw->known_attn,
0862            sb_attn_sw->known_attn | asserted_bits);
0863     sb_attn_sw->known_attn |= asserted_bits;
0864 
0865     /* Handle MCP events */
0866     if (asserted_bits & 0x100) {
0867         qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
0868         /* Clean the MCP attention */
0869         qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
0870                sb_attn_sw->mfw_attn_addr, 0);
0871     }
0872 
0873     DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
0874               GTT_BAR0_MAP_REG_IGU_CMD +
0875               ((IGU_CMD_ATTN_BIT_SET_UPPER -
0876             IGU_CMD_INT_ACK_BASE) << 3),
0877               (u32)asserted_bits);
0878 
0879     DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
0880            asserted_bits);
0881 
0882     return 0;
0883 }
0884 
0885 static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
0886                    enum block_id id,
0887                    enum dbg_attn_type type, bool b_clear)
0888 {
0889     struct dbg_attn_block_result attn_results;
0890     enum dbg_status status;
0891 
0892     memset(&attn_results, 0, sizeof(attn_results));
0893 
0894     status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
0895                    b_clear, &attn_results);
0896     if (status != DBG_STATUS_OK)
0897         DP_NOTICE(p_hwfn,
0898               "Failed to parse attention information [status: %s]\n",
0899               qed_dbg_get_status_str(status));
0900     else
0901         qed_dbg_parse_attn(p_hwfn, &attn_results);
0902 }
0903 
0904 /**
0905  * qed_int_deassertion_aeu_bit() - Handles the effects of a single
0906  * cause of the attention.
0907  *
0908  * @p_hwfn: HW device data.
0909  * @p_aeu: Descriptor of an AEU bit which caused the attention.
0910  * @aeu_en_reg: Register offset of the AEU enable reg. which configured
0911  *              this bit to this group.
0912  * @p_bit_name: AEU bit description for logging purposes.
0913  * @bitmask: Index of this bit in the aeu_en_reg.
0914  *
0915  * Return: Zero on success, negative errno otherwise.
0916  */
0917 static int
0918 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
0919                 struct aeu_invert_reg_bit *p_aeu,
0920                 u32 aeu_en_reg,
0921                 const char *p_bit_name, u32 bitmask)
0922 {
0923     bool b_fatal = false;
0924     int rc = -EINVAL;
0925     u32 val;
0926 
0927     DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
0928         p_bit_name, bitmask);
0929 
0930     /* Call callback before clearing the interrupt status */
0931     if (p_aeu->cb) {
0932         DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
0933             p_bit_name);
0934         rc = p_aeu->cb(p_hwfn);
0935     }
0936 
0937     if (rc)
0938         b_fatal = true;
0939 
0940     /* Print HW block interrupt registers */
0941     if (p_aeu->block_index != MAX_BLOCK_ID)
0942         qed_int_attn_print(p_hwfn, p_aeu->block_index,
0943                    ATTN_TYPE_INTERRUPT, !b_fatal);
0944 
0945     /* Reach assertion if attention is fatal */
0946     if (b_fatal)
0947         qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
0948                   "`%s': Fatal attention\n",
0949                   p_bit_name);
0950     else /* If the attention is benign, no need to prevent it */
0951         goto out;
0952 
0953     /* Prevent this Attention from being asserted in the future */
0954     val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
0955     qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
0956     DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
0957         p_bit_name);
0958 
0959     /* Re-enable FW aassertion (Gen 32) interrupts */
0960     val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
0961              MISC_REG_AEU_ENABLE4_IGU_OUT_0);
0962     val |= MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32;
0963     qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
0964            MISC_REG_AEU_ENABLE4_IGU_OUT_0, val);
0965 
0966 out:
0967     return rc;
0968 }
0969 
0970 /**
0971  * qed_int_deassertion_parity() - Handle a single parity AEU source.
0972  *
0973  * @p_hwfn: HW device data.
0974  * @p_aeu: Descriptor of an AEU bit which caused the parity.
0975  * @aeu_en_reg: Address of the AEU enable register.
0976  * @bit_index: Index (0-31) of an AEU bit.
0977  */
0978 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
0979                        struct aeu_invert_reg_bit *p_aeu,
0980                        u32 aeu_en_reg, u8 bit_index)
0981 {
0982     u32 block_id = p_aeu->block_index, mask, val;
0983 
0984     DP_NOTICE(p_hwfn->cdev,
0985           "%s parity attention is set [address 0x%08x, bit %d]\n",
0986           p_aeu->bit_name, aeu_en_reg, bit_index);
0987 
0988     if (block_id != MAX_BLOCK_ID) {
0989         qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
0990 
0991         /* In BB, there's a single parity bit for several blocks */
0992         if (block_id == BLOCK_BTB) {
0993             qed_int_attn_print(p_hwfn, BLOCK_OPTE,
0994                        ATTN_TYPE_PARITY, false);
0995             qed_int_attn_print(p_hwfn, BLOCK_MCP,
0996                        ATTN_TYPE_PARITY, false);
0997         }
0998     }
0999 
1000     /* Prevent this parity error from being re-asserted */
1001     mask = ~BIT(bit_index);
1002     val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
1003     qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
1004     DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
1005         p_aeu->bit_name);
1006 }
1007 
1008 /**
1009  * qed_int_deassertion() - Handle deassertion of previously asserted
1010  * attentions.
1011  *
1012  * @p_hwfn: HW device data.
1013  * @deasserted_bits: newly deasserted bits.
1014  *
1015  * Return: Zero value.
1016  */
1017 static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
1018                    u16 deasserted_bits)
1019 {
1020     struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
1021     u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
1022     u8 i, j, k, bit_idx;
1023     int rc = 0;
1024 
1025     /* Read the attention registers in the AEU */
1026     for (i = 0; i < NUM_ATTN_REGS; i++) {
1027         aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1028                     MISC_REG_AEU_AFTER_INVERT_1_IGU +
1029                     i * 0x4);
1030         DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1031                "Deasserted bits [%d]: %08x\n",
1032                i, aeu_inv_arr[i]);
1033     }
1034 
1035     /* Find parity attentions first */
1036     for (i = 0; i < NUM_ATTN_REGS; i++) {
1037         struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
1038         u32 parities;
1039 
1040         aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
1041         en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1042 
1043         /* Skip register in which no parity bit is currently set */
1044         parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
1045         if (!parities)
1046             continue;
1047 
1048         for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
1049             struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
1050 
1051             if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
1052                 !!(parities & BIT(bit_idx)))
1053                 qed_int_deassertion_parity(p_hwfn, p_bit,
1054                                aeu_en, bit_idx);
1055 
1056             bit_idx += ATTENTION_LENGTH(p_bit->flags);
1057         }
1058     }
1059 
1060     /* Find non-parity cause for attention and act */
1061     for (k = 0; k < MAX_ATTN_GRPS; k++) {
1062         struct aeu_invert_reg_bit *p_aeu;
1063 
1064         /* Handle only groups whose attention is currently deasserted */
1065         if (!(deasserted_bits & (1 << k)))
1066             continue;
1067 
1068         for (i = 0; i < NUM_ATTN_REGS; i++) {
1069             u32 bits;
1070 
1071             aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1072                  i * sizeof(u32) +
1073                  k * sizeof(u32) * NUM_ATTN_REGS;
1074 
1075             en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1076             bits = aeu_inv_arr[i] & en;
1077 
1078             /* Skip if no bit from this group is currently set */
1079             if (!bits)
1080                 continue;
1081 
1082             /* Find all set bits from current register which belong
1083              * to current group, making them responsible for the
1084              * previous assertion.
1085              */
1086             for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
1087                 long unsigned int bitmask;
1088                 u8 bit, bit_len;
1089 
1090                 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
1091                 p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);
1092 
1093                 bit = bit_idx;
1094                 bit_len = ATTENTION_LENGTH(p_aeu->flags);
1095                 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
1096                     /* Skip Parity */
1097                     bit++;
1098                     bit_len--;
1099                 }
1100 
1101                 bitmask = bits & (((1 << bit_len) - 1) << bit);
1102                 bitmask >>= bit;
1103 
1104                 if (bitmask) {
1105                     u32 flags = p_aeu->flags;
1106                     char bit_name[30];
1107                     u8 num;
1108 
1109                     num = (u8)find_first_bit(&bitmask,
1110                                  bit_len);
1111 
1112                     /* Some bits represent more than a
1113                      * single interrupt. Correctly print
1114                      * their name.
1115                      */
1116                     if (ATTENTION_LENGTH(flags) > 2 ||
1117                         ((flags & ATTENTION_PAR_INT) &&
1118                          ATTENTION_LENGTH(flags) > 1))
1119                         snprintf(bit_name, 30,
1120                              p_aeu->bit_name, num);
1121                     else
1122                         strlcpy(bit_name,
1123                             p_aeu->bit_name, 30);
1124 
1125                     /* We now need to pass bitmask in its
1126                      * correct position.
1127                      */
1128                     bitmask <<= bit;
1129 
1130                     /* Handle source of the attention */
1131                     qed_int_deassertion_aeu_bit(p_hwfn,
1132                                     p_aeu,
1133                                     aeu_en,
1134                                     bit_name,
1135                                     bitmask);
1136                 }
1137 
1138                 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1139             }
1140         }
1141     }
1142 
1143     /* Handle missed DORQ attention */
1144     qed_dorq_attn_handler(p_hwfn);
1145 
1146     /* Clear IGU indication for the deasserted bits */
1147     DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
1148                     GTT_BAR0_MAP_REG_IGU_CMD +
1149                     ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1150                       IGU_CMD_INT_ACK_BASE) << 3),
1151                     ~((u32)deasserted_bits));
1152 
1153     /* Unmask deasserted attentions in IGU */
1154     aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
1155     aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1156     qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1157 
1158     /* Clear deassertion from inner state */
1159     sb_attn_sw->known_attn &= ~deasserted_bits;
1160 
1161     return rc;
1162 }
1163 
/* Sample the attention status block coherently and translate its contents
 * into assertion/deassertion events, dispatching each to its handler.
 */
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
    struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
    struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
    u32 attn_bits = 0, attn_acks = 0;
    u16 asserted_bits, deasserted_bits;
    __le16 index;
    int rc = 0;

    /* Read current attention bits/acks - safeguard against attentions
     * by guaranting work on a synchronized timeframe
     */
    do {
        index = p_sb_attn->sb_index;
        /* finish reading index before the loop condition */
        dma_rmb();
        attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
        attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
    } while (index != p_sb_attn->sb_index);
    p_sb_attn->sb_index = index;

    /* Attention / Deassertion are meaningful (and in correct state)
     * only when they differ and consistent with known state - deassertion
     * when previous attention & current ack, and assertion when current
     * attention with no previous attention
     */
    asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
        ~p_sb_attn_sw->known_attn;
    deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
        p_sb_attn_sw->known_attn;

    /* Bit 0x100 is the MFW indication (handled as MCP events by the
     * assertion path); anything beyond it is logged loudly.
     */
    if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
        DP_INFO(p_hwfn,
            "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
            index, attn_bits, attn_acks, asserted_bits,
            deasserted_bits, p_sb_attn_sw->known_attn);
    } else if (asserted_bits == 0x100) {
        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
               "MFW indication via attention\n");
    } else {
        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
               "MFW indication [deassertion]\n");
    }

    if (asserted_bits) {
        rc = qed_int_assertion(p_hwfn, asserted_bits);
        if (rc)
            return rc;
    }

    if (deasserted_bits)
        rc = qed_int_deassertion(p_hwfn, deasserted_bits);

    return rc;
}
1219 
1220 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
1221                 void __iomem *igu_addr, u32 ack_cons)
1222 {
1223     u32 igu_ack;
1224 
1225     igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1226            (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1227            (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1228            (IGU_SEG_ACCESS_ATTN <<
1229             IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1230 
1231     DIRECT_REG_WR(igu_addr, igu_ack);
1232 
1233     /* Both segments (interrupts & acks) are written to same place address;
1234      * Need to guarantee all commands will be received (in-order) by HW.
1235      */
1236     barrier();
1237 }
1238 
1239 void qed_int_sp_dpc(struct tasklet_struct *t)
1240 {
1241     struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc);
1242     struct qed_pi_info *pi_info = NULL;
1243     struct qed_sb_attn_info *sb_attn;
1244     struct qed_sb_info *sb_info;
1245     int arr_size;
1246     u16 rc = 0;
1247 
1248     if (!p_hwfn->p_sp_sb) {
1249         DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
1250         return;
1251     }
1252 
1253     sb_info = &p_hwfn->p_sp_sb->sb_info;
1254     arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1255     if (!sb_info) {
1256         DP_ERR(p_hwfn->cdev,
1257                "Status block is NULL - cannot ack interrupts\n");
1258         return;
1259     }
1260 
1261     if (!p_hwfn->p_sb_attn) {
1262         DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
1263         return;
1264     }
1265     sb_attn = p_hwfn->p_sb_attn;
1266 
1267     DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1268            p_hwfn, p_hwfn->my_id);
1269 
1270     /* Disable ack for def status block. Required both for msix +
1271      * inta in non-mask mode, in inta does no harm.
1272      */
1273     qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1274 
1275     /* Gather Interrupts/Attentions information */
1276     if (!sb_info->sb_virt) {
1277         DP_ERR(p_hwfn->cdev,
1278                "Interrupt Status block is NULL - cannot check for new interrupts!\n");
1279     } else {
1280         u32 tmp_index = sb_info->sb_ack;
1281 
1282         rc = qed_sb_update_sb_idx(sb_info);
1283         DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
1284                "Interrupt indices: 0x%08x --> 0x%08x\n",
1285                tmp_index, sb_info->sb_ack);
1286     }
1287 
1288     if (!sb_attn || !sb_attn->sb_attn) {
1289         DP_ERR(p_hwfn->cdev,
1290                "Attentions Status block is NULL - cannot check for new attentions!\n");
1291     } else {
1292         u16 tmp_index = sb_attn->index;
1293 
1294         rc |= qed_attn_update_idx(p_hwfn, sb_attn);
1295         DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
1296                "Attention indices: 0x%08x --> 0x%08x\n",
1297                tmp_index, sb_attn->index);
1298     }
1299 
1300     /* Check if we expect interrupts at this time. if not just ack them */
1301     if (!(rc & QED_SB_EVENT_MASK)) {
1302         qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1303         return;
1304     }
1305 
1306     /* Check the validity of the DPC ptt. If not ack interrupts and fail */
1307     if (!p_hwfn->p_dpc_ptt) {
1308         DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
1309         qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1310         return;
1311     }
1312 
1313     if (rc & QED_SB_ATT_IDX)
1314         qed_int_attentions(p_hwfn);
1315 
1316     if (rc & QED_SB_IDX) {
1317         int pi;
1318 
1319         /* Look for a free index */
1320         for (pi = 0; pi < arr_size; pi++) {
1321             pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1322             if (pi_info->comp_cb)
1323                 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1324         }
1325     }
1326 
1327     if (sb_attn && (rc & QED_SB_ATT_IDX))
1328         /* This should be done before the interrupts are enabled,
1329          * since otherwise a new attention will be generated.
1330          */
1331         qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1332 
1333     qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1334 }
1335 
/* Release the attention SB: free the DMA-coherent ring (if allocated),
 * then the driver-side descriptor, and clear the hwfn's pointer.
 */
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
    struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

    if (!p_sb)
        return;

    if (p_sb->sb_attn)
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                  SB_ATTN_ALIGNED_SIZE(p_hwfn),
                  p_sb->sb_attn, p_sb->sb_phys);
    kfree(p_sb);
    /* Prevent later use-after-free via the hwfn */
    p_hwfn->p_sb_attn = NULL;
}
1350 
1351 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
1352                   struct qed_ptt *p_ptt)
1353 {
1354     struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1355 
1356     memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1357 
1358     sb_info->index = 0;
1359     sb_info->known_attn = 0;
1360 
1361     /* Configure Attention Status Block in IGU */
1362     qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1363            lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
1364     qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1365            upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
1366 }
1367 
/* One-time init of the attention SB state: record the DMA ring address,
 * attach the AEU descriptor table, precompute per-register parity masks,
 * compute the MCP attention cleanup address, and program the IGU.
 */
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
                 struct qed_ptt *p_ptt,
                 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
    struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
    int i, j, k;

    sb_info->sb_attn = sb_virt_addr;
    sb_info->sb_phys = sb_phy_addr;

    /* Set the pointer to the AEU descriptors */
    sb_info->p_aeu_desc = aeu_descs;

    /* Calculate Parity Masks */
    memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
    for (i = 0; i < NUM_ATTN_REGS; i++) {
        /* j is array index, k is bit index */
        for (j = 0, k = 0; k < 32 && j < 32; j++) {
            struct aeu_invert_reg_bit *p_aeu;

            p_aeu = &aeu_descs[i].bits[j];
            if (qed_int_is_parity_flag(p_hwfn, p_aeu))
                sb_info->parity_mask[i] |= 1 << k;

            /* One descriptor may cover several register bits */
            k += ATTENTION_LENGTH(p_aeu->flags);
        }
        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
               "Attn Mask [Reg %d]: 0x%08x\n",
               i, sb_info->parity_mask[i]);
    }

    /* Set the address of cleanup for the mcp attention */
    sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
                 MISC_REG_AEU_GENERAL_ATTN_0;

    qed_int_sb_attn_setup(p_hwfn, p_ptt);
}
1405 
1406 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
1407                  struct qed_ptt *p_ptt)
1408 {
1409     struct qed_dev *cdev = p_hwfn->cdev;
1410     struct qed_sb_attn_info *p_sb;
1411     dma_addr_t p_phys = 0;
1412     void *p_virt;
1413 
1414     /* SB struct */
1415     p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
1416     if (!p_sb)
1417         return -ENOMEM;
1418 
1419     /* SB ring  */
1420     p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1421                     SB_ATTN_ALIGNED_SIZE(p_hwfn),
1422                     &p_phys, GFP_KERNEL);
1423 
1424     if (!p_virt) {
1425         kfree(p_sb);
1426         return -ENOMEM;
1427     }
1428 
1429     /* Attention setup */
1430     p_hwfn->p_sb_attn = p_sb;
1431     qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1432 
1433     return 0;
1434 }
1435 
1436 /* coalescing timeout = timeset << (timer_res + 1) */
1437 #define QED_CAU_DEF_RX_USECS 24
1438 #define QED_CAU_DEF_TX_USECS 48
1439 
1440 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
1441                struct cau_sb_entry *p_sb_entry,
1442                u8 pf_id, u16 vf_number, u8 vf_valid)
1443 {
1444     struct qed_dev *cdev = p_hwfn->cdev;
1445     u32 cau_state, params = 0, data = 0;
1446     u8 timer_res;
1447 
1448     memset(p_sb_entry, 0, sizeof(*p_sb_entry));
1449 
1450     SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1451     SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1452     SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1453     SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1454     SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1455 
1456     cau_state = CAU_HC_DISABLE_STATE;
1457 
1458     if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
1459         cau_state = CAU_HC_ENABLE_STATE;
1460         if (!cdev->rx_coalesce_usecs)
1461             cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
1462         if (!cdev->tx_coalesce_usecs)
1463             cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
1464     }
1465 
1466     /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
1467     if (cdev->rx_coalesce_usecs <= 0x7F)
1468         timer_res = 0;
1469     else if (cdev->rx_coalesce_usecs <= 0xFF)
1470         timer_res = 1;
1471     else
1472         timer_res = 2;
1473 
1474     SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1475 
1476     if (cdev->tx_coalesce_usecs <= 0x7F)
1477         timer_res = 0;
1478     else if (cdev->tx_coalesce_usecs <= 0xFF)
1479         timer_res = 1;
1480     else
1481         timer_res = 2;
1482 
1483     SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1484     p_sb_entry->params = cpu_to_le32(params);
1485 
1486     SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state);
1487     SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state);
1488     p_sb_entry->data = cpu_to_le32(data);
1489 }
1490 
1491 static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
1492                 struct qed_ptt *p_ptt,
1493                 u16 igu_sb_id,
1494                 u32 pi_index,
1495                 enum qed_coalescing_fsm coalescing_fsm,
1496                 u8 timeset)
1497 {
1498     u32 sb_offset, pi_offset;
1499     u32 prod = 0;
1500 
1501     if (IS_VF(p_hwfn->cdev))
1502         return;
1503 
1504     SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1505     if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
1506         SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0);
1507     else
1508         SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);
1509 
1510     sb_offset = igu_sb_id * PIS_PER_SB;
1511     pi_offset = sb_offset + pi_index;
1512 
1513     if (p_hwfn->hw_init_done)
1514         qed_wr(p_hwfn, p_ptt,
1515                CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod);
1516     else
1517         STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1518                  prod);
1519 }
1520 
/* Configure a status block in the CAU: write its address and variable
 * entry (via DMAE after HW init, via runtime-init registers before),
 * then program RX/TX coalescing PIs when coalescing is enabled.
 */
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
             struct qed_ptt *p_ptt,
             dma_addr_t sb_phys,
             u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
    struct cau_sb_entry sb_entry;

    qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
                  vf_number, vf_valid);

    if (p_hwfn->hw_init_done) {
        /* Wide-bus, initialize via DMAE */
        u64 phys_addr = (u64)sb_phys;

        /* Both writes copy 2 dwords (one u64 entry) into GRC space */
        qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
                  CAU_REG_SB_ADDR_MEMORY +
                  igu_sb_id * sizeof(u64), 2, NULL);
        qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
                  CAU_REG_SB_VAR_MEMORY +
                  igu_sb_id * sizeof(u64), 2, NULL);
    } else {
        /* Initialize Status Block Address */
        STORE_RT_REG_AGG(p_hwfn,
                 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
                 igu_sb_id * 2,
                 sb_phys);

        STORE_RT_REG_AGG(p_hwfn,
                 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
                 igu_sb_id * 2,
                 sb_entry);
    }

    /* Configure pi coalescing if set */
    if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
        u8 num_tc = p_hwfn->hw_info.num_hw_tc;
        u8 timeset, timer_res;
        u8 i;

        /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
        if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
            timer_res = 0;
        else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
            timer_res = 1;
        else
            timer_res = 2;
        timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
        qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
                    QED_COAL_RX_STATE_MACHINE, timeset);

        if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
            timer_res = 0;
        else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
            timer_res = 1;
        else
            timer_res = 2;
        timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
        /* TX uses one PI per traffic class */
        for (i = 0; i < num_tc; i++) {
            qed_int_cau_conf_pi(p_hwfn, p_ptt,
                        igu_sb_id, TX_PI(i),
                        QED_COAL_TX_STATE_MACHINE,
                        timeset);
        }
    }
}
1586 
1587 void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
1588               struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
1589 {
1590     /* zero status block and ack counter */
1591     sb_info->sb_ack = 0;
1592     memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1593 
1594     if (IS_PF(p_hwfn->cdev))
1595         qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1596                     sb_info->igu_sb_id, 0, 0);
1597 }
1598 
1599 struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
1600 {
1601     struct qed_igu_block *p_block;
1602     u16 igu_id;
1603 
1604     for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
1605          igu_id++) {
1606         p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1607 
1608         if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1609             !(p_block->status & QED_IGU_STATUS_FREE))
1610             continue;
1611 
1612         if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
1613             return p_block;
1614     }
1615 
1616     return NULL;
1617 }
1618 
1619 static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
1620 {
1621     struct qed_igu_block *p_block;
1622     u16 igu_id;
1623 
1624     for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
1625          igu_id++) {
1626         p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1627 
1628         if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1629             !p_block->is_pf ||
1630             p_block->vector_number != vector_id)
1631             continue;
1632 
1633         return igu_id;
1634     }
1635 
1636     return QED_SB_INVALID_IDX;
1637 }
1638 
1639 u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
1640 {
1641     u16 igu_sb_id;
1642 
1643     /* Assuming continuous set of IGU SBs dedicated for given PF */
1644     if (sb_id == QED_SP_SB_ID)
1645         igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1646     else if (IS_PF(p_hwfn->cdev))
1647         igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1648     else
1649         igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
1650 
1651     if (sb_id == QED_SP_SB_ID)
1652         DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1653                "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1654     else
1655         DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1656                "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1657 
1658     return igu_sb_id;
1659 }
1660 
1661 int qed_int_sb_init(struct qed_hwfn *p_hwfn,
1662             struct qed_ptt *p_ptt,
1663             struct qed_sb_info *sb_info,
1664             void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
1665 {
1666     sb_info->sb_virt = sb_virt_addr;
1667     sb_info->sb_phys = sb_phy_addr;
1668 
1669     sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
1670 
1671     if (sb_id != QED_SP_SB_ID) {
1672         if (IS_PF(p_hwfn->cdev)) {
1673             struct qed_igu_info *p_info;
1674             struct qed_igu_block *p_block;
1675 
1676             p_info = p_hwfn->hw_info.p_igu_info;
1677             p_block = &p_info->entry[sb_info->igu_sb_id];
1678 
1679             p_block->sb_info = sb_info;
1680             p_block->status &= ~QED_IGU_STATUS_FREE;
1681             p_info->usage.free_cnt--;
1682         } else {
1683             qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1684         }
1685     }
1686 
1687     sb_info->cdev = p_hwfn->cdev;
1688 
1689     /* The igu address will hold the absolute address that needs to be
1690      * written to for a specific status block
1691      */
1692     if (IS_PF(p_hwfn->cdev)) {
1693         sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
1694                           GTT_BAR0_MAP_REG_IGU_CMD +
1695                           (sb_info->igu_sb_id << 3);
1696     } else {
1697         sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
1698                           PXP_VF_BAR0_START_IGU +
1699                           ((IGU_CMD_INT_ACK_BASE +
1700                             sb_info->igu_sb_id) << 3);
1701     }
1702 
1703     sb_info->flags |= QED_SB_INFO_INIT;
1704 
1705     qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
1706 
1707     return 0;
1708 }
1709 
1710 int qed_int_sb_release(struct qed_hwfn *p_hwfn,
1711                struct qed_sb_info *sb_info, u16 sb_id)
1712 {
1713     struct qed_igu_block *p_block;
1714     struct qed_igu_info *p_info;
1715 
1716     if (!sb_info)
1717         return 0;
1718 
1719     /* zero status block and ack counter */
1720     sb_info->sb_ack = 0;
1721     memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1722 
1723     if (IS_VF(p_hwfn->cdev)) {
1724         qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
1725         return 0;
1726     }
1727 
1728     p_info = p_hwfn->hw_info.p_igu_info;
1729     p_block = &p_info->entry[sb_info->igu_sb_id];
1730 
1731     /* Vector 0 is reserved to Default SB */
1732     if (!p_block->vector_number) {
1733         DP_ERR(p_hwfn, "Do Not free sp sb using this function");
1734         return -EINVAL;
1735     }
1736 
1737     /* Lose reference to client's SB info, and fix counters */
1738     p_block->sb_info = NULL;
1739     p_block->status |= QED_IGU_STATUS_FREE;
1740     p_info->usage.free_cnt++;
1741 
1742     return 0;
1743 }
1744 
1745 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
1746 {
1747     struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1748 
1749     if (!p_sb)
1750         return;
1751 
1752     if (p_sb->sb_info.sb_virt)
1753         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1754                   SB_ALIGNED_SIZE(p_hwfn),
1755                   p_sb->sb_info.sb_virt,
1756                   p_sb->sb_info.sb_phys);
1757     kfree(p_sb);
1758     p_hwfn->p_sp_sb = NULL;
1759 }
1760 
1761 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1762 {
1763     struct qed_sb_sp_info *p_sb;
1764     dma_addr_t p_phys = 0;
1765     void *p_virt;
1766 
1767     /* SB struct */
1768     p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
1769     if (!p_sb)
1770         return -ENOMEM;
1771 
1772     /* SB ring  */
1773     p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1774                     SB_ALIGNED_SIZE(p_hwfn),
1775                     &p_phys, GFP_KERNEL);
1776     if (!p_virt) {
1777         kfree(p_sb);
1778         return -ENOMEM;
1779     }
1780 
1781     /* Status Block setup */
1782     p_hwfn->p_sp_sb = p_sb;
1783     qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
1784             p_phys, QED_SP_SB_ID);
1785 
1786     memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1787 
1788     return 0;
1789 }
1790 
1791 int qed_int_register_cb(struct qed_hwfn *p_hwfn,
1792             qed_int_comp_cb_t comp_cb,
1793             void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
1794 {
1795     struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1796     int rc = -ENOMEM;
1797     u8 pi;
1798 
1799     /* Look for a free index */
1800     for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1801         if (p_sp_sb->pi_info_arr[pi].comp_cb)
1802             continue;
1803 
1804         p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1805         p_sp_sb->pi_info_arr[pi].cookie = cookie;
1806         *sb_idx = pi;
1807         *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1808         rc = 0;
1809         break;
1810     }
1811 
1812     return rc;
1813 }
1814 
1815 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
1816 {
1817     struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1818 
1819     if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
1820         return -ENOMEM;
1821 
1822     p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
1823     p_sp_sb->pi_info_arr[pi].cookie = NULL;
1824 
1825     return 0;
1826 }
1827 
/* Return the IGU SB index of this function's slowpath status block. */
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
    return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}
1832 
1833 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
1834                 struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
1835 {
1836     u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1837 
1838     p_hwfn->cdev->int_mode = int_mode;
1839     switch (p_hwfn->cdev->int_mode) {
1840     case QED_INT_MODE_INTA:
1841         igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1842         igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1843         break;
1844 
1845     case QED_INT_MODE_MSI:
1846         igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1847         igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1848         break;
1849 
1850     case QED_INT_MODE_MSIX:
1851         igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1852         break;
1853     case QED_INT_MODE_POLL:
1854         break;
1855     }
1856 
1857     qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1858 }
1859 
/* Arm attention generation: program the AEU edge latches and enable
 * bits, then unmask the AEU signals toward the IGU. The write order
 * follows the HW programming sequence and must be preserved.
 */
static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt)
{

    /* Configure AEU signal change to produce attentions */
    qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
    qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
    qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
    qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

    /* Unmask AEU signals toward IGU */
    qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
1873 
1874 int
1875 qed_int_igu_enable(struct qed_hwfn *p_hwfn,
1876            struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
1877 {
1878     int rc = 0;
1879 
1880     qed_int_igu_enable_attn(p_hwfn, p_ptt);
1881 
1882     if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1883         rc = qed_slowpath_irq_req(p_hwfn);
1884         if (rc) {
1885             DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
1886             return -EINVAL;
1887         }
1888         p_hwfn->b_int_requested = true;
1889     }
1890     /* Enable interrupt Generation */
1891     qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1892     p_hwfn->b_int_enabled = 1;
1893 
1894     return rc;
1895 }
1896 
1897 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1898 {
1899     p_hwfn->b_int_enabled = 0;
1900 
1901     if (IS_VF(p_hwfn->cdev))
1902         return;
1903 
1904     qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1905 }
1906 
1907 #define IGU_CLEANUP_SLEEP_LENGTH                (1000)
/* Issue one IGU cleanup command for @igu_sb_id and poll the cleanup
 * status register until the SB's bit matches the requested state.
 *
 * @cleanup_set: desired value of the cleanup bit (set or cleared).
 * @opaque_fid:  FID stamped into the IGU command control register.
 */
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt,
                   u16 igu_sb_id,
                   bool cleanup_set, u16 opaque_fid)
{
    u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
    u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
    u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

    /* Set the data field */
    SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
    SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
    SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

    /* Set the control register */
    SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
    SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
    SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

    /* Data must land before the control write triggers the command */
    qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

    barrier();

    qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

    /* calculate where to read the status bit from */
    sb_bit = 1 << (igu_sb_id % 32);
    sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

    sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

    /* Now wait for the command to complete */
    do {
        val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

        if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
            break;

        usleep_range(5000, 10000);
    } while (--sleep_cnt);

    if (!sleep_cnt)
        DP_NOTICE(p_hwfn,
              "Timeout waiting for clear status 0x%08x [for sb %d]\n",
              val, igu_sb_id);
}
1954 
/* Fully clean a single IGU SB: run an optional set-cleanup pass
 * followed by a clear-cleanup pass, wait until the IGU has no pending
 * write for this SB, then zero its CAU protocol-index memory.
 *
 * @igu_sb_id: IGU index of the SB to clean.
 * @opaque:    opaque FID used for the IGU cleanup commands.
 * @b_set:     when true, perform the "set" pass before the clear.
 */
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     u16 igu_sb_id, u16 opaque, bool b_set)
{
    struct qed_igu_block *p_block;
    int pi, i;

    p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
    DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
           "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
           igu_sb_id,
           p_block->function_id,
           p_block->is_pf, p_block->vector_number);

    /* Set */
    if (b_set)
        qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

    /* Clear */
    qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

    /* Wait for the IGU SB to cleanup */
    for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
        u32 val;

        val = qed_rd(p_hwfn, p_ptt,
                 IGU_REG_WRITE_DONE_PENDING +
                 ((igu_sb_id / 32) * 4));
        if (val & BIT((igu_sb_id % 32)))
            usleep_range(10, 20);
        else
            break;
    }
    if (i == IGU_CLEANUP_SLEEP_LENGTH)
        DP_NOTICE(p_hwfn,
              "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
              igu_sb_id);

    /* Clear the CAU for the SB */
    /* NOTE(review): 12 PI entries are cleared per SB; presumably this
     * should track PIS_PER_SB - confirm against qed_hsi.h.
     */
    for (pi = 0; pi < 12; pi++)
        qed_wr(p_hwfn, p_ptt,
               CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
1998 
1999 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
2000                   struct qed_ptt *p_ptt,
2001                   bool b_set, bool b_slowpath)
2002 {
2003     struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2004     struct qed_igu_block *p_block;
2005     u16 igu_sb_id = 0;
2006     u32 val = 0;
2007 
2008     val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
2009     val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
2010     val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
2011     qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
2012 
2013     for (igu_sb_id = 0;
2014          igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
2015         p_block = &p_info->entry[igu_sb_id];
2016 
2017         if (!(p_block->status & QED_IGU_STATUS_VALID) ||
2018             !p_block->is_pf ||
2019             (p_block->status & QED_IGU_STATUS_DSB))
2020             continue;
2021 
2022         qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
2023                         p_hwfn->hw_info.opaque_fid,
2024                         b_set);
2025     }
2026 
2027     if (b_slowpath)
2028         qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2029                         p_info->igu_dsb_id,
2030                         p_hwfn->hw_info.opaque_fid,
2031                         b_set);
2032 }
2033 
/* Reset the IGU CAM to the initial PF/VF SB distribution.
 *
 * Reconciles the SB counts discovered in the CAM with the amounts the
 * MFW granted (RESC_NUM), splits the non-default SBs between the PF
 * and its VFs, and rewrites every IGU mapping line that differs from
 * the desired configuration.
 *
 * Return: 0 on success, -EINVAL when there are not enough SBs for VFs.
 */
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
    struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
    struct qed_igu_block *p_block;
    int pf_sbs, vf_sbs;
    u16 igu_sb_id;
    u32 val, rval;

    if (!RESC_NUM(p_hwfn, QED_SB)) {
        /* No MFW resource info - keep the CAM-derived counts as-is */
        p_info->b_allow_pf_vf_change = false;
    } else {
        /* Use the numbers the MFW have provided -
         * don't forget MFW accounts for the default SB as well.
         */
        p_info->b_allow_pf_vf_change = true;

        if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
            DP_INFO(p_hwfn,
                "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
                RESC_NUM(p_hwfn, QED_SB) - 1,
                p_info->usage.cnt);
            p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
        }

        if (IS_PF_SRIOV(p_hwfn)) {
            u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;

            if (vfs != p_info->usage.iov_cnt)
                DP_VERBOSE(p_hwfn,
                       NETIF_MSG_INTR,
                       "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
                       p_info->usage.iov_cnt, vfs);

            /* At this point we know how many SBs we have totally
             * in IGU + number of PF SBs. So we can validate that
             * we'd have sufficient for VF.
             */
            if (vfs > p_info->usage.free_cnt +
                p_info->usage.free_cnt_iov - p_info->usage.cnt) {
                DP_NOTICE(p_hwfn,
                      "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
                      p_info->usage.free_cnt +
                      p_info->usage.free_cnt_iov,
                      p_info->usage.cnt, vfs);
                return -EINVAL;
            }

            /* Currently cap the number of VFs SBs by the
             * number of VFs.
             */
            p_info->usage.iov_cnt = vfs;
        }
    }

    /* Mark all SBs as free, now in the right PF/VFs division */
    p_info->usage.free_cnt = p_info->usage.cnt;
    p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
    p_info->usage.orig = p_info->usage.cnt;
    p_info->usage.iov_orig = p_info->usage.iov_cnt;

    /* We now proceed to re-configure the IGU cam to reflect the initial
     * configuration. We can start with the Default SB.
     */
    pf_sbs = p_info->usage.cnt;
    vf_sbs = p_info->usage.iov_cnt;

    for (igu_sb_id = p_info->igu_dsb_id;
         igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
        p_block = &p_info->entry[igu_sb_id];
        val = 0;

        if (!(p_block->status & QED_IGU_STATUS_VALID))
            continue;

        if (p_block->status & QED_IGU_STATUS_DSB) {
            /* Default SB: owned by this PF on vector 0 */
            p_block->function_id = p_hwfn->rel_pf_id;
            p_block->is_pf = 1;
            p_block->vector_number = 0;
            p_block->status = QED_IGU_STATUS_VALID |
                      QED_IGU_STATUS_PF |
                      QED_IGU_STATUS_DSB;
        } else if (pf_sbs) {
            /* PF SBs next; vector numbers start at 1 */
            pf_sbs--;
            p_block->function_id = p_hwfn->rel_pf_id;
            p_block->is_pf = 1;
            p_block->vector_number = p_info->usage.cnt - pf_sbs;
            p_block->status = QED_IGU_STATUS_VALID |
                      QED_IGU_STATUS_PF |
                      QED_IGU_STATUS_FREE;
        } else if (vf_sbs) {
            /* One SB per VF, in ascending VF order */
            p_block->function_id =
                p_hwfn->cdev->p_iov_info->first_vf_in_pf +
                p_info->usage.iov_cnt - vf_sbs;
            p_block->is_pf = 0;
            p_block->vector_number = 0;
            p_block->status = QED_IGU_STATUS_VALID |
                      QED_IGU_STATUS_FREE;
            vf_sbs--;
        } else {
            /* Leftover lines are disowned */
            p_block->function_id = 0;
            p_block->is_pf = 0;
            p_block->vector_number = 0;
        }

        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
              p_block->function_id);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
        SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
              p_block->vector_number);

        /* VF entries would be enabled when VF is initializaed */
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

        /* Only write the HW mapping line if it actually changed */
        rval = qed_rd(p_hwfn, p_ptt,
                  IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);

        if (rval != val) {
            qed_wr(p_hwfn, p_ptt,
                   IGU_REG_MAPPING_MEMORY +
                   sizeof(u32) * igu_sb_id, val);

            DP_VERBOSE(p_hwfn,
                   NETIF_MSG_INTR,
                   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
                   igu_sb_id,
                   p_block->function_id,
                   p_block->is_pf,
                   p_block->vector_number, rval, val);
        }
    }

    return 0;
}
2167 
2168 static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
2169                        struct qed_ptt *p_ptt, u16 igu_sb_id)
2170 {
2171     u32 val = qed_rd(p_hwfn, p_ptt,
2172              IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2173     struct qed_igu_block *p_block;
2174 
2175     p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2176 
2177     /* Fill the block information */
2178     p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
2179     p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2180     p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
2181     p_block->igu_sb_id = igu_sb_id;
2182 }
2183 
/* Parse the IGU CAM into the per-hwfn shadow (p_igu_info): mark which
 * SBs belong to this PF or to its VFs, select the default (slowpath)
 * SB, and count the usable PF/VF SBs.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL when
 * no usable default SB was found in the CAM.
 */
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
    struct qed_igu_info *p_igu_info;
    struct qed_igu_block *p_block;
    u32 min_vf = 0, max_vf = 0;
    u16 igu_sb_id;

    p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
    if (!p_hwfn->hw_info.p_igu_info)
        return -ENOMEM;

    p_igu_info = p_hwfn->hw_info.p_igu_info;

    /* Distinguish between existent and non-existent default SB */
    p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

    /* Find the range of VF ids whose SB belong to this PF */
    if (p_hwfn->cdev->p_iov_info) {
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

        min_vf  = p_iov->first_vf_in_pf;
        max_vf  = p_iov->first_vf_in_pf + p_iov->total_vfs;
    }

    for (igu_sb_id = 0;
         igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
        /* Read current entry; Notice it might not belong to this PF */
        qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
        p_block = &p_igu_info->entry[igu_sb_id];

        if ((p_block->is_pf) &&
            (p_block->function_id == p_hwfn->rel_pf_id)) {
            p_block->status = QED_IGU_STATUS_PF |
                      QED_IGU_STATUS_VALID |
                      QED_IGU_STATUS_FREE;

            /* The default SB itself is not counted */
            if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
                p_igu_info->usage.cnt++;
        } else if (!(p_block->is_pf) &&
               (p_block->function_id >= min_vf) &&
               (p_block->function_id < max_vf)) {
            /* Available for VFs of this PF */
            p_block->status = QED_IGU_STATUS_VALID |
                      QED_IGU_STATUS_FREE;

            if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
                p_igu_info->usage.iov_cnt++;
        }

        /* Mark the First entry belonging to the PF or its VFs
         * as the default SB [we'll reset IGU prior to first usage].
         */
        if ((p_block->status & QED_IGU_STATUS_VALID) &&
            (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
            p_igu_info->igu_dsb_id = igu_sb_id;
            p_block->status |= QED_IGU_STATUS_DSB;
        }

        /* limit number of prints by having each PF print only its
         * entries with the exception of PF0 which would print
         * everything.
         */
        if ((p_block->status & QED_IGU_STATUS_VALID) ||
            (p_hwfn->abs_pf_id == 0)) {
            DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
                   igu_sb_id, p_block->function_id,
                   p_block->is_pf, p_block->vector_number);
        }
    }

    if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
        DP_NOTICE(p_hwfn,
              "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
              p_igu_info->igu_dsb_id);
        return -EINVAL;
    }

    /* All non default SB are considered free at this point */
    p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
    p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

    DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
           "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
           p_igu_info->igu_dsb_id,
           p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

    return 0;
}
2273 
2274 /**
2275  * qed_int_igu_init_rt() - Initialize IGU runtime registers.
2276  *
2277  * @p_hwfn: HW device data.
2278  */
2279 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
2280 {
2281     u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2282 
2283     STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2284 }
2285 
2286 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
2287 {
2288     u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
2289                    IGU_CMD_INT_ACK_BASE;
2290     u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
2291                    IGU_CMD_INT_ACK_BASE;
2292     u32 intr_status_hi = 0, intr_status_lo = 0;
2293     u64 intr_status = 0;
2294 
2295     intr_status_lo = REG_RD(p_hwfn,
2296                 GTT_BAR0_MAP_REG_IGU_CMD +
2297                 lsb_igu_cmd_addr * 8);
2298     intr_status_hi = REG_RD(p_hwfn,
2299                 GTT_BAR0_MAP_REG_IGU_CMD +
2300                 msb_igu_cmd_addr * 8);
2301     intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2302 
2303     return intr_status;
2304 }
2305 
/* Prepare the slowpath DPC tasklet and mark it enabled. */
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
    tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc);
    p_hwfn->b_sp_dpc_enabled = true;
}
2311 
/* Allocate the slowpath SB, then the attention SB. */
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
    int rc;

    rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
    if (rc)
        return rc;

    return qed_int_sb_attn_alloc(p_hwfn, p_ptt);
}
2324 
/* Free the slowpath SB and the attention SB resources. */
void qed_int_free(struct qed_hwfn *p_hwfn)
{
    qed_int_sp_sb_free(p_hwfn);
    qed_int_sb_attn_free(p_hwfn);
}
2330 
/* Set up the slowpath SB, the attention SB and the slowpath DPC. */
void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
    qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
    qed_int_sb_attn_setup(p_hwfn, p_ptt);
    qed_int_sp_dpc_setup(p_hwfn);
}
2337 
2338 void qed_int_get_num_sbs(struct qed_hwfn    *p_hwfn,
2339              struct qed_sb_cnt_info *p_sb_cnt_info)
2340 {
2341     struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
2342 
2343     if (!info || !p_sb_cnt_info)
2344         return;
2345 
2346     memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
2347 }
2348 
/* Clear the "IRQ requested" flag on every hwfn once ISRs are released. */
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
    int i;

    for_each_hwfn(cdev, i)
        cdev->hwfns[i].b_int_requested = false;
}
2356 
/* Latch the device-wide attention-clear policy flag. */
void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
{
    cdev->attn_clr_en = clr_enable;
}
2361 
2362 int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2363               u8 timer_res, u16 sb_id, bool tx)
2364 {
2365     struct cau_sb_entry sb_entry;
2366     u32 params;
2367     int rc;
2368 
2369     if (!p_hwfn->hw_init_done) {
2370         DP_ERR(p_hwfn, "hardware not initialized yet\n");
2371         return -EINVAL;
2372     }
2373 
2374     rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2375                    sb_id * sizeof(u64),
2376                    (u64)(uintptr_t)&sb_entry, 2, NULL);
2377     if (rc) {
2378         DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2379         return rc;
2380     }
2381 
2382     params = le32_to_cpu(sb_entry.params);
2383 
2384     if (tx)
2385         SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2386     else
2387         SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2388 
2389     sb_entry.params = cpu_to_le32(params);
2390 
2391     rc = qed_dmae_host2grc(p_hwfn, p_ptt,
2392                    (u64)(uintptr_t)&sb_entry,
2393                    CAU_REG_SB_VAR_MEMORY +
2394                    sb_id * sizeof(u64), 2, NULL);
2395     if (rc) {
2396         DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
2397         return rc;
2398     }
2399 
2400     return rc;
2401 }
2402 
2403 int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2404                struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info)
2405 {
2406     u16 sbid = p_sb->igu_sb_id;
2407     u32 i;
2408 
2409     if (IS_VF(p_hwfn->cdev))
2410         return -EINVAL;
2411 
2412     if (sbid >= NUM_OF_SBS(p_hwfn->cdev))
2413         return -EINVAL;
2414 
2415     p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4);
2416     p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4);
2417 
2418     for (i = 0; i < PIS_PER_SB; i++)
2419         p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt,
2420                         CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4);
2421 
2422     return 0;
2423 }