Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Linux network driver for QLogic BR-series Converged Network Adapter.
0004  */
0005 /*
0006  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
0007  * Copyright (c) 2014-2015 QLogic Corporation
0008  * All rights reserved
0009  * www.qlogic.com
0010  */
0011 
0012 #include "bfa_ioc.h"
0013 #include "cna.h"
0014 #include "bfi.h"
0015 #include "bfi_reg.h"
0016 #include "bfa_defs.h"
0017 
/*
 * Layout of the ioc_fail_sync register: the low 16 bits hold per-PCI-fn
 * "sync acked" bits, the high 16 bits hold per-PCI-fn "sync required"
 * bits.  A function's bit position within each half is its PCI
 * function number.
 */
#define bfa_ioc_ct_sync_pos(__ioc)  BIT(bfa_ioc_pcifn(__ioc))
#define BFA_IOC_SYNC_REQD_SH        16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
        (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
0025 
0026 /*
0027  * forward declarations
0028  */
0029 static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
0030 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
0031 static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
0032 static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
0033 static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
0034 static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
0035 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
0036 static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
0037 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
0038 static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
0039 static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
0040 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
0041 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
0042 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
0043 static void bfa_ioc_ct_set_cur_ioc_fwstate(
0044             struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
0045 static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
0046 static void bfa_ioc_ct_set_alt_ioc_fwstate(
0047             struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
0048 static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
0049 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
0050                 enum bfi_asic_mode asic_mode);
0051 static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
0052                 enum bfi_asic_mode asic_mode);
0053 static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
0054 
/* Hardware-interface hooks for the CT (Catapult) ASIC. */
static const struct bfa_ioc_hwif nw_hwif_ct = {
    .ioc_pll_init        = bfa_ioc_ct_pll_init,
    .ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
    .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
    .ioc_reg_init        = bfa_ioc_ct_reg_init,
    .ioc_map_port        = bfa_ioc_ct_map_port,
    .ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
    .ioc_notify_fail     = bfa_ioc_ct_notify_fail,
    .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
    .ioc_sync_start      = bfa_ioc_ct_sync_start,
    .ioc_sync_join       = bfa_ioc_ct_sync_join,
    .ioc_sync_leave      = bfa_ioc_ct_sync_leave,
    .ioc_sync_ack        = bfa_ioc_ct_sync_ack,
    .ioc_sync_complete   = bfa_ioc_ct_sync_complete,
    .ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
    .ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
    .ioc_set_alt_fwstate     = bfa_ioc_ct_set_alt_ioc_fwstate,
    .ioc_get_alt_fwstate     = bfa_ioc_ct_get_alt_ioc_fwstate,
};
0074 
/*
 * Hardware-interface hooks for the CT2 ASIC.  Differs from CT in the
 * PLL init, register mapping, port mapping, and the extra LPU read-stat
 * hook; CT2 has no INTX/MSIX mode switch (.ioc_isr_mode_set is NULL).
 */
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
    .ioc_pll_init        = bfa_ioc_ct2_pll_init,
    .ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
    .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
    .ioc_reg_init        = bfa_ioc_ct2_reg_init,
    .ioc_map_port        = bfa_ioc_ct2_map_port,
    .ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
    .ioc_isr_mode_set    = NULL,
    .ioc_notify_fail     = bfa_ioc_ct_notify_fail,
    .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
    .ioc_sync_start      = bfa_ioc_ct_sync_start,
    .ioc_sync_join       = bfa_ioc_ct_sync_join,
    .ioc_sync_leave      = bfa_ioc_ct_sync_leave,
    .ioc_sync_ack        = bfa_ioc_ct_sync_ack,
    .ioc_sync_complete   = bfa_ioc_ct_sync_complete,
    .ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
    .ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
    .ioc_set_alt_fwstate     = bfa_ioc_ct_set_alt_ioc_fwstate,
    .ioc_get_alt_fwstate     = bfa_ioc_ct_get_alt_ioc_fwstate,
};
0095 
/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
    /* Install the CT (Catapult) hook table. */
    ioc->ioc_hwif = &nw_hwif_ct;
}
0102 
/* Called from bfa_ioc_attach() to map CT2 asic specific calls. */
void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
    /* Install the CT2 hook table. */
    ioc->ioc_hwif = &nw_hwif_ct2;
}
0108 
0109 /* Return true if firmware of current driver matches the running firmware. */
0110 static bool
0111 bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
0112 {
0113     enum bfi_ioc_state ioc_fwstate;
0114     u32 usecnt;
0115     struct bfi_ioc_image_hdr fwhdr;
0116 
0117     /**
0118      * If bios boot (flash based) -- do not increment usage count
0119      */
0120     if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
0121                         BFA_IOC_FWIMG_MINSZ)
0122         return true;
0123 
0124     bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
0125     usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
0126 
0127     /**
0128      * If usage count is 0, always return TRUE.
0129      */
0130     if (usecnt == 0) {
0131         writel(1, ioc->ioc_regs.ioc_usage_reg);
0132         bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
0133         writel(0, ioc->ioc_regs.ioc_fail_sync);
0134         return true;
0135     }
0136 
0137     ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
0138 
0139     /**
0140      * Use count cannot be non-zero and chip in uninitialized state.
0141      */
0142     BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
0143 
0144     /**
0145      * Check if another driver with a different firmware is active
0146      */
0147     bfa_nw_ioc_fwver_get(ioc, &fwhdr);
0148     if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
0149         bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
0150         return false;
0151     }
0152 
0153     /**
0154      * Same firmware version. Increment the reference count.
0155      */
0156     usecnt++;
0157     writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
0158     bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
0159     return true;
0160 }
0161 
0162 static void
0163 bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
0164 {
0165     u32 usecnt;
0166 
0167     /**
0168      * If bios boot (flash based) -- do not decrement usage count
0169      */
0170     if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
0171                         BFA_IOC_FWIMG_MINSZ)
0172         return;
0173 
0174     /**
0175      * decrement usage count
0176      */
0177     bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
0178     usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
0179     BUG_ON(!(usecnt > 0));
0180 
0181     usecnt--;
0182     writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
0183 
0184     bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
0185 }
0186 
/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
    /* Halt firmware on this port and on the alternate port. */
    writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
    writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
    /* Wait for halt to take effect */
    readl(ioc->ioc_regs.ll_halt);
    readl(ioc->ioc_regs.alt_ll_halt);
}
0197 
/* Host to LPU mailbox message addresses, indexed by PCI function. */
static const struct {
    u32 hfn_mbox;   /* host fn -> LPU mailbox offset */
    u32 lpu_mbox;   /* LPU -> host fn mailbox offset */
    u32 hfn_pgn;    /* host page number register offset */
} ct_fnreg[] = {
    { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
    { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
    { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
    { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
    u32 hfn;    /* host fn command/status offset */
    u32 lpu;    /* LPU command/status offset */
} ct_p0reg[] = {
    { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
    { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
    { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
    { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
    u32 hfn;
    u32 lpu;
} ct_p1reg[] = {
    { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
    { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
    { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
    { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

/* CT2 mailbox/paging register offsets, indexed by port. */
static const struct {
    u32 hfn_mbox;
    u32 lpu_mbox;
    u32 hfn_pgn;
    u32 hfn;
    u32 lpu;
    u32 lpu_read;   /* LPU read-status register offset (CT2 only) */
} ct2_reg[] = {
    { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
      CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
      CT2_HOSTFN_LPU0_READ_STAT},
    { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
      CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
      CT2_HOSTFN_LPU1_READ_STAT},
};
0247 
/* Compute all CT register addresses for this IOC from BAR0. */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
    void __iomem *rb;
    int     pcifn = bfa_ioc_pcifn(ioc);

    rb = bfa_ioc_bar0(ioc);

    /* Mailbox registers are selected by PCI function. */
    ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
    ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
    ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

    /* Heartbeat/fwstate/halt registers are selected by port; the
     * "alt" entries point at the other port's registers.
     */
    if (ioc->port_id == 0) {
        ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
        ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
        ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
        ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
        ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
        ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
        ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
    } else {
        ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
        ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
        ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
        ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
        ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
        ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
        ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
    }

    /*
     * PSS control registers
     */
    ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
    ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
    ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
    ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

    /*
     * IOC semaphore registers and serialization
     */
    ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
    ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
    ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
    ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
    ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

    /**
     * sram memory access
     */
    ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
    ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

    /*
     * err set reg : for notification of hb failure in fcmode
     */
    ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
0306 
/* Compute all CT2 register addresses for this IOC from BAR0. */
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
    void __iomem *rb;
    int     port = bfa_ioc_portid(ioc);

    rb = bfa_ioc_bar0(ioc);

    /* On CT2 the mailbox registers are indexed by port, not PCI fn. */
    ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
    ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
    ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
    ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
    ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
    ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

    /* Heartbeat/fwstate/halt registers are selected by port; the
     * "alt" entries point at the other port's registers.
     */
    if (port == 0) {
        ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
        ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
        ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
        ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
        ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
    } else {
        ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
        ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
        ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
        ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
        ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
    }

    /*
     * PSS control registers
     */
    ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
    ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
    ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
    ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

    /*
     * IOC semaphore registers and serialization
     */
    ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
    ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
    ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
    ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
    ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

    /**
     * sram memory access
     */
    ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
    ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

    /*
     * err set reg : for notification of hb failure in fcmode
     */
    ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
0364 
0365 /* Initialize IOC to port mapping. */
0366 
0367 #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
0368 static void
0369 bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
0370 {
0371     void __iomem *rb = ioc->pcidev.pci_bar_kva;
0372     u32 r32;
0373 
0374     /**
0375      * For catapult, base port id on personality register and IOC type
0376      */
0377     r32 = readl(rb + FNC_PERS_REG);
0378     r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
0379     ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
0380 
0381 }
0382 
0383 static void
0384 bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
0385 {
0386     void __iomem *rb = ioc->pcidev.pci_bar_kva;
0387     u32 r32;
0388 
0389     r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
0390     ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
0391 }
0392 
0393 /* Set interrupt mode for a function: INTX or MSIX */
0394 static void
0395 bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
0396 {
0397     void __iomem *rb = ioc->pcidev.pci_bar_kva;
0398     u32 r32, mode;
0399 
0400     r32 = readl(rb + FNC_PERS_REG);
0401 
0402     mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
0403         __F0_INTX_STATUS;
0404 
0405     /**
0406      * If already in desired mode, do not change anything
0407      */
0408     if ((!msix && mode) || (msix && !mode))
0409         return;
0410 
0411     if (msix)
0412         mode = __F0_INTX_STATUS_MSIX;
0413     else
0414         mode = __F0_INTX_STATUS_INTA;
0415 
0416     r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
0417     r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
0418 
0419     writel(r32, rb + FNC_PERS_REG);
0420 }
0421 
0422 static bool
0423 bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
0424 {
0425     u32 r32;
0426 
0427     r32 = readl(ioc->ioc_regs.lpu_read_stat);
0428     if (r32) {
0429         writel(1, ioc->ioc_regs.lpu_read_stat);
0430         return true;
0431     }
0432 
0433     return false;
0434 }
0435 
0436 /* MSI-X resource allocation for 1860 with no asic block */
0437 #define HOSTFN_MSIX_DEFAULT     64
0438 #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR   0x30138
0439 #define HOSTFN_MSIX_VT_OFST_NUMVT   0x3013c
0440 #define __MSIX_VT_NUMVT__MK     0x003ff800
0441 #define __MSIX_VT_NUMVT__SH     11
0442 #define __MSIX_VT_NUMVT_(_v)        ((_v) << __MSIX_VT_NUMVT__SH)
0443 #define __MSIX_VT_OFST_         0x000007ff
/*
 * Program MSI-X vector layout for 1860 (CT2) with no asic block.
 * If the number-of-vectors field is already set (presumably by an
 * earlier init -- TODO confirm who else programs it), only record the
 * existing vector offset for the mailbox-error vector; otherwise set
 * up the default 64-vectors-per-function layout.
 */
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
    void __iomem *rb = ioc->pcidev.pci_bar_kva;
    u32 r32;

    r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
    if (r32 & __MSIX_VT_NUMVT__MK) {
        /* Already configured: just record the vector offset. */
        writel(r32 & __MSIX_VT_OFST_,
            rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
        return;
    }

    /* Program NUMVT (count - 1) and this function's vector base. */
    writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
            HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
            rb + HOSTFN_MSIX_VT_OFST_NUMVT);
    writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
            rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
0463 
/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
    /* Zero the firmware usage count under the usage semaphore. */
    bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
    writel(0, ioc->ioc_regs.ioc_usage_reg);
    bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

    /*
     * Read the hw sem reg to make sure that it is locked
     * before we clear it. If it is not locked, writing 1
     * will lock it instead of clearing it.
     */
    readl(ioc->ioc_regs.ioc_sem_reg);
    bfa_nw_ioc_hw_sem_release(ioc);
}
0480 
/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
    u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
    u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

    /*
     * Driver load time.  If the sync required bit for this PCI fn
     * is set, it is due to an unclean exit by the driver for this
     * PCI fn in the previous incarnation. Whoever comes here first
     * should clean it up, no matter which PCI fn.
     */

    if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
        /* Clean slate: reset sync state, take the initial usage
         * reference, and force both IOCs back to UNINIT.
         */
        writel(0, ioc->ioc_regs.ioc_fail_sync);
        writel(1, ioc->ioc_regs.ioc_usage_reg);
        writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
        writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
        return true;
    }

    return bfa_ioc_ct_sync_complete(ioc);
}
0505 /* Synchronized IOC failure processing routines */
0506 static void
0507 bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
0508 {
0509     u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
0510     u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
0511 
0512     writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
0513 }
0514 
0515 static void
0516 bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
0517 {
0518     u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
0519     u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
0520                     bfa_ioc_ct_sync_pos(ioc);
0521 
0522     writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
0523 }
0524 
0525 static void
0526 bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
0527 {
0528     u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
0529 
0530     writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
0531 }
0532 
/*
 * Return true when failure sync across PCI fns is complete: either no
 * fn has acked yet (nothing in progress), or every fn that required
 * sync has acked -- in which case the ack bits are cleared and both
 * IOC states are set to FAIL so reinitialization can begin.
 */
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
    u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
    u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
    u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
    u32 tmp_ackd;

    if (sync_ackd == 0)
        return true;

    /**
     * The check below is to see whether any other PCI fn
     * has reinitialized the ASIC (reset sync_ackd bits)
     * and failed again while this IOC was waiting for hw
     * semaphore (in bfa_iocpf_sm_semwait()).
     */
    tmp_ackd = sync_ackd;
    if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
            !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
        sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

    if (sync_reqd == sync_ackd) {
        /* All required fns have acked: clear acks, fail both IOCs. */
        writel(bfa_ioc_ct_clear_sync_ackd(r32),
                ioc->ioc_regs.ioc_fail_sync);
        writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
        writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
        return true;
    }

    /**
     * If another PCI fn reinitialized and failed again while
     * this IOC was waiting for hw sem, the sync_ackd bit for
     * this IOC need to be set again to allow reinitialization.
     */
    if (tmp_ackd != sync_ackd)
        writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

    return false;
}
0573 
/* Write this IOC's firmware state register. */
static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
                   enum bfi_ioc_state fwstate)
{
    writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}
0580 
/* Read this IOC's firmware state register. */
static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
{
    return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}
0586 
/* Write the other port's IOC firmware state register. */
static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
                   enum bfi_ioc_state fwstate)
{
    writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}
0593 
/* Read the other port's IOC firmware state register. */
static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
{
    return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}
0599 
/*
 * CT ASIC PLL initialization: program the s_clk and l_clk PLLs, run
 * the memory BIST, and leave both IOC states at UNINIT.  'asic_mode'
 * selects FC vs FCoE/ethernet register setup.
 */
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
    u32 pll_sclk, pll_fclk, r32;
    bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

    /* Base PLL control words with reset asserted. */
    pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
        __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
        __APP_PLL_SCLK_JITLMT0_1(3U) |
        __APP_PLL_SCLK_CNTLMT0_1(1U);
    pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
        __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
        __APP_PLL_LCLK_JITLMT0_1(3U) |
        __APP_PLL_LCLK_CNTLMT0_1(1U);

    /* Select operating mode and ethernet MAC serdes configuration. */
    if (fcmode) {
        writel(0, (rb + OP_MODE));
        writel(__APP_EMS_CMLCKSEL |
                __APP_EMS_REFCKBUFEN2 |
                __APP_EMS_CHANNEL_SEL,
                (rb + ETH_MAC_SER_REG));
    } else {
        writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
        writel(__APP_EMS_REFCKBUFEN1,
                (rb + ETH_MAC_SER_REG));
    }
    /* Both IOCs start out uninitialized. */
    writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
    writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
    /* Mask and clear host interrupts on both functions. */
    writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
    writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
    writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
    writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
    writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
    writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
    /* Program both PLLs with logic soft reset asserted, then enable. */
    writel(pll_sclk |
        __APP_PLL_SCLK_LOGIC_SOFT_RESET,
        rb + APP_PLL_SCLK_CTL_REG);
    writel(pll_fclk |
        __APP_PLL_LCLK_LOGIC_SOFT_RESET,
        rb + APP_PLL_LCLK_CTL_REG);
    writel(pll_sclk |
        __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
        rb + APP_PLL_SCLK_CTL_REG);
    writel(pll_fclk |
        __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
        rb + APP_PLL_LCLK_CTL_REG);
    /* Flush posted writes with a readback, then wait ~2ms. */
    readl(rb + HOSTFN0_INT_MSK);
    udelay(2000);
    writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
    writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
    /* Release logic soft reset, leaving the PLLs enabled. */
    writel(pll_sclk |
        __APP_PLL_SCLK_ENABLE,
        rb + APP_PLL_SCLK_CTL_REG);
    writel(pll_fclk |
        __APP_PLL_LCLK_ENABLE,
        rb + APP_PLL_LCLK_CTL_REG);

    if (!fcmode) {
        writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
        writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
    }
    /* Take PSS local memory out of reset. */
    r32 = readl(rb + PSS_CTL_REG);
    r32 &= ~__PSS_LMEM_RESET;
    writel(r32, (rb + PSS_CTL_REG));
    udelay(1000);
    if (!fcmode) {
        writel(0, (rb + PMM_1T_RESET_REG_P0));
        writel(0, (rb + PMM_1T_RESET_REG_P1));
    }

    /* Run the eDRAM BIST, then stop it. */
    writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
    udelay(1000);
    /* NOTE(review): BIST status is read but never checked -- the read
     * presumably just completes the BIST sequence; confirm.
     */
    r32 = readl(rb + MBIST_STAT_REG);
    writel(0, (rb + MBIST_CTL_REG));
    return BFA_STATUS_OK;
}
0676 
/*
 * Program the CT2 s_clk PLL.  NOTE: returns with the logic soft reset
 * still asserted; callers release it afterwards (see
 * bfa_ioc_ct2_mac_reset() and bfa_ioc_ct2_pll_init()).
 */
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
    u32 r32;

    /*
     * put s_clk PLL and PLL FSM in reset
     */
    r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
    r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
    r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
        __APP_PLL_SCLK_LOGIC_SOFT_RESET);
    writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

    /*
     * Ignore mode and program for the max clock (which is FC16)
     * Firmware/NFC will do the PLL init appropriately
     */
    r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
    r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
    writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

    /*
     * while doing PLL init dont clock gate ethernet subsystem
     */
    r32 = readl(rb + CT2_CHIP_MISC_PRG);
    writel(r32 | __ETH_CLK_ENABLE_PORT0,
           rb + CT2_CHIP_MISC_PRG);

    r32 = readl(rb + CT2_PCIE_MISC_REG);
    writel(r32 | __ETH_CLK_ENABLE_PORT1,
           rb + CT2_PCIE_MISC_REG);

    /*
     * set sclk value
     */
    r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
    r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
        __APP_PLL_SCLK_CLK_DIV2);
    /* NOTE(review): 0x1061731b is an undocumented magic PLL setting
     * carried over from the vendor driver -- confirm against the
     * ASIC register spec before changing.
     */
    writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);

    /*
     * poll for s_clk lock or delay 1ms
     */
    udelay(1000);

    /*
     * Dont do clock gating for ethernet subsystem, firmware/NFC will
     * do this appropriately
     */
}
0728 
/*
 * Program the CT2 l_clk PLL.  NOTE: returns with the logic soft reset
 * still asserted; callers release it afterwards.
 */
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
    u32 r32;

    /*
     * put l_clk PLL and PLL FSM in reset
     */
    r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
    r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
    r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
        __APP_PLL_LCLK_LOGIC_SOFT_RESET);
    writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

    /*
     * set LPU speed (set for FC16 which will work for other modes)
     */
    r32 = readl(rb + CT2_CHIP_MISC_PRG);
    /* NOTE(review): value is written back unmodified -- presumably the
     * power-on default already encodes the FC16 setting; confirm.
     */
    writel(r32, (rb + CT2_CHIP_MISC_PRG));

    /*
     * set LPU half speed (set for FC16 which will work for other modes)
     */
    r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
    /* NOTE(review): another unmodified write-back -- confirm intent. */
    writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

    /*
     * set lclk for mode (set for FC16)
     */
    r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
    r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
    /* NOTE(review): 0x20c1731b is an undocumented magic PLL setting
     * from the vendor driver -- confirm against the ASIC register spec.
     */
    r32 |= 0x20c1731b;
    writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

    /*
     * poll for s_clk lock or delay 1ms
     */
    udelay(1000);
}
0768 
/* Take PSS local memory out of reset and run the CT2 eDRAM BIST. */
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
    u32 r32;

    /* Release the local-memory reset bit. */
    r32 = readl(rb + PSS_CTL_REG);
    r32 &= ~__PSS_LMEM_RESET;
    writel(r32, rb + PSS_CTL_REG);
    udelay(1000);

    /* Kick off the BIST, give it ~1ms, then stop it. */
    writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
    udelay(1000);
    writel(0, rb + CT2_MBIST_CTL_REG);
}
0783 
0784 static void
0785 bfa_ioc_ct2_mac_reset(void __iomem *rb)
0786 {
0787     volatile u32 r32;
0788 
0789     bfa_ioc_ct2_sclk_init(rb);
0790     bfa_ioc_ct2_lclk_init(rb);
0791 
0792     /*
0793      * release soft reset on s_clk & l_clk
0794      */
0795     r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
0796     writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
0797            rb + CT2_APP_PLL_SCLK_CTL_REG);
0798 
0799     /*
0800      * release soft reset on s_clk & l_clk
0801      */
0802     r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
0803     writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
0804            rb + CT2_APP_PLL_LCLK_CTL_REG);
0805 
0806     /* put port0, port1 MAC & AHB in reset */
0807     writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
0808            rb + CT2_CSI_MAC_CONTROL_REG(0));
0809     writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
0810            rb + CT2_CSI_MAC_CONTROL_REG(1));
0811 }
0812 
0813 #define CT2_NFC_MAX_DELAY       1000
0814 #define CT2_NFC_VER_VALID       0x143
0815 #define BFA_IOC_PLL_POLL        1000000
0816 
0817 static bool
0818 bfa_ioc_ct2_nfc_halted(void __iomem *rb)
0819 {
0820     volatile u32 r32;
0821 
0822     r32 = readl(rb + CT2_NFC_CSR_SET_REG);
0823     if (r32 & __NFC_CONTROLLER_HALTED)
0824         return true;
0825 
0826     return false;
0827 }
0828 
0829 static void
0830 bfa_ioc_ct2_nfc_resume(void __iomem *rb)
0831 {
0832     volatile u32 r32;
0833     int i;
0834 
0835     writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
0836     for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
0837         r32 = readl(rb + CT2_NFC_CSR_SET_REG);
0838         if (!(r32 & __NFC_CONTROLLER_HALTED))
0839             return;
0840         udelay(1000);
0841     }
0842     BUG_ON(1);
0843 }
0844 
/*
 * CT2 ASIC PLL initialization.  If valid NFC firmware is running, ask
 * it to reset/start the PLLs and poll for completion; otherwise halt
 * the NFC controller and program the MAC/s_clk/l_clk blocks directly.
 * Finishes by masking mailbox interrupts, clearing stale BIOS/EFI
 * mailbox state, running the memory BIST and setting both IOC states
 * to UNINIT.  'asic_mode' is unused here (NFC/firmware handles
 * mode-specific setup).
 */
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
    volatile u32 wgn, r32;
    u32 nfc_ver, i;

    wgn = readl(rb + CT2_WGN_STATUS);

    nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

    if (wgn == (__A2T_AHB_LOAD | __WGN_READY) &&
        nfc_ver >= CT2_NFC_VER_VALID) {
        /* Valid NFC firmware: let it reset and start the PLLs. */
        if (bfa_ioc_ct2_nfc_halted(rb))
            bfa_ioc_ct2_nfc_resume(rb);
        writel(__RESET_AND_START_SCLK_LCLK_PLLS,
                rb + CT2_CSI_FW_CTL_SET_REG);

        /* Poll until the request bit is observed set... */
        for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
            r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
            if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
                break;
        }
        BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

        /* ...then until it clears again (operation finished). */
        for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
            r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
            if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
                break;
        }
        BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
        udelay(1000);

        /* The control register must also show the request consumed. */
        r32 = readl(rb + CT2_CSI_FW_CTL_REG);
        BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
    } else {
        /* No usable NFC: halt it and do the PLL init ourselves. */
        writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
        for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
            r32 = readl(rb + CT2_NFC_CSR_SET_REG);
            if (r32 & __NFC_CONTROLLER_HALTED)
                break;
            udelay(1000);
        }

        bfa_ioc_ct2_mac_reset(rb);
        bfa_ioc_ct2_sclk_init(rb);
        bfa_ioc_ct2_lclk_init(rb);

        /* release soft reset on s_clk & l_clk */
        r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
        writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
                rb + CT2_APP_PLL_SCLK_CTL_REG);
        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
        writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
                rb + CT2_APP_PLL_LCLK_CTL_REG);
    }

    /* Announce flash device presence, if flash was corrupted. */
    if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
        r32 = readl(rb + PSS_GPIO_OUT_REG);
        writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
        r32 = readl(rb + PSS_GPIO_OE_REG);
        writel(r32 | 1, rb + PSS_GPIO_OE_REG);
    }

    /*
     * Mask the interrupts and clear any
     * pending interrupts left by BIOS/EFI
     */
    writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
    writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);

    /* For first time initialization, no need to clear interrupts */
    r32 = readl(rb + HOST_SEM5_REG);
    if (r32 & 0x1) {
        r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
        if (r32 == 1) {
            writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
            readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
        }
        r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
        if (r32 == 1) {
            writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
            readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
        }
    }

    bfa_ioc_ct2_mem_init(rb);

    /* Both IOCs start out uninitialized. */
    writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
    writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
    return BFA_STATUS_OK;
}
0936 }