0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  QLogic FCoE Offload Driver
0004  *  Copyright (c) 2016-2018 Cavium Inc.
0005  */
0006 #include <linux/init.h>
0007 #include <linux/kernel.h>
0008 #include <linux/module.h>
0009 #include <linux/pci.h>
0010 #include <linux/device.h>
0011 #include <linux/highmem.h>
0012 #include <linux/crc32.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/list.h>
0015 #include <linux/kthread.h>
0016 #include <linux/phylink.h>
0017 #include <scsi/libfc.h>
0018 #include <scsi/scsi_host.h>
0019 #include <scsi/fc_frame.h>
0020 #include <linux/if_ether.h>
0021 #include <linux/if_vlan.h>
0022 #include <linux/cpu.h>
0023 #include "qedf.h"
0024 #include "qedf_dbg.h"
0025 #include <uapi/linux/pci_regs.h>
0026 
0027 const struct qed_fcoe_ops *qed_ops;
0028 
0029 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
0030 static void qedf_remove(struct pci_dev *pdev);
0031 static void qedf_shutdown(struct pci_dev *pdev);
0032 static void qedf_schedule_recovery_handler(void *dev);
0033 static void qedf_recovery_handler(struct work_struct *work);
0034 
0035 /*
0036  * Driver module parameters.
0037  */
0038 static unsigned int qedf_dev_loss_tmo = 60;
0039 module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
0040 MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
0041     "remote ports (default 60)");
0042 
0043 uint qedf_debug = QEDF_LOG_INFO;
0044 module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
0045 MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
0046     " mask");
0047 
0048 static uint qedf_fipvlan_retries = 60;
0049 module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
0050 MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
0051     "before giving up (default 60)");
0052 
0053 static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
0054 module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
0055 MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
0056     "(default 1002).");
0057 
0058 static int qedf_default_prio = -1;
0059 module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
0060 MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
0061     " traffic (value between 0 and 7, default 3).");
0062 
0063 uint qedf_dump_frames;
0064 module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
0065 MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
0066     "(default off)");
0067 
0068 static uint qedf_queue_depth;
0069 module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
0070 MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
0071     "by the qedf driver. Default is 0 (use OS default).");
0072 
0073 uint qedf_io_tracing;
0074 module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
0075 MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
0076     "into trace buffer. (default off).");
0077 
0078 static uint qedf_max_lun = MAX_FIBRE_LUNS;
0079 module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
0080 MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver "
0081     "supports. (default 0xffffffff)");
0082 
0083 uint qedf_link_down_tmo;
0084 module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
0085 MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
0086     "link is down by N seconds.");
0087 
0088 bool qedf_retry_delay;
0089 module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
0090 MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry "
0091     "delay handling (default off).");
0092 
0093 static bool qedf_dcbx_no_wait;
0094 module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
0095 MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
0096     "sending FIP VLAN requests on link up (Default: off).");
0097 
0098 static uint qedf_dp_module;
0099 module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
0100 MODULE_PARM_DESC(dp_module, " Bit flags to control verbose printks passed "
0101     "to the qed module during probe.");
0102 
0103 static uint qedf_dp_level = QED_LEVEL_NOTICE;
0104 module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
0105 MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
0106     "during probe (0-3, 0 being the most verbose).");
0107 
0108 static bool qedf_enable_recovery = true;
0109 module_param_named(enable_recovery, qedf_enable_recovery,
0110         bool, S_IRUGO | S_IWUSR);
0111 MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
0112         "interface level errors 0 = Disabled, 1 = Enabled (Default: 1).");
0113 
0114 struct workqueue_struct *qedf_io_wq;
0115 
0116 static struct fcoe_percpu_s qedf_global;
0117 static DEFINE_SPINLOCK(qedf_global_lock);
0118 
0119 static struct kmem_cache *qedf_io_work_cache;
0120 
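     /*
      * Store the discovered FCoE VLAN in the driver context, folding the
      * current 802.1p priority into the upper bits of the tag.
      */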
0121 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
0122 {
0123     int vlan_id_tmp = 0;
0124 
0125     vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
0126     qedf->vlan_id = vlan_id_tmp;
0127     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
0128           "Setting vlan_id=0x%04x prio=%d.\n",
0129           vlan_id_tmp, qedf->prio);
0130 }
0131 
0132 /* Returns true if we have a valid vlan, false otherwise */
0133 static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
0134 {
0135 
0136     while (qedf->fipvlan_retries--) {
0137         /* This is to catch if link goes down during fipvlan retries */
0138         if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
0139             QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
0140             return false;
0141         }
0142 
0143         if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
0144             QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
0145             return false;
0146         }
0147 
0148         if (qedf->vlan_id > 0) {
0149             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
0150                   "vlan = 0x%x already set, calling ctlr_link_up.\n",
0151                   qedf->vlan_id);
0152             if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
0153                 fcoe_ctlr_link_up(&qedf->ctlr);
0154             return true;
0155         }
0156 
0157         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0158                "Retry %d.\n", qedf->fipvlan_retries);
0159         init_completion(&qedf->fipvlan_compl);
0160         qedf_fcoe_send_vlan_req(qedf);
0161         wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
0162     }
0163 
0164     return false;
0165 }
0166 
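     /*
      * Delayed work handler for link state changes.  On link up, run FIP
      * VLAN discovery and fall back to the default VLAN if no response is
      * received; on link down, notify libfcoe and wait for session uploads.
      */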
0167 static void qedf_handle_link_update(struct work_struct *work)
0168 {
0169     struct qedf_ctx *qedf =
0170         container_of(work, struct qedf_ctx, link_update.work);
0171     int rc;
0172 
0173     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
0174           atomic_read(&qedf->link_state));
0175 
0176     if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
0177         rc = qedf_initiate_fipvlan_req(qedf);
0178         if (rc)
0179             return;
0180 
0181         if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
0182             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
0183                   "Link is down, resetting vlan_id.\n");
0184             qedf->vlan_id = 0;
0185             return;
0186         }
0187 
0188         /*
0189      * If we get here then we never received a response to our
0190          * fip vlan request so set the vlan_id to the default and
0191          * tell FCoE that the link is up
0192          */
0193         QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
0194                "response, falling back to default VLAN %d.\n",
0195                qedf_fallback_vlan);
0196         qedf_set_vlan_id(qedf, qedf_fallback_vlan);
0197 
0198         /*
0199          * Zero out data_src_addr so we'll update it with the new
0200          * lport port_id
0201          */
0202         eth_zero_addr(qedf->data_src_addr);
0203         fcoe_ctlr_link_up(&qedf->ctlr);
0204     } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
0205         /*
0206          * If we hit here and link_down_tmo_valid is still 1 it means
0207          * that link_down_tmo timed out so set it to 0 to make sure any
0208          * other readers have accurate state.
0209          */
0210         atomic_set(&qedf->link_down_tmo_valid, 0);
0211         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0212             "Calling fcoe_ctlr_link_down().\n");
0213         fcoe_ctlr_link_down(&qedf->ctlr);
0214         if (qedf_wait_for_upload(qedf) == false)
0215             QEDF_ERR(&qedf->dbg_ctx,
0216                  "Could not upload all sessions.\n");
0217         /* Reset the number of FIP VLAN retries */
0218         qedf->fipvlan_retries = qedf_fipvlan_retries;
0219     }
0220 }
0221 
0222 #define QEDF_FCOE_MAC_METHOD_GRANGED_MAC        1
0223 #define QEDF_FCOE_MAC_METHOD_FCF_MAP            2
0224 #define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC       3
0225 static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
0226 {
0227     u8 *granted_mac;
0228     struct fc_frame_header *fh = fc_frame_header_get(fp);
0229     u8 fc_map[3];
0230     int method = 0;
0231 
0232     /* Get granted MAC address from FIP FLOGI payload */
0233     granted_mac = fr_cb(fp)->granted_mac;
0234 
0235     /*
0236      * We set the source MAC for FCoE traffic based on the Granted MAC
0237      * address from the switch.
0238      *
0239      * If granted_mac is non-zero, we use that.
0240      * If granted_mac is zeroed out, we create the FCoE MAC based on
0241      * the sel_fcf->fc_map and the d_id of the FLOGI frame.
0242      * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
0243      * d_id of the FLOGI frame.
0244      */
0245     if (!is_zero_ether_addr(granted_mac)) {
0246         ether_addr_copy(qedf->data_src_addr, granted_mac);
0247         method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC;
0248     } else if (qedf->ctlr.sel_fcf->fc_map != 0) {
0249         hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
0250         qedf->data_src_addr[0] = fc_map[0];
0251         qedf->data_src_addr[1] = fc_map[1];
0252         qedf->data_src_addr[2] = fc_map[2];
0253         qedf->data_src_addr[3] = fh->fh_d_id[0];
0254         qedf->data_src_addr[4] = fh->fh_d_id[1];
0255         qedf->data_src_addr[5] = fh->fh_d_id[2];
0256         method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
0257     } else {
0258         fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
0259         method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
0260     }
0261 
0262     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0263         "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
0264 }
0265 
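     /*
      * FLOGI completion handler.  Tracks accept/reject statistics, captures
      * the granted MAC on an LS_ACC and then hands the response to libfc.
      */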
0266 static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
0267     void *arg)
0268 {
0269     struct fc_exch *exch = fc_seq_exch(seq);
0270     struct fc_lport *lport = exch->lp;
0271     struct qedf_ctx *qedf = lport_priv(lport);
0272 
0273     if (!qedf) {
0274         QEDF_ERR(NULL, "qedf is NULL.\n");
0275         return;
0276     }
0277 
0278     /*
0279      * If ERR_PTR is set then don't try to stat anything as it will cause
0280      * a crash when we access fp.
0281      */
0282     if (IS_ERR(fp)) {
0283         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
0284             "fp has IS_ERR() set.\n");
0285         goto skip_stat;
0286     }
0287 
0288     /* Log stats for FLOGI reject */
0289     if (fc_frame_payload_op(fp) == ELS_LS_RJT)
0290         qedf->flogi_failed++;
0291     else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
0292         /* Set the source MAC we will use for FCoE traffic */
0293         qedf_set_data_src_addr(qedf, fp);
0294         qedf->flogi_pending = 0;
0295     }
0296 
0297     /* Complete flogi_compl so we can proceed to sending ADISCs */
0298     complete(&qedf->flogi_compl);
0299 
0300 skip_stat:
0301     /* Report response to libfc */
0302     fc_lport_flogi_resp(seq, fp, lport);
0303 }
0304 
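     /*
      * Wrapper around fc_elsct_send() that intercepts FLOGI so the driver
      * can count attempts and schedule stag_work once the number of pending
      * FLOGIs exceeds QEDF_FLOGI_RETRY_CNT.
      */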
0305 static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
0306     struct fc_frame *fp, unsigned int op,
0307     void (*resp)(struct fc_seq *,
0308     struct fc_frame *,
0309     void *),
0310     void *arg, u32 timeout)
0311 {
0312     struct qedf_ctx *qedf = lport_priv(lport);
0313 
0314     /*
0315      * Intercept FLOGI for statistic purposes. Note we use the resp
0316      * callback to tell if this is really a flogi.
0317      */
0318     if (resp == fc_lport_flogi_resp) {
0319         qedf->flogi_cnt++;
0320         if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
0321             schedule_delayed_work(&qedf->stag_work, 2);
0322             return NULL;
0323         }
0324         qedf->flogi_pending++;
0325         return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
0326             arg, timeout);
0327     }
0328 
0329     return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
0330 }
0331 
0332 int qedf_send_flogi(struct qedf_ctx *qedf)
0333 {
0334     struct fc_lport *lport;
0335     struct fc_frame *fp;
0336 
0337     lport = qedf->lport;
0338 
0339     if (!lport->tt.elsct_send) {
0340         QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
0341         return -EINVAL;
0342     }
0343 
0344     fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
0345     if (!fp) {
0346         QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
0347         return -ENOMEM;
0348     }
0349 
0350     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
0351         "Sending FLOGI to reestablish session with switch.\n");
0352     lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
0353         ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);
0354 
0355     init_completion(&qedf->flogi_compl);
0356 
0357     return 0;
0358 }
0359 
0360 /*
0361  * This function is called if link_down_tmo is in use.  If we get a link up and
0362  * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
0363  * sessions with targets.  Otherwise, just call fcoe_ctlr_link_up().
0364  */
0365 static void qedf_link_recovery(struct work_struct *work)
0366 {
0367     struct qedf_ctx *qedf =
0368         container_of(work, struct qedf_ctx, link_recovery.work);
0369     struct fc_lport *lport = qedf->lport;
0370     struct fc_rport_priv *rdata;
0371     bool rc;
0372     int retries = 30;
0373     int rval, i;
0374     struct list_head rdata_login_list;
0375 
0376     INIT_LIST_HEAD(&rdata_login_list);
0377 
0378     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0379         "Link down tmo did not expire.\n");
0380 
0381     /*
0382      * Essentially reset the fcoe_ctlr here without affecting the state
0383      * of the libfc structs.
0384      */
0385     qedf->ctlr.state = FIP_ST_LINK_WAIT;
0386     fcoe_ctlr_link_down(&qedf->ctlr);
0387 
0388     /*
0389      * Bring the link up before we send the fipvlan request so libfcoe
0390      * can select a new fcf in parallel
0391      */
0392     fcoe_ctlr_link_up(&qedf->ctlr);
0393 
0394     /* Since the link went down and came back up, verify which VLAN we're on */
0395     qedf->fipvlan_retries = qedf_fipvlan_retries;
0396     rc = qedf_initiate_fipvlan_req(qedf);
0397     /* If getting the VLAN fails, set the VLAN to the fallback one */
0398     if (!rc)
0399         qedf_set_vlan_id(qedf, qedf_fallback_vlan);
0400 
0401     /*
0402      * We need to wait for an FCF to be selected after the
0403      * fcoe_ctlr_link_up call, otherwise the FLOGI will be rejected.
0404      */
0405     while (retries > 0) {
0406         if (qedf->ctlr.sel_fcf) {
0407             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0408                 "FCF reselected, proceeding with FLOGI.\n");
0409             break;
0410         }
0411         msleep(500);
0412         retries--;
0413     }
0414 
0415     if (retries < 1) {
0416         QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
0417             "FCF selection.\n");
0418         return;
0419     }
0420 
0421     rval = qedf_send_flogi(qedf);
0422     if (rval)
0423         return;
0424 
0425     /* Wait for FLOGI completion before proceeding with sending ADISCs */
0426     i = wait_for_completion_timeout(&qedf->flogi_compl,
0427         qedf->lport->r_a_tov);
0428     if (i == 0) {
0429         QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
0430         return;
0431     }
0432 
0433     /*
0434      * Call lport->tt.rport_login which will cause libfc to send an
0435      * ADISC since the rport is in state ready.
0436      */
0437     mutex_lock(&lport->disc.disc_mutex);
0438     list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
0439         if (kref_get_unless_zero(&rdata->kref)) {
0440             fc_rport_login(rdata);
0441             kref_put(&rdata->kref, fc_rport_destroy);
0442         }
0443     }
0444     mutex_unlock(&lport->disc.disc_mutex);
0445 }
0446 
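     /*
      * Map the qed link speed and supported capability mask onto the
      * FC_PORTSPEED_* values reported through the fc_host attributes.
      */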
0447 static void qedf_update_link_speed(struct qedf_ctx *qedf,
0448     struct qed_link_output *link)
0449 {
0450     __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
0451     struct fc_lport *lport = qedf->lport;
0452 
0453     lport->link_speed = FC_PORTSPEED_UNKNOWN;
0454     lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
0455 
0456     /* Set fc_host link speed */
0457     switch (link->speed) {
0458     case 10000:
0459         lport->link_speed = FC_PORTSPEED_10GBIT;
0460         break;
0461     case 25000:
0462         lport->link_speed = FC_PORTSPEED_25GBIT;
0463         break;
0464     case 40000:
0465         lport->link_speed = FC_PORTSPEED_40GBIT;
0466         break;
0467     case 50000:
0468         lport->link_speed = FC_PORTSPEED_50GBIT;
0469         break;
0470     case 100000:
0471         lport->link_speed = FC_PORTSPEED_100GBIT;
0472         break;
0473     case 20000:
0474         lport->link_speed = FC_PORTSPEED_20GBIT;
0475         break;
0476     default:
0477         lport->link_speed = FC_PORTSPEED_UNKNOWN;
0478         break;
0479     }
0480 
0481     /*
0482      * Set supported link speed by querying the supported
0483      * capabilities of the link.
0484      */
0485 
0486     phylink_zero(sup_caps);
0487     phylink_set(sup_caps, 10000baseT_Full);
0488     phylink_set(sup_caps, 10000baseKX4_Full);
0489     phylink_set(sup_caps, 10000baseR_FEC);
0490     phylink_set(sup_caps, 10000baseCR_Full);
0491     phylink_set(sup_caps, 10000baseSR_Full);
0492     phylink_set(sup_caps, 10000baseLR_Full);
0493     phylink_set(sup_caps, 10000baseLRM_Full);
0494     phylink_set(sup_caps, 10000baseKR_Full);
0495 
0496     if (linkmode_intersects(link->supported_caps, sup_caps))
0497         lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
0498 
0499     phylink_zero(sup_caps);
0500     phylink_set(sup_caps, 25000baseKR_Full);
0501     phylink_set(sup_caps, 25000baseCR_Full);
0502     phylink_set(sup_caps, 25000baseSR_Full);
0503 
0504     if (linkmode_intersects(link->supported_caps, sup_caps))
0505         lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
0506 
0507     phylink_zero(sup_caps);
0508     phylink_set(sup_caps, 40000baseLR4_Full);
0509     phylink_set(sup_caps, 40000baseKR4_Full);
0510     phylink_set(sup_caps, 40000baseCR4_Full);
0511     phylink_set(sup_caps, 40000baseSR4_Full);
0512 
0513     if (linkmode_intersects(link->supported_caps, sup_caps))
0514         lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
0515 
0516     phylink_zero(sup_caps);
0517     phylink_set(sup_caps, 50000baseKR2_Full);
0518     phylink_set(sup_caps, 50000baseCR2_Full);
0519     phylink_set(sup_caps, 50000baseSR2_Full);
0520 
0521     if (linkmode_intersects(link->supported_caps, sup_caps))
0522         lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
0523 
0524     phylink_zero(sup_caps);
0525     phylink_set(sup_caps, 100000baseKR4_Full);
0526     phylink_set(sup_caps, 100000baseSR4_Full);
0527     phylink_set(sup_caps, 100000baseCR4_Full);
0528     phylink_set(sup_caps, 100000baseLR4_ER4_Full);
0529 
0530     if (linkmode_intersects(link->supported_caps, sup_caps))
0531         lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
0532 
0533     phylink_zero(sup_caps);
0534     phylink_set(sup_caps, 20000baseKR2_Full);
0535 
0536     if (linkmode_intersects(link->supported_caps, sup_caps))
0537         lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
0538 
0539     if (lport->host && lport->host->shost_data)
0540         fc_host_supported_speeds(lport->host) =
0541             lport->link_supported_speeds;
0542 }
0543 
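     /* qed callback for bandwidth changes; refresh the link speed if up */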
0544 static void qedf_bw_update(void *dev)
0545 {
0546     struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
0547     struct qed_link_output link;
0548 
0549     /* Get the latest status of the link */
0550     qed_ops->common->get_link(qedf->cdev, &link);
0551 
0552     if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
0553         QEDF_ERR(&qedf->dbg_ctx,
0554          "Ignoring link update, driver is unloading.\n");
0555         return;
0556     }
0557 
0558     if (link.link_up) {
0559         if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
0560             qedf_update_link_speed(qedf, &link);
0561         else
0562             QEDF_ERR(&qedf->dbg_ctx,
0563                  "Ignore bw update, link is down.\n");
0564 
0565     } else {
0566         QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
0567     }
0568 }
0569 
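     /*
      * qed callback for physical link state changes.  Queues link_update
      * (or link_recovery) work once DCBX has converged, honoring the
      * link_down_tmo grace period on link down.
      */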
0570 static void qedf_link_update(void *dev, struct qed_link_output *link)
0571 {
0572     struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
0573 
0574     /*
0575      * Prevent race where we're removing the module and we get link update
0576      * for qed.
0577      */
0578     if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
0579         QEDF_ERR(&qedf->dbg_ctx,
0580          "Ignoring link update, driver is unloading.\n");
0581         return;
0582     }
0583 
0584     if (link->link_up) {
0585         if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
0586             QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
0587                 "Ignoring link up event as link is already up.\n");
0588             return;
0589         }
0590         QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d Gb/s).\n",
0591             link->speed / 1000);
0592 
0593         /* Cancel any pending link down work */
0594         cancel_delayed_work(&qedf->link_update);
0595 
0596         atomic_set(&qedf->link_state, QEDF_LINK_UP);
0597         qedf_update_link_speed(qedf, link);
0598 
0599         if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
0600             qedf_dcbx_no_wait) {
0601             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0602                  "DCBx done.\n");
0603             if (atomic_read(&qedf->link_down_tmo_valid) > 0)
0604                 queue_delayed_work(qedf->link_update_wq,
0605                     &qedf->link_recovery, 0);
0606             else
0607                 queue_delayed_work(qedf->link_update_wq,
0608                     &qedf->link_update, 0);
0609             atomic_set(&qedf->link_down_tmo_valid, 0);
0610         }
0611 
0612     } else {
0613         QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");
0614 
0615         atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
0616         atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
0617         /*
0618          * Flag that we're waiting for the link to come back up before
0619          * informing the fcoe layer of the event.
0620          */
0621         if (qedf_link_down_tmo > 0) {
0622             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0623                 "Starting link down tmo.\n");
0624             atomic_set(&qedf->link_down_tmo_valid, 1);
0625         }
0626         qedf->vlan_id = 0;
0627         qedf_update_link_speed(qedf, link);
0628         queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0629             qedf_link_down_tmo * HZ);
0630     }
0631 }
0632 
0633 
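     /*
      * qed DCBX async-event callback.  Latches the negotiated FCoE 802.1p
      * priority and kicks off the deferred link-up handling once DCBX
      * convergence is complete.
      */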
0634 static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
0635 {
0636     struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
0637     u8 tmp_prio;
0638 
0639     QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
0640         "prio=%d.\n", get->operational.valid, get->operational.enabled,
0641         get->operational.app_prio.fcoe);
0642 
0643     if (get->operational.enabled && get->operational.valid) {
0644         /* If DCBX was already negotiated on link up then just exit */
0645         if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
0646             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0647                 "DCBX already set on link up.\n");
0648             return;
0649         }
0650 
0651         atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
0652 
0653         /*
0654          * Set the 8021q priority in the following manner:
0655          *
0656          * 1. If a modparam is set use that
0657          * 2. If the value is not between 0..7 use the default
0658          * 3. Use the priority we get from the DCBX app tag
0659          */
0660         tmp_prio = get->operational.app_prio.fcoe;
0661         if (qedf_default_prio > -1)
0662             qedf->prio = qedf_default_prio;
0663         else if (tmp_prio > 7) {
0664             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0665                 "FIP/FCoE prio %d out of range, setting to %d.\n",
0666                 tmp_prio, QEDF_DEFAULT_PRIO);
0667             qedf->prio = QEDF_DEFAULT_PRIO;
0668         } else
0669             qedf->prio = tmp_prio;
0670 
0671         if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
0672             !qedf_dcbx_no_wait) {
0673             if (atomic_read(&qedf->link_down_tmo_valid) > 0)
0674                 queue_delayed_work(qedf->link_update_wq,
0675                     &qedf->link_recovery, 0);
0676             else
0677                 queue_delayed_work(qedf->link_update_wq,
0678                     &qedf->link_update, 0);
0679             atomic_set(&qedf->link_down_tmo_valid, 0);
0680         }
0681     }
0682 
0683 }
0684 
0685 static u32 qedf_get_login_failures(void *cookie)
0686 {
0687     struct qedf_ctx *qedf;
0688 
0689     qedf = (struct qedf_ctx *)cookie;
0690     return qedf->flogi_failed;
0691 }
0692 
0693 static struct qed_fcoe_cb_ops qedf_cb_ops = {
0694     {
0695         .link_update = qedf_link_update,
0696         .bw_update = qedf_bw_update,
0697         .schedule_recovery_handler = qedf_schedule_recovery_handler,
0698         .dcbx_aen = qedf_dcbx_handler,
0699         .get_generic_tlv_data = qedf_get_generic_tlv_data,
0700         .get_protocol_tlv_data = qedf_get_protocol_tlv_data,
0701         .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
0702     }
0703 };
0704 
0705 /*
0706  * Various transport templates.
0707  */
0708 
0709 static struct scsi_transport_template *qedf_fc_transport_template;
0710 static struct scsi_transport_template *qedf_fc_vport_transport_template;
0711 
0712 /*
0713  * SCSI EH handlers
0714  */
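     /*
      * eh_abort handler: after validating the rport and the matching
      * io_req, issue an ABTS and wait for its completion before returning
      * SUCCESS or FAILED to the SCSI midlayer.
      */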
0715 static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
0716 {
0717     struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
0718     struct fc_lport *lport;
0719     struct qedf_ctx *qedf;
0720     struct qedf_ioreq *io_req;
0721     struct fc_rport_libfc_priv *rp = rport->dd_data;
0722     struct fc_rport_priv *rdata;
0723     struct qedf_rport *fcport = NULL;
0724     int rc = FAILED;
0725     int wait_count = 100;
0726     int refcount = 0;
0727     int rval;
0728     int got_ref = 0;
0729 
0730     lport = shost_priv(sc_cmd->device->host);
0731     qedf = (struct qedf_ctx *)lport_priv(lport);
0732 
0733     /* rport and tgt are allocated together, so tgt should be non-NULL */
0734     fcport = (struct qedf_rport *)&rp[1];
0735     rdata = fcport->rdata;
0736     if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
0737         QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
0738         rc = SUCCESS;
0739         goto out;
0740     }
0741 
0742 
0743     io_req = qedf_priv(sc_cmd)->io_req;
0744     if (!io_req) {
0745         QEDF_ERR(&qedf->dbg_ctx,
0746              "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
0747              sc_cmd, sc_cmd->cmnd[0],
0748              rdata->ids.port_id);
0749         rc = SUCCESS;
0750         goto drop_rdata_kref;
0751     }
0752 
0753     rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
0754     if (rval)
0755         got_ref = 1;
0756 
0757     /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
0758     if (!rval || io_req->sc_cmd != sc_cmd) {
0759         QEDF_ERR(&qedf->dbg_ctx,
0760              "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
0761              io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
0762 
0763         goto drop_rdata_kref;
0764     }
0765 
0766     if (fc_remote_port_chkready(rport)) {
0767         refcount = kref_read(&io_req->refcount);
0768         QEDF_ERR(&qedf->dbg_ctx,
0769              "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
0770              io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
0771              refcount, rdata->ids.port_id);
0772 
0773         goto drop_rdata_kref;
0774     }
0775 
0776     rc = fc_block_scsi_eh(sc_cmd);
0777     if (rc)
0778         goto drop_rdata_kref;
0779 
0780     if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
0781         QEDF_ERR(&qedf->dbg_ctx,
0782              "Connection uploading, xid=0x%x., port_id=%06x\n",
0783              io_req->xid, rdata->ids.port_id);
0784         while (io_req->sc_cmd && (wait_count != 0)) {
0785             msleep(100);
0786             wait_count--;
0787         }
0788         if (wait_count) {
0789             QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
0790             rc = SUCCESS;
0791         } else {
0792             QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
0793             rc = FAILED;
0794         }
0795         goto drop_rdata_kref;
0796     }
0797 
0798     if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
0799         QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
0800         goto drop_rdata_kref;
0801     }
0802 
0803     QEDF_ERR(&qedf->dbg_ctx,
0804          "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
0805          io_req, sc_cmd, io_req->xid, io_req->fp_idx,
0806          rdata->ids.port_id);
0807 
0808     if (qedf->stop_io_on_error) {
0809         qedf_stop_all_io(qedf);
0810         rc = SUCCESS;
0811         goto drop_rdata_kref;
0812     }
0813 
0814     init_completion(&io_req->abts_done);
0815     rval = qedf_initiate_abts(io_req, true);
0816     if (rval) {
0817         QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
0818         /*
0819          * If we fail to queue the ABTS then return this command to
0820          * the SCSI layer as it will own and free the xid
0821          */
0822         rc = SUCCESS;
0823         qedf_scsi_done(qedf, io_req, DID_ERROR);
0824         goto drop_rdata_kref;
0825     }
0826 
0827     wait_for_completion(&io_req->abts_done);
0828 
0829     if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
0830         io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
0831         io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
0832         /*
0833          * If we get a response to the abort, this is success from
0834          * the perspective that all references to the command have
0835          * been removed from the driver and firmware
0836          */
0837         rc = SUCCESS;
0838     } else {
0839         /* If the abort and cleanup failed then return a failure */
0840         rc = FAILED;
0841     }
0842 
0843     if (rc == SUCCESS)
0844         QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
0845               io_req->xid);
0846     else
0847         QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
0848               io_req->xid);
0849 
0850 drop_rdata_kref:
0851     kref_put(&rdata->kref, fc_rport_destroy);
0852 out:
0853     if (got_ref)
0854         kref_put(&io_req->refcount, qedf_release_cmd);
0855     return rc;
0856 }
0857 
0858 static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
0859 {
0860     QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
0861          sc_cmd->device->host->host_no, sc_cmd->device->id,
0862          sc_cmd->device->lun);
0863     return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
0864 }
0865 
0866 static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
0867 {
0868     QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
0869          sc_cmd->device->host->host_no, sc_cmd->device->id,
0870          sc_cmd->device->lun);
0871     return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
0872 }
0873 
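     /*
      * Poll for up to 60 seconds (120 iterations of 500 ms) for all
      * offloaded sessions to upload; returns true once num_offloads
      * reaches zero.
      */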
0874 bool qedf_wait_for_upload(struct qedf_ctx *qedf)
0875 {
0876     struct qedf_rport *fcport;
0877     int wait_cnt = 120;
0878 
0879     while (wait_cnt--) {
0880         if (atomic_read(&qedf->num_offloads))
0881             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
0882                   "Waiting for all uploads to complete num_offloads = 0x%x.\n",
0883                   atomic_read(&qedf->num_offloads));
0884         else
0885             return true;
0886         msleep(500);
0887     }
0888 
0889     rcu_read_lock();
0890     list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
0891         if (test_bit(QEDF_RPORT_SESSION_READY,
0892                        &fcport->flags)) {
0893             if (fcport->rdata)
0894                 QEDF_ERR(&qedf->dbg_ctx,
0895                      "Waiting for fcport %p portid=%06x.\n",
0896                      fcport, fcport->rdata->ids.port_id);
0897         } else {
0898             QEDF_ERR(&qedf->dbg_ctx,
0899                  "Waiting for fcport %p.\n", fcport);
0900         }
0901     }
0902 
0903     rcu_read_unlock();
0904     return false;
0905 }
0906 
0907 /* Performs soft reset of qedf_ctx by simulating a link down/up */
0908 void qedf_ctx_soft_reset(struct fc_lport *lport)
0909 {
0910     struct qedf_ctx *qedf;
0911     struct qed_link_output if_link;
0912 
0913     if (lport->vport) {
0914         printk_ratelimited("Cannot issue host reset on NPIV port.\n");
0915         return;
0916     }
0917 
0918     qedf = lport_priv(lport);
0919 
0920     qedf->flogi_pending = 0;
0921     /* For host reset, essentially do a soft link up/down */
0922     atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
0923     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
0924           "Queuing link down work.\n");
0925     queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0926         0);
0927 
0928     if (qedf_wait_for_upload(qedf) == false) {
0929         QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
0930         WARN_ON(atomic_read(&qedf->num_offloads));
0931     }
0932 
0933     /* Before setting link up query physical link state */
0934     qed_ops->common->get_link(qedf->cdev, &if_link);
0935     /* Bail if the physical link is not up */
0936     if (!if_link.link_up) {
0937         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
0938               "Physical link is not up.\n");
0939         return;
0940     }
0941     /* Flush and wait to make sure link down is processed */
0942     flush_delayed_work(&qedf->link_update);
0943     msleep(500);
0944 
0945     atomic_set(&qedf->link_state, QEDF_LINK_UP);
0946     qedf->vlan_id = 0;
0947     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
0948           "Queue link up work.\n");
0949     queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0950         0);
0951 }
0952 
0953 /* Reset the host by gracefully logging out and then logging back in */
0954 static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
0955 {
0956     struct fc_lport *lport;
0957     struct qedf_ctx *qedf;
0958 
0959     lport = shost_priv(sc_cmd->device->host);
0960     qedf = lport_priv(lport);
0961 
0962     if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
0963         test_bit(QEDF_UNLOADING, &qedf->flags))
0964         return FAILED;
0965 
0966     QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
0967 
0968     qedf_ctx_soft_reset(lport);
0969 
0970     return SUCCESS;
0971 }
0972 
0973 static int qedf_slave_configure(struct scsi_device *sdev)
0974 {
0975     if (qedf_queue_depth) {
0976         scsi_change_queue_depth(sdev, qedf_queue_depth);
0977     }
0978 
0979     return 0;
0980 }
0981 
0982 static struct scsi_host_template qedf_host_template = {
0983     .module     = THIS_MODULE,
0984     .name       = QEDF_MODULE_NAME,
0985     .this_id    = -1,
0986     .cmd_per_lun    = 32,
0987     .max_sectors    = 0xffff,
0988     .queuecommand   = qedf_queuecommand,
0989     .shost_groups   = qedf_host_groups,
0990     .eh_abort_handler   = qedf_eh_abort,
0991     .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
0992     .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
0993     .eh_host_reset_handler  = qedf_eh_host_reset,
0994     .slave_configure    = qedf_slave_configure,
0995     .dma_boundary = QED_HW_DMA_BOUNDARY,
0996     .sg_tablesize = QEDF_MAX_BDS_PER_CMD,
0997     .can_queue = FCOE_PARAMS_NUM_TASKS,
0998     .change_queue_depth = scsi_change_queue_depth,
0999     .cmd_size = sizeof(struct qedf_cmd_priv),
1000 };
1001 
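     /* Serialize access to the shared CRC/EOF page via qedf_global_lock */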
1002 static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1003 {
1004     int rc;
1005 
1006     spin_lock(&qedf_global_lock);
1007     rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
1008     spin_unlock(&qedf_global_lock);
1009 
1010     return rc;
1011 }
1012 
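     /* Look up an offloaded fcport by FC port_id under RCU protection */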
1013 static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
1014 {
1015     struct qedf_rport *fcport;
1016     struct fc_rport_priv *rdata;
1017 
1018     rcu_read_lock();
1019     list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
1020         rdata = fcport->rdata;
1021         if (rdata == NULL)
1022             continue;
1023         if (rdata->ids.port_id == port_id) {
1024             rcu_read_unlock();
1025             return fcport;
1026         }
1027     }
1028     rcu_read_unlock();
1029 
1030     /* Return NULL to caller to let them know fcport was not found */
1031     return NULL;
1032 }
1033 
1034 /* Transmits an ELS frame over an offloaded session */
1035 static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
1036 {
1037     struct fc_frame_header *fh;
1038     int rc = 0;
1039 
1040     fh = fc_frame_header_get(fp);
1041     if ((fh->fh_type == FC_TYPE_ELS) &&
1042         (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
1043         switch (fc_frame_payload_op(fp)) {
1044         case ELS_ADISC:
1045             qedf_send_adisc(fcport, fp);
1046             rc = 1;
1047             break;
1048         }
1049     }
1050 
1051     return rc;
1052 }
1053 
1054 /*
1055  * qedf_xmit - qedf FCoE frame transmit function
1056  */
1057 static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
1058 {
1059     struct fc_lport     *base_lport;
1060     struct qedf_ctx     *qedf;
1061     struct ethhdr       *eh;
1062     struct fcoe_crc_eof *cp;
1063     struct sk_buff      *skb;
1064     struct fc_frame_header  *fh;
1065     struct fcoe_hdr     *hp;
1066     u8          sof, eof;
1067     u32         crc;
1068     unsigned int        hlen, tlen, elen;
1069     int         wlen;
1070     struct fc_lport *tmp_lport;
1071     struct fc_lport *vn_port = NULL;
1072     struct qedf_rport *fcport;
1073     int rc;
1074     u16 vlan_tci = 0;
1075 
1076     qedf = (struct qedf_ctx *)lport_priv(lport);
1077 
1078     fh = fc_frame_header_get(fp);
1079     skb = fp_skb(fp);
1080 
1081     /* Filter out traffic to other NPIV ports on the same host */
1082     if (lport->vport)
1083         base_lport = shost_priv(vport_to_shost(lport->vport));
1084     else
1085         base_lport = lport;
1086 
1087     /* Flag if the destination is the base port */
1088     if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
1089         vn_port = base_lport;
1090     } else {
1091         /* Go through the list of vports attached to the base_lport
1092          * and see if we have a match with the destination address.
1093          */
1094         list_for_each_entry(tmp_lport, &base_lport->vports, list) {
1095             if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
1096                 vn_port = tmp_lport;
1097                 break;
1098             }
1099         }
1100     }
1101     if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
1102         struct fc_rport_priv *rdata = NULL;
1103 
1104         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
1105             "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
1106         kfree_skb(skb);
1107         rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
1108         if (rdata) {
1109             rdata->retries = lport->max_rport_retry_count;
1110             kref_put(&rdata->kref, fc_rport_destroy);
1111         }
1112         return -EINVAL;
1113     }
1114     /* End NPIV filtering */
1115 
1116     if (!qedf->ctlr.sel_fcf) {
1117         kfree_skb(skb);
1118         return 0;
1119     }
1120 
1121     if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
1122         QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
1123         kfree_skb(skb);
1124         return 0;
1125     }
1126 
1127     if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
1128         QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
1129         kfree_skb(skb);
1130         return 0;
1131     }
1132 
1133     if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
1134         if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
1135             return 0;
1136     }
1137 
1138     /* Check to see if this needs to be sent on an offloaded session */
1139     fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
1140 
1141     if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1142         rc = qedf_xmit_l2_frame(fcport, fp);
1143         /*
1144          * If the frame was successfully sent over the middle path
1145          * then do not try to also send it over the LL2 path
1146          */
1147         if (rc)
1148             return 0;
1149     }
1150 
1151     sof = fr_sof(fp);
1152     eof = fr_eof(fp);
1153 
1154     elen = sizeof(struct ethhdr);
1155     hlen = sizeof(struct fcoe_hdr);
1156     tlen = sizeof(struct fcoe_crc_eof);
1157     wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1158 
1159     skb->ip_summed = CHECKSUM_NONE;
1160     crc = fcoe_fc_crc(fp);
1161 
1162     /* copy port crc and eof to the skb buff */
1163     if (skb_is_nonlinear(skb)) {
1164         skb_frag_t *frag;
1165 
1166         if (qedf_get_paged_crc_eof(skb, tlen)) {
1167             kfree_skb(skb);
1168             return -ENOMEM;
1169         }
1170         frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1171         cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
1172     } else {
1173         cp = skb_put(skb, tlen);
1174     }
1175 
1176     memset(cp, 0, sizeof(*cp));
1177     cp->fcoe_eof = eof;
1178     cp->fcoe_crc32 = cpu_to_le32(~crc);
1179     if (skb_is_nonlinear(skb)) {
1180         kunmap_atomic(cp);
1181         cp = NULL;
1182     }
1183 
1184 
1185     /* adjust skb network/transport offsets to match mac/fcoe/port */
1186     skb_push(skb, elen + hlen);
1187     skb_reset_mac_header(skb);
1188     skb_reset_network_header(skb);
1189     skb->mac_len = elen;
1190     skb->protocol = htons(ETH_P_FCOE);
1191 
1192     /*
1193      * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
1194      * for FIP/FCoE traffic.
1195      */
1196     __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
1197 
1198     /* fill up mac and fcoe headers */
1199     eh = eth_hdr(skb);
1200     eh->h_proto = htons(ETH_P_FCOE);
1201     if (qedf->ctlr.map_dest)
1202         fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1203     else
1204         /* insert GW address */
1205         ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
1206 
1207     /* Set the source MAC address */
1208     ether_addr_copy(eh->h_source, qedf->data_src_addr);
1209 
1210     hp = (struct fcoe_hdr *)(eh + 1);
1211     memset(hp, 0, sizeof(*hp));
1212     if (FC_FCOE_VER)
1213         FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1214     hp->fcoe_sof = sof;
1215 
1216     /* update tx stats */
1217     this_cpu_inc(lport->stats->TxFrames);
1218     this_cpu_add(lport->stats->TxWords, wlen);
1219 
1220     /* Get VLAN ID from skb for printing purposes */
1221     __vlan_hwaccel_get_tag(skb, &vlan_tci);
1222 
1223     /* send down to lld */
1224     fr_dev(fp) = lport;
1225     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
1226         "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
1227         ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
1228         vlan_tci);
1229     if (qedf_dump_frames)
1230         print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
1231             1, skb->data, skb->len, false);
1232     rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
1233     if (rc) {
1234         QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
1235         kfree_skb(skb);
1236         return rc;
1237     }
1238 
1239     return 0;
1240 }
1241 
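     /*
      * Allocate the DMA-coherent send queue for a session along with its
      * page base list (PBL); each PBL entry holds the lo/hi 32 bits of a
      * queue page address.
      */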
1242 static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1243 {
1244     int rval = 0;
1245     u32 *pbl;
1246     dma_addr_t page;
1247     int num_pages;
1248 
1249     /* Calculate appropriate queue and PBL sizes */
1250     fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
1251     fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
1252     fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
1253         sizeof(void *);
1254     fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
1255 
1256     fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1257                     &fcport->sq_dma, GFP_KERNEL);
1258     if (!fcport->sq) {
1259         QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
1260         rval = 1;
1261         goto out;
1262     }
1263 
1264     fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
1265                         fcport->sq_pbl_size,
1266                         &fcport->sq_pbl_dma, GFP_KERNEL);
1267     if (!fcport->sq_pbl) {
1268         QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
1269         rval = 1;
1270         goto out_free_sq;
1271     }
1272 
1273     /* Create PBL */
1274     num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
1275     page = fcport->sq_dma;
1276     pbl = (u32 *)fcport->sq_pbl;
1277 
1278     while (num_pages--) {
1279         *pbl = U64_LO(page);
1280         pbl++;
1281         *pbl = U64_HI(page);
1282         pbl++;
1283         page += QEDF_PAGE_SIZE;
1284     }
1285 
1286     return rval;
1287 
1288 out_free_sq:
1289     dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
1290         fcport->sq_dma);
1291 out:
1292     return rval;
1293 }
1294 
1295 static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1296 {
1297     if (fcport->sq_pbl)
1298         dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
1299             fcport->sq_pbl, fcport->sq_pbl_dma);
1300     if (fcport->sq)
1301         dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1302             fcport->sq, fcport->sq_dma);
1303 }
1304 
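     /*
      * Acquire a connection handle from qed, fill in the
      * qed_fcoe_params_offload structure (queue addresses, MACs, VLAN,
      * S_ID/D_ID and FC-TAPE flags) and offload the session to firmware.
      */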
1305 static int qedf_offload_connection(struct qedf_ctx *qedf,
1306     struct qedf_rport *fcport)
1307 {
1308     struct qed_fcoe_params_offload conn_info;
1309     u32 port_id;
1310     int rval;
1311     uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));
1312 
1313     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
1314            "portid=%06x.\n", fcport->rdata->ids.port_id);
1315     rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
1316         &fcport->fw_cid, &fcport->p_doorbell);
1317     if (rval) {
1318         QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
1319                "for portid=%06x.\n", fcport->rdata->ids.port_id);
1320         rval = 1; /* For some reason qed returns 0 on failure here */
1321         goto out;
1322     }
1323 
1324     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
1325            "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
1326            fcport->fw_cid, fcport->handle);
1327 
1328     memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));
1329 
1330     /* Fill in the offload connection info */
1331     conn_info.sq_pbl_addr = fcport->sq_pbl_dma;
1332 
1333     conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
1334     conn_info.sq_next_page_addr =
1335         (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));
1336 
1337     /* Need to use our FCoE MAC for the offload session */
1338     ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);
1339 
1340     ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
1341 
1342     conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
1343     conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
1344     conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
1345     conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
1346 
1347     /* Set VLAN data */
1348     conn_info.vlan_tag = qedf->vlan_id <<
1349         FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
1350     conn_info.vlan_tag |=
1351         qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
1352     conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
1353         FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
1354 
1355     /* Set host port source id */
1356     port_id = fc_host_port_id(qedf->lport->host);
1357     fcport->sid = port_id;
1358     conn_info.s_id.addr_hi = (port_id & 0x000000FF);
1359     conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
1360     conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;
1361 
1362     conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;
1363 
1364     /* Set remote port destination id */
1365     port_id = fcport->rdata->rport->port_id;
1366     conn_info.d_id.addr_hi = (port_id & 0x000000FF);
1367     conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
1368     conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;
1369 
1370     conn_info.def_q_idx = 0; /* Default index for send queue? */
1371 
1372     /* Set FC-TAPE specific flags if needed */
1373     if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1374         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
1375             "Enable CONF, REC for portid=%06x.\n",
1376             fcport->rdata->ids.port_id);
1377         conn_info.flags |= 1 <<
1378             FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
1379         conn_info.flags |=
1380             ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
1381             FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
1382     }
1383 
1384     rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
1385     if (rval) {
1386         QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
1387                "for portid=%06x.\n", fcport->rdata->ids.port_id);
1388         goto out_free_conn;
1389     } else
1390         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
1391                "succeeded portid=%06x total_sqe=%d.\n",
1392                fcport->rdata->ids.port_id, total_sqe);
1393 
1394     spin_lock_init(&fcport->rport_lock);
1395     atomic_set(&fcport->free_sqes, total_sqe);
1396     return 0;
1397 out_free_conn:
1398     qed_ops->release_conn(qedf->cdev, fcport->handle);
1399 out:
1400     return rval;
1401 }
1402 
1403 #define QEDF_TERM_BUFF_SIZE     10
1404 static void qedf_upload_connection(struct qedf_ctx *qedf,
1405     struct qedf_rport *fcport)
1406 {
1407     void *term_params;
1408     dma_addr_t term_params_dma;
1409 
1410     /* Term params needs to be a DMA coherent buffer as qed shares the
1411      * physical DMA address with the firmware. The buffer may be used in
1412      * the receive path so we may eventually have to move this.
1413      */
1414     term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
1415         &term_params_dma, GFP_KERNEL);
1416     if (!term_params)
1417         return;
1418 
1419     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
1420            "port_id=%06x.\n", fcport->rdata->ids.port_id);
1421 
1422     qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
1423     qed_ops->release_conn(qedf->cdev, fcport->handle);
1424 
1425     dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
1426         term_params_dma);
1427 }
1428 
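     /*
      * Tear down an offloaded session: flush outstanding I/O, upload the
      * connection if it was offloaded, free the SQ and drop the rdata
      * reference.
      */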
1429 static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
1430     struct qedf_rport *fcport)
1431 {
1432     struct fc_rport_priv *rdata = fcport->rdata;
1433 
1434     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
1435         fcport->rdata->ids.port_id);
1436 
1437     /* Flush any remaining i/o's before we upload the connection */
1438     qedf_flush_active_ios(fcport, -1);
1439 
1440     if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
1441         qedf_upload_connection(qedf, fcport);
1442     qedf_free_sq(qedf, fcport);
1443     fcport->rdata = NULL;
1444     fcport->qedf = NULL;
1445     kref_put(&rdata->kref, fc_rport_destroy);
1446 }
1447 
1448 /*
1449  * This event_callback is called after successful completion of libfc
1450  * initiated target login. qedf can proceed with initiating the session
1451  * establishment.
1452  */
1453 static void qedf_rport_event_handler(struct fc_lport *lport,
1454                 struct fc_rport_priv *rdata,
1455                 enum fc_rport_event event)
1456 {
1457     struct qedf_ctx *qedf = lport_priv(lport);
1458     struct fc_rport *rport = rdata->rport;
1459     struct fc_rport_libfc_priv *rp;
1460     struct qedf_rport *fcport;
1461     u32 port_id;
1462     int rval;
1463     unsigned long flags;
1464 
1465     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
1466            "port_id = 0x%x\n", event, rdata->ids.port_id);
1467 
1468     switch (event) {
1469     case RPORT_EV_READY:
1470         if (!rport) {
1471             QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
1472             break;
1473         }
1474 
1475         rp = rport->dd_data;
1476         fcport = (struct qedf_rport *)&rp[1];
1477         fcport->qedf = qedf;
1478 
1479         if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
1480             QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
1481                 "portid=0x%x as max number of offloaded sessions "
1482                 "reached.\n", rdata->ids.port_id);
1483             return;
1484         }
1485 
1486         /*
1487          * Don't try to offload the session again. Can happen when we
1488          * get an ADISC
1489          */
1490         if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1491             QEDF_WARN(&(qedf->dbg_ctx), "Session already "
1492                    "offloaded, portid=0x%x.\n",
1493                    rdata->ids.port_id);
1494             return;
1495         }
1496 
1497         if (rport->port_id == FC_FID_DIR_SERV) {
1498             /*
1499              * qedf_rport structure doesn't exist for
1500              * directory server.
1501              * We should not come here, as lport will
1502              * take care of fabric login
1503              */
1504             QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
1505                 "exist for dir server port_id=%x\n",
1506                 rdata->ids.port_id);
1507             break;
1508         }
1509 
1510         if (rdata->spp_type != FC_TYPE_FCP) {
1511             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1512                 "Not offloading since spp type isn't FCP\n");
1513             break;
1514         }
1515         if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
1516             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1517                 "Not FCP target so not offloading\n");
1518             break;
1519         }
1520 
1521         /* Initial reference held on entry, so this can't fail */
1522         kref_get(&rdata->kref);
1523         fcport->rdata = rdata;
1524         fcport->rport = rport;
1525 
1526         rval = qedf_alloc_sq(qedf, fcport);
1527         if (rval) {
1528             qedf_cleanup_fcport(qedf, fcport);
1529             break;
1530         }
1531 
1532         /* Set device type */
1533         if (rdata->flags & FC_RP_FLAGS_RETRY &&
1534             rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
1535             !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
1536             fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
1537             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1538                 "portid=%06x is a TAPE device.\n",
1539                 rdata->ids.port_id);
1540         } else {
1541             fcport->dev_type = QEDF_RPORT_TYPE_DISK;
1542         }
1543 
1544         rval = qedf_offload_connection(qedf, fcport);
1545         if (rval) {
1546             qedf_cleanup_fcport(qedf, fcport);
1547             break;
1548         }
1549 
1550         /* Add fcport to the qedf_ctx list of offloaded ports */
1551         spin_lock_irqsave(&qedf->hba_lock, flags);
1552         list_add_rcu(&fcport->peers, &qedf->fcports);
1553         spin_unlock_irqrestore(&qedf->hba_lock, flags);
1554 
1555         /*
1556          * Set the session ready bit to let everyone know that this
1557          * connection is ready for I/O
1558          */
1559         set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
1560         atomic_inc(&qedf->num_offloads);
1561 
1562         break;
1563     case RPORT_EV_LOGO:
1564     case RPORT_EV_FAILED:
1565     case RPORT_EV_STOP:
1566         port_id = rdata->ids.port_id;
1567         if (port_id == FC_FID_DIR_SERV)
1568             break;
1569 
1570         if (rdata->spp_type != FC_TYPE_FCP) {
1571             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1572                 "No action since spp type isn't FCP\n");
1573             break;
1574         }
1575         if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
1576             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1577                 "Not FCP target so no action\n");
1578             break;
1579         }
1580 
1581         if (!rport) {
1582             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1583                 "port_id=%x - rport notcreated Yet!!\n", port_id);
1584             break;
1585         }
1586         rp = rport->dd_data;
1587         /*
1588          * Perform session upload. Note that rdata->peers is already
1589          * removed from disc->rports list before we get this event.
1590          */
1591         fcport = (struct qedf_rport *)&rp[1];
1592 
1593         spin_lock_irqsave(&fcport->rport_lock, flags);
1594         /* Only free this fcport if it is offloaded already */
1595         if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
1596             !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1597             &fcport->flags)) {
1598             set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1599                 &fcport->flags);
1600             spin_unlock_irqrestore(&fcport->rport_lock, flags);
1601             qedf_cleanup_fcport(qedf, fcport);
1602             /*
1603              * Remove fcport from the qedf_ctx list of offloaded
1604              * ports
1605              */
1606             spin_lock_irqsave(&qedf->hba_lock, flags);
1607             list_del_rcu(&fcport->peers);
1608             spin_unlock_irqrestore(&qedf->hba_lock, flags);
1609 
1610             clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1611                 &fcport->flags);
1612             atomic_dec(&qedf->num_offloads);
1613         } else {
1614             spin_unlock_irqrestore(&fcport->rport_lock, flags);
1615         }
1616         break;
1617 
1618     case RPORT_EV_NONE:
1619         break;
1620     }
1621 }
1622 
1623 static void qedf_abort_io(struct fc_lport *lport)
1624 {
1625     /* NO-OP but need to fill in the template */
1626 }
1627 
1628 static void qedf_fcp_cleanup(struct fc_lport *lport)
1629 {
1630     /*
1631      * NO-OP but need to fill in template to prevent a NULL
1632      * function pointer dereference during link down. I/Os
1633      * will be flushed when port is uploaded.
1634      */
1635 }
1636 
1637 static struct libfc_function_template qedf_lport_template = {
1638     .frame_send     = qedf_xmit,
1639     .fcp_abort_io       = qedf_abort_io,
1640     .fcp_cleanup        = qedf_fcp_cleanup,
1641     .rport_event_callback   = qedf_rport_event_handler,
1642     .elsct_send     = qedf_elsct_send,
1643 };
1644 
1645 static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
1646 {
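    /* FIP_MODE_AUTO lets libfcoe determine the FIP mode on its own */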
1647     fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
1648 
1649     qedf->ctlr.send = qedf_fip_send;
1650     qedf->ctlr.get_src_addr = qedf_get_src_mac;
1651     ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
1652 }
1653 
1654 static void qedf_setup_fdmi(struct qedf_ctx *qedf)
1655 {
1656     struct fc_lport *lport = qedf->lport;
1657     u8 buf[8];
1658     int pos;
1659     uint32_t i;
1660 
1661     /*
1662      * fdmi_enabled needs to be set for libfc
1663      * to execute FDMI registration
1664      */
1665     lport->fdmi_enabled = 1;
1666 
1667     /*
1668      * Set up the fc_host attributes that will be used to fill
1669      * in the FDMI information.
1670      */
1671 
1672     /* Get the PCI-e Device Serial Number Capability */
1673     pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
1674     if (pos) {
1675         pos += 4;
1676         for (i = 0; i < 8; i++)
1677             pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
1678 
1679         snprintf(fc_host_serial_number(lport->host),
1680             FC_SERIAL_NUMBER_SIZE,
1681             "%02X%02X%02X%02X%02X%02X%02X%02X",
1682             buf[7], buf[6], buf[5], buf[4],
1683             buf[3], buf[2], buf[1], buf[0]);
1684     } else
1685         snprintf(fc_host_serial_number(lport->host),
1686             FC_SERIAL_NUMBER_SIZE, "Unknown");
1687 
1688     snprintf(fc_host_manufacturer(lport->host),
1689         FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
1690 
1691     if (qedf->pdev->device == QL45xxx) {
1692         snprintf(fc_host_model(lport->host),
1693             FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
1694 
1695         snprintf(fc_host_model_description(lport->host),
1696             FC_SYMBOLIC_NAME_SIZE, "%s",
1697             "Marvell FastLinQ QL45xxx FCoE Adapter");
1698     }
1699 
1700     if (qedf->pdev->device == QL41xxx) {
1701         snprintf(fc_host_model(lport->host),
1702             FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
1703 
1704         snprintf(fc_host_model_description(lport->host),
1705             FC_SYMBOLIC_NAME_SIZE, "%s",
1706             "Marvell FastLinQ QL41xxx FCoE Adapter");
1707     }
1708 
1709     snprintf(fc_host_hardware_version(lport->host),
1710         FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
1711 
1712     snprintf(fc_host_driver_version(lport->host),
1713         FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
1714 
1715     snprintf(fc_host_firmware_version(lport->host),
1716         FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
1717         FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
1718         FW_ENGINEERING_VERSION);
1719 
1720     snprintf(fc_host_vendor_identifier(lport->host),
1721         FC_VENDOR_IDENTIFIER, "%s", "Marvell");
1722 
1723 }
1724 
1725 static int qedf_lport_setup(struct qedf_ctx *qedf)
1726 {
1727     struct fc_lport *lport = qedf->lport;
1728 
1729     lport->link_up = 0;
1730     lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1731     lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1732     lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1733         FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1734     lport->boot_time = jiffies;
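    /* FC error-detect (E_D_TOV) and resource-allocation (R_A_TOV)
     * timeouts, in milliseconds
     */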
1735     lport->e_d_tov = 2 * 1000;
1736     lport->r_a_tov = 10 * 1000;
1737 
1738     /* Set NPIV support */
1739     lport->does_npiv = 1;
1740     fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
1741 
1742     fc_set_wwnn(lport, qedf->wwnn);
1743     fc_set_wwpn(lport, qedf->wwpn);
1744 
1745     if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
1746         QEDF_ERR(&qedf->dbg_ctx,
1747              "fcoe_libfc_config failed.\n");
1748         return -ENOMEM;
1749     }
1750 
1751     /* Allocate the exchange manager */
1752     fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
1753               0xfffe, NULL);
1754 
1755     if (fc_lport_init_stats(lport))
1756         return -ENOMEM;
1757 
1758     /* Finish lport config */
1759     fc_lport_config(lport);
1760 
1761     /* Set max frame size */
1762     fc_set_mfs(lport, QEDF_MFS);
1763     fc_host_maxframe_size(lport->host) = lport->mfs;
1764 
1765     /* Set default dev_loss_tmo based on module parameter */
1766     fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
1767 
1768     /* Set symbolic node name */
1769     if (qedf->pdev->device == QL45xxx)
1770         snprintf(fc_host_symbolic_name(lport->host), 256,
1771             "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
1772 
1773     if (qedf->pdev->device == QL41xxx)
1774         snprintf(fc_host_symbolic_name(lport->host), 256,
1775             "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
1776 
1777     qedf_setup_fdmi(qedf);
1778 
1779     return 0;
1780 }
1781 
1782 /*
1783  * NPIV functions
1784  */
1785 
1786 static int qedf_vport_libfc_config(struct fc_vport *vport,
1787     struct fc_lport *lport)
1788 {
1789     lport->link_up = 0;
1790     lport->qfull = 0;
1791     lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1792     lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1793     lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1794         FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1795     lport->boot_time = jiffies;
1796     lport->e_d_tov = 2 * 1000;
1797     lport->r_a_tov = 10 * 1000;
1798     lport->does_npiv = 1; /* Temporary until we add NPIV support */
1799 
1800     /* Allocate stats for vport */
1801     if (fc_lport_init_stats(lport))
1802         return -ENOMEM;
1803 
1804     /* Finish lport config */
1805     fc_lport_config(lport);
1806 
1807     /* offload related configuration */
1808     lport->crc_offload = 0;
1809     lport->seq_offload = 0;
1810     lport->lro_enabled = 0;
1811     lport->lro_xid = 0;
1812     lport->lso_max = 0;
1813 
1814     return 0;
1815 }
1816 
1817 static int qedf_vport_create(struct fc_vport *vport, bool disabled)
1818 {
1819     struct Scsi_Host *shost = vport_to_shost(vport);
1820     struct fc_lport *n_port = shost_priv(shost);
1821     struct fc_lport *vn_port;
1822     struct qedf_ctx *base_qedf = lport_priv(n_port);
1823     struct qedf_ctx *vport_qedf;
1824 
1825     char buf[32];
1826     int rc = 0;
1827 
1828     rc = fcoe_validate_vport_create(vport);
1829     if (rc) {
1830         fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1831         QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
1832                "WWPN (0x%s) already exists.\n", buf);
1833         return rc;
1834     }
1835 
1836     if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
1837         QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
1838                "because link is not up.\n");
1839         return -EIO;
1840     }
1841 
1842     vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
1843     if (!vn_port) {
1844         QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
1845                "for vport.\n");
1846         return -ENOMEM;
1847     }
1848 
1849     fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1850     QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
1851         buf);
1852 
1853     /* Copy some fields from base_qedf */
1854     vport_qedf = lport_priv(vn_port);
1855     memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
1856 
1857     /* Set qedf data specific to this vport */
1858     vport_qedf->lport = vn_port;
1859     /* Use same hba_lock as base_qedf */
1860     vport_qedf->hba_lock = base_qedf->hba_lock;
1861     vport_qedf->pdev = base_qedf->pdev;
1862     vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
1863     init_completion(&vport_qedf->flogi_compl);
1864     INIT_LIST_HEAD(&vport_qedf->fcports);
1865     INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
1866 
1867     rc = qedf_vport_libfc_config(vport, vn_port);
1868     if (rc) {
1869         QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
1870             "for lport stats.\n");
1871         goto err;
1872     }
1873 
1874     fc_set_wwnn(vn_port, vport->node_name);
1875     fc_set_wwpn(vn_port, vport->port_name);
1876     vport_qedf->wwnn = vn_port->wwnn;
1877     vport_qedf->wwpn = vn_port->wwpn;
1878 
1879     vn_port->host->transportt = qedf_fc_vport_transport_template;
1880     vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
1881     vn_port->host->max_lun = qedf_max_lun;
1882     vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
1883     vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
1884     vn_port->host->max_id = QEDF_MAX_SESSIONS;
1885 
1886     rc = scsi_add_host(vn_port->host, &vport->dev);
1887     if (rc) {
1888         QEDF_WARN(&base_qedf->dbg_ctx,
1889               "Error adding Scsi_Host rc=0x%x.\n", rc);
1890         goto err;
1891     }
1892 
1893     /* Set default dev_loss_tmo based on module parameter */
1894     fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
1895 
1896     /* Initialize the libfc internals for this vport */
1897     memcpy(&vn_port->tt, &qedf_lport_template,
1898         sizeof(qedf_lport_template));
1899     fc_exch_init(vn_port);
1900     fc_elsct_init(vn_port);
1901     fc_lport_init(vn_port);
1902     fc_disc_init(vn_port);
1903     fc_disc_config(vn_port, vn_port);
1904 
1905 
1906     /* Allocate the exchange manager */
1907     shost = vport_to_shost(vport);
1908     n_port = shost_priv(shost);
1909     fc_exch_mgr_list_clone(n_port, vn_port);
1910 
1911     /* Set max frame size */
1912     fc_set_mfs(vn_port, QEDF_MFS);
1913 
1914     fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;
1915 
1916     if (disabled) {
1917         fc_vport_set_state(vport, FC_VPORT_DISABLED);
1918     } else {
1919         vn_port->boot_time = jiffies;
1920         fc_fabric_login(vn_port);
1921         fc_vport_setlink(vn_port);
1922     }
1923 
1924     QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
1925            vn_port);
1926 
1927     /* Set up debug context for vport */
1928     vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
1929     vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
1930 
1931     return 0;
1932 
1933 err:
1934     scsi_host_put(vn_port->host);
1935     return rc;
1936 }
1937 
1938 static int qedf_vport_destroy(struct fc_vport *vport)
1939 {
1940     struct Scsi_Host *shost = vport_to_shost(vport);
1941     struct fc_lport *n_port = shost_priv(shost);
1942     struct fc_lport *vn_port = vport->dd_data;
1943     struct qedf_ctx *qedf = lport_priv(vn_port);
1944 
1945     if (!qedf) {
1946         QEDF_ERR(NULL, "qedf is NULL.\n");
1947         goto out;
1948     }
1949 
1950     /* Set unloading bit on vport qedf_ctx to prevent more I/O */
1951     set_bit(QEDF_UNLOADING, &qedf->flags);
1952 
1953     mutex_lock(&n_port->lp_mutex);
1954     list_del(&vn_port->list);
1955     mutex_unlock(&n_port->lp_mutex);
1956 
1957     fc_fabric_logoff(vn_port);
1958     fc_lport_destroy(vn_port);
1959 
1960     /* Detach from scsi-ml */
1961     fc_remove_host(vn_port->host);
1962     scsi_remove_host(vn_port->host);
1963 
1964     /*
1965      * Only try to release the exchange manager if the vn_port
1966      * configuration is complete.
1967      */
1968     if (vn_port->state == LPORT_ST_READY)
1969         fc_exch_mgr_free(vn_port);
1970 
1971     /* Free memory used by statistical counters */
1972     fc_lport_free_stats(vn_port);
1973 
1974     /* Release Scsi_Host */
1975     scsi_host_put(vn_port->host);
1976 
1977 out:
1978     return 0;
1979 }
1980 
1981 static int qedf_vport_disable(struct fc_vport *vport, bool disable)
1982 {
1983     struct fc_lport *lport = vport->dd_data;
1984 
1985     if (disable) {
1986         fc_vport_set_state(vport, FC_VPORT_DISABLED);
1987         fc_fabric_logoff(lport);
1988     } else {
1989         lport->boot_time = jiffies;
1990         fc_fabric_login(lport);
1991         fc_vport_setlink(lport);
1992     }
1993     return 0;
1994 }
1995 
1996 /*
1997  * During removal we need to wait for all the vports associated with a port
1998  * to be destroyed so we avoid a race condition where libfc is still trying
1999  * to reap vports while the driver remove function has already reaped the
2000  * driver contexts associated with the physical port.
2001  */
2002 static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
2003 {
2004     struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
2005 
2006     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2007         "Entered.\n");
2008     while (fc_host->npiv_vports_inuse > 0) {
2009         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2010             "Waiting for all vports to be reaped.\n");
2011         msleep(1000);
2012     }
2013 }
2014 
2015 /**
2016  * qedf_fcoe_reset - Resets the fcoe
2017  *
2018  * @shost: shost the reset is from
2019  *
2020  * Returns: always 0
2021  */
2022 static int qedf_fcoe_reset(struct Scsi_Host *shost)
2023 {
2024     struct fc_lport *lport = shost_priv(shost);
2025 
2026     qedf_ctx_soft_reset(lport);
2027     return 0;
2028 }
2029 
2030 static void qedf_get_host_port_id(struct Scsi_Host *shost)
2031 {
2032     struct fc_lport *lport = shost_priv(shost);
2033 
2034     fc_host_port_id(shost) = lport->port_id;
2035 }
2036 
2037 static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
2038     *shost)
2039 {
2040     struct fc_host_statistics *qedf_stats;
2041     struct fc_lport *lport = shost_priv(shost);
2042     struct qedf_ctx *qedf = lport_priv(lport);
2043     struct qed_fcoe_stats *fw_fcoe_stats;
2044 
2045     qedf_stats = fc_get_host_stats(shost);
2046 
2047     /* We don't collect offload stats for specific NPIV ports */
2048     if (lport->vport)
2049         goto out;
2050 
2051     fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
2052     if (!fw_fcoe_stats) {
2053         QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
2054             "fw_fcoe_stats.\n");
2055         goto out;
2056     }
2057 
2058     mutex_lock(&qedf->stats_mutex);
2059 
2060     /* Query firmware for offload stats */
2061     qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
2062 
2063     /*
2064      * The expectation is that we add our offload stats to the stats
2065      * being maintained by libfc each time the fc_get_host_stats callback
2066      * is invoked. The additions are not carried over for each call to
2067      * the fc_get_host_stats callback.
2068      */
2069     qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
2070         fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
2071         fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
2072     qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
2073         fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
2074         fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
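    /*
     * Use div_u64() rather than do_div() here: do_div() divides its
     * argument in place and returns the remainder, which would both
     * corrupt the byte counts used for rx_words/tx_words below and
     * add the wrong quantity to the megabyte counters.
     */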
2075     qedf_stats->fcp_input_megabytes +=
2076         div_u64(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
2077     qedf_stats->fcp_output_megabytes +=
2078         div_u64(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
2079     qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
2080     qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
2081     qedf_stats->invalid_crc_count +=
2082         fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
2083     qedf_stats->dumped_frames =
2084         fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
2085     qedf_stats->error_frames +=
2086         fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
2087     qedf_stats->fcp_input_requests += qedf->input_requests;
2088     qedf_stats->fcp_output_requests += qedf->output_requests;
2089     qedf_stats->fcp_control_requests += qedf->control_requests;
2090     qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
2091     qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
2092 
2093     mutex_unlock(&qedf->stats_mutex);
2094     kfree(fw_fcoe_stats);
2095 out:
2096     return qedf_stats;
2097 }
2098 
2099 static struct fc_function_template qedf_fc_transport_fn = {
2100     .show_host_node_name = 1,
2101     .show_host_port_name = 1,
2102     .show_host_supported_classes = 1,
2103     .show_host_supported_fc4s = 1,
2104     .show_host_active_fc4s = 1,
2105     .show_host_maxframe_size = 1,
2106 
2107     .get_host_port_id = qedf_get_host_port_id,
2108     .show_host_port_id = 1,
2109     .show_host_supported_speeds = 1,
2110     .get_host_speed = fc_get_host_speed,
2111     .show_host_speed = 1,
2112     .show_host_port_type = 1,
2113     .get_host_port_state = fc_get_host_port_state,
2114     .show_host_port_state = 1,
2115     .show_host_symbolic_name = 1,
2116 
2117     /*
2118      * Tell FC transport to allocate enough space to store the backpointer
2119      * for the associated qedf_rport struct.
2120      */
2121     .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
2122                 sizeof(struct qedf_rport)),
2123     .show_rport_maxframe_size = 1,
2124     .show_rport_supported_classes = 1,
2125     .show_host_fabric_name = 1,
2126     .show_starget_node_name = 1,
2127     .show_starget_port_name = 1,
2128     .show_starget_port_id = 1,
2129     .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2130     .show_rport_dev_loss_tmo = 1,
2131     .get_fc_host_stats = qedf_fc_get_host_stats,
2132     .issue_fc_host_lip = qedf_fcoe_reset,
2133     .vport_create = qedf_vport_create,
2134     .vport_delete = qedf_vport_destroy,
2135     .vport_disable = qedf_vport_disable,
2136     .bsg_request = fc_lport_bsg_request,
2137 };
2138 
2139 static struct fc_function_template qedf_fc_vport_transport_fn = {
2140     .show_host_node_name = 1,
2141     .show_host_port_name = 1,
2142     .show_host_supported_classes = 1,
2143     .show_host_supported_fc4s = 1,
2144     .show_host_active_fc4s = 1,
2145     .show_host_maxframe_size = 1,
2146     .show_host_port_id = 1,
2147     .show_host_supported_speeds = 1,
2148     .get_host_speed = fc_get_host_speed,
2149     .show_host_speed = 1,
2150     .show_host_port_type = 1,
2151     .get_host_port_state = fc_get_host_port_state,
2152     .show_host_port_state = 1,
2153     .show_host_symbolic_name = 1,
2154     .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
2155                 sizeof(struct qedf_rport)),
2156     .show_rport_maxframe_size = 1,
2157     .show_rport_supported_classes = 1,
2158     .show_host_fabric_name = 1,
2159     .show_starget_node_name = 1,
2160     .show_starget_port_name = 1,
2161     .show_starget_port_id = 1,
2162     .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2163     .show_rport_dev_loss_tmo = 1,
2164     .get_fc_host_stats = fc_get_host_stats,
2165     .issue_fc_host_lip = qedf_fcoe_reset,
2166     .bsg_request = fc_lport_bsg_request,
2167 };
2168 
2169 static bool qedf_fp_has_work(struct qedf_fastpath *fp)
2170 {
2171     struct qedf_ctx *qedf = fp->qedf;
2172     struct global_queue *que;
2173     struct qed_sb_info *sb_info = fp->sb_info;
2174     struct status_block *sb = sb_info->sb_virt;
2175     u16 prod_idx;
2176 
2177     /* Get the pointer to the global CQ this completion is on */
2178     que = qedf->global_queues[fp->sb_id];
2179 
2180     /* Be sure all responses have been written to PI */
2181     rmb();
2182 
2183     /* Get the current firmware producer index */
2184     prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
2185 
2186     return (que->cq_prod_idx != prod_idx);
2187 }
2188 
2189 /*
2190  * Interrupt handler code.
2191  */
2192 
2193 /* Process completion queue and copy CQE contents for deferred processing
2194  *
2195  * Return true if we should wake the I/O thread, false if not.
2196  */
2197 static bool qedf_process_completions(struct qedf_fastpath *fp)
2198 {
2199     struct qedf_ctx *qedf = fp->qedf;
2200     struct qed_sb_info *sb_info = fp->sb_info;
2201     struct status_block *sb = sb_info->sb_virt;
2202     struct global_queue *que;
2203     u16 prod_idx;
2204     struct fcoe_cqe *cqe;
2205     struct qedf_io_work *io_work;
2206     int num_handled = 0;
2207     unsigned int cpu;
2208     struct qedf_ioreq *io_req = NULL;
2209     u16 xid;
2210     u16 new_cqes;
2211     u32 comp_type;
2212 
2213     /* Get the current firmware producer index */
2214     prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
2215 
2216     /* Get the pointer to the global CQ this completion is on */
2217     que = qedf->global_queues[fp->sb_id];
2218 
2219     /* Calculate the number of new CQEs since last processing; the 16-bit producer index may have wrapped */
2220     new_cqes = (prod_idx >= que->cq_prod_idx) ?
2221         (prod_idx - que->cq_prod_idx) :
2222         0x10000 - que->cq_prod_idx + prod_idx;
2223 
2224     /* Save producer index */
2225     que->cq_prod_idx = prod_idx;
2226 
2227     while (new_cqes) {
2228         fp->completions++;
2229         num_handled++;
2230         cqe = &que->cq[que->cq_cons_idx];
2231 
2232         comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2233             FCOE_CQE_CQE_TYPE_MASK;
2234 
2235         /*
2236          * Process unsolicited CQEs directly in the interrupt handler
2237          * since we need the fastpath ID
2238          */
2239         if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
2240             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2241                "Unsolicated CQE.\n");
2242             qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
2243             /*
2244              * Don't add a work list item.  Increment the consumer
2245              * index and move on.
2246              */
2247             goto inc_idx;
2248         }
2249 
2250         xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2251         io_req = &qedf->cmd_mgr->cmds[xid];
2252 
2253         /*
2254          * Figure out which percpu thread we should queue this I/O
2255          * on.
2256          */
2257         if (!io_req)
2258             /* If there is no io_req associated with this CQE,
2259              * just queue it on CPU 0
2260              */
2261             cpu = 0;
2262         else {
2263             cpu = io_req->cpu;
2264             io_req->int_cpu = smp_processor_id();
2265         }
2266 
2267         io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2268         if (!io_work) {
2269             QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2270                    "work for I/O completion.\n");
2271             goto inc_idx;    /* drop this CQE but still advance the consumer index */
2272         }
2273         memset(io_work, 0, sizeof(struct qedf_io_work));
2274 
2275         INIT_WORK(&io_work->work, qedf_fp_io_handler);
2276 
2277         /* Copy contents of CQE for deferred processing */
2278         memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2279 
2280         io_work->qedf = fp->qedf;
2281         io_work->fp = NULL; /* Only used for unsolicited frames */
2282 
2283         queue_work_on(cpu, qedf_io_wq, &io_work->work);
2284 
2285 inc_idx:
2286         que->cq_cons_idx++;
2287         if (que->cq_cons_idx == fp->cq_num_entries)
2288             que->cq_cons_idx = 0;
2289         new_cqes--;
2290     }
2291 
2292     return true;
2293 }
2294 
2295 
2296 /* MSI-X fastpath handler code */
2297 static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
2298 {
2299     struct qedf_fastpath *fp = dev_id;
2300 
2301     if (!fp) {
2302         QEDF_ERR(NULL, "fp is null.\n");
2303         return IRQ_HANDLED;
2304     }
2305     if (!fp->sb_info) {
2306         QEDF_ERR(NULL, "fp->sb_info is null.\n");
2307         return IRQ_HANDLED;
2308     }
2309 
2310     /*
2311      * Disable interrupts for this status block while we process new
2312      * completions
2313      */
2314     qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
2315 
2316     while (1) {
2317         qedf_process_completions(fp);
2318 
2319         if (qedf_fp_has_work(fp) == 0) {
2320             /* Update the sb information */
2321             qed_sb_update_sb_idx(fp->sb_info);
2322 
2323             /* Check for more work */
2324             rmb();
2325 
2326             if (qedf_fp_has_work(fp) == 0) {
2327                 /* Re-enable interrupts */
2328                 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
2329                 return IRQ_HANDLED;
2330             }
2331         }
2332     }
2333 
2334     /* Not reached: the loop above only exits via return */
2335     return IRQ_HANDLED;
2336 }
2337 
2338 /* simd handler for MSI/INTa */
2339 static void qedf_simd_int_handler(void *cookie)
2340 {
2341     /* Cookie is qedf_ctx struct */
2342     struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2343 
2344     QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
2345 }
2346 
2347 #define QEDF_SIMD_HANDLER_NUM       0
2348 static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
2349 {
2350     int i;
2351     u16 vector_idx = 0;
2352     u32 vector;
2353 
2354     if (qedf->int_info.msix_cnt) {
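        /*
         * MSI-X vectors are interleaved per hardware function, so
         * step by num_hwfns and offset by the affinitized hwfn index
         * to reach this PF's vectors.
         */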
2355         for (i = 0; i < qedf->int_info.used_cnt; i++) {
2356             vector_idx = i * qedf->dev_info.common.num_hwfns +
2357                 qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2358             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2359                   "Freeing IRQ #%d vector_idx=%d.\n",
2360                   i, vector_idx);
2361             vector = qedf->int_info.msix[vector_idx].vector;
2362             synchronize_irq(vector);
2363             irq_set_affinity_hint(vector, NULL);
2364             irq_set_affinity_notifier(vector, NULL);
2365             free_irq(vector, &qedf->fp_array[i]);
2366         }
2367     } else
2368         qed_ops->common->simd_handler_clean(qedf->cdev,
2369             QEDF_SIMD_HANDLER_NUM);
2370 
2371     qedf->int_info.used_cnt = 0;
2372     qed_ops->common->set_fp_int(qedf->cdev, 0);
2373 }
2374 
2375 static int qedf_request_msix_irq(struct qedf_ctx *qedf)
2376 {
2377     int i, rc, cpu;
2378     u16 vector_idx = 0;
2379     u32 vector;
2380 
2381     cpu = cpumask_first(cpu_online_mask);
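    /* Spread the fastpath IRQ affinity hints round-robin across the
     * online CPUs
     */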
2382     for (i = 0; i < qedf->num_queues; i++) {
2383         vector_idx = i * qedf->dev_info.common.num_hwfns +
2384             qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2385         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2386               "Requesting IRQ #%d vector_idx=%d.\n",
2387               i, vector_idx);
2388         vector = qedf->int_info.msix[vector_idx].vector;
2389         rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
2390                  &qedf->fp_array[i]);
2391 
2392         if (rc) {
2393             QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
2394             qedf_sync_free_irqs(qedf);
2395             return rc;
2396         }
2397 
2398         qedf->int_info.used_cnt++;
2399         rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
2400         cpu = cpumask_next(cpu, cpu_online_mask);
2401     }
2402 
2403     return 0;
2404 }
2405 
2406 static int qedf_setup_int(struct qedf_ctx *qedf)
2407 {
2408     int rc = 0;
2409 
2410     /*
2411      * Learn interrupt configuration
2412      */
2413     rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
2414     if (rc <= 0)
2415         return -EINVAL;
2416 
2417     rc  = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
2418     if (rc)
2419         return -EINVAL;
2420 
2421     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "msix_cnt = "
2422            "0x%x, num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
2423            num_online_cpus());
2424 
2425     if (qedf->int_info.msix_cnt)
2426         return qedf_request_msix_irq(qedf);
2427 
2428     qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
2429         QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
2430     qedf->int_info.used_cnt = 1;
2431 
2432     QEDF_ERR(&qedf->dbg_ctx,
2433          "Cannot load driver due to a lack of MSI-X vectors.\n");
2434     return -EINVAL;
2435 }
2436 
2437 /* Main function for libfc frame reception */
2438 static void qedf_recv_frame(struct qedf_ctx *qedf,
2439     struct sk_buff *skb)
2440 {
2441     u32 fr_len;
2442     struct fc_lport *lport;
2443     struct fc_frame_header *fh;
2444     struct fcoe_crc_eof crc_eof;
2445     struct fc_frame *fp;
2446     u8 *mac = NULL;
2447     u8 *dest_mac = NULL;
2448     struct fcoe_hdr *hp;
2449     struct qedf_rport *fcport;
2450     struct fc_lport *vn_port;
2451     u32 f_ctl;
2452 
2453     lport = qedf->lport;
2454     if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
2455         QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
2456         kfree_skb(skb);
2457         return;
2458     }
2459 
2460     if (skb_is_nonlinear(skb))
2461         skb_linearize(skb);
2462     mac = eth_hdr(skb)->h_source;
2463     dest_mac = eth_hdr(skb)->h_dest;
2464 
2465     /* Pull the header */
2466     hp = (struct fcoe_hdr *)skb->data;
2467     fh = (struct fc_frame_header *) skb_transport_header(skb);
2468     skb_pull(skb, sizeof(struct fcoe_hdr));
2469     fr_len = skb->len - sizeof(struct fcoe_crc_eof);
2470 
2471     fp = (struct fc_frame *)skb;
2472     fc_frame_init(fp);
2473     fr_dev(fp) = lport;
2474     fr_sof(fp) = hp->fcoe_sof;
2475     if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
2476         QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
2477         kfree_skb(skb);
2478         return;
2479     }
2480     fr_eof(fp) = crc_eof.fcoe_eof;
2481     fr_crc(fp) = crc_eof.fcoe_crc32;
2482     if (pskb_trim(skb, fr_len)) {
2483         QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
2484         kfree_skb(skb);
2485         return;
2486     }
2487 
2488     fh = fc_frame_header_get(fp);
2489 
2490     /*
2491      * Invalid frame filters.
2492      */
2493 
2494     if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
2495         fh->fh_type == FC_TYPE_FCP) {
2496         /* Drop FCP data. We don't handle it in the L2 path */
2497         kfree_skb(skb);
2498         return;
2499     }
2500     if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
2501         fh->fh_type == FC_TYPE_ELS) {
2502         switch (fc_frame_payload_op(fp)) {
2503         case ELS_LOGO:
2504             if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
2505                 /* drop non-FIP LOGO */
2506                 kfree_skb(skb);
2507                 return;
2508             }
2509             break;
2510         }
2511     }
2512 
2513     if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
2514         /* Drop incoming ABTS */
2515         kfree_skb(skb);
2516         return;
2517     }
2518 
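    /*
     * With FPMA addressing the low three bytes of the FCoE MAC
     * encode the FC ID, so the destination MAC must agree with the
     * D_ID in the frame header.
     */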
2519     if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
2520         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2521             "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
2522         kfree_skb(skb);
2523         return;
2524     }
2525 
2526     if (qedf->ctlr.state) {
2527         if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
2528             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2529                 "Wrong source address: mac:%pM dest_addr:%pM.\n",
2530                 mac, qedf->ctlr.dest_addr);
2531             kfree_skb(skb);
2532             return;
2533         }
2534     }
2535 
2536     vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
2537 
2538     /*
2539      * If the destination ID from the frame header does not match what we
2540      * have on record for lport and the search for a NPIV port came up
2541      * empty then this is not addressed to our port so simply drop it.
2542      */
2543     if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
2544         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2545               "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
2546               lport->port_id, ntoh24(fh->fh_d_id));
2547         kfree_skb(skb);
2548         return;
2549     }
2550 
2551     f_ctl = ntoh24(fh->fh_f_ctl);
2552     if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
2553         (f_ctl & FC_FC_EX_CTX)) {
2554         /* Drop incoming ABTS response that has both SEQ/EX CTX set */
2555         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2556               "Dropping ABTS response as both SEQ/EX CTX set.\n");
2557         kfree_skb(skb);
2558         return;
2559     }
2560 
2561     /*
2562      * If a connection is uploading, drop incoming FCoE frames as there
2563      * is a small window where we could try to return a frame while libfc
2564      * is trying to clean things up.
2565      */
2566 
2567     /* Get fcport associated with d_id if it exists */
2568     fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
2569 
2570     if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
2571         &fcport->flags)) {
2572         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2573             "Connection uploading, dropping fp=%p.\n", fp);
2574         kfree_skb(skb);
2575         return;
2576     }
2577 
2578     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
2579         "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
2580         ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2581         fh->fh_type);
2582     if (qedf_dump_frames)
2583         print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
2584             1, skb->data, skb->len, false);
2585     fc_exch_recv(lport, fp);
2586 }
2587 
2588 static void qedf_ll2_process_skb(struct work_struct *work)
2589 {
2590     struct qedf_skb_work *skb_work =
2591         container_of(work, struct qedf_skb_work, work);
2592     struct qedf_ctx *qedf = skb_work->qedf;
2593     struct sk_buff *skb = skb_work->skb;
2594     struct ethhdr *eh;
2595 
2596     if (!qedf) {
2597         QEDF_ERR(NULL, "qedf is NULL\n");
2598         goto err_out;
2599     }
2600 
2601     eh = (struct ethhdr *)skb->data;
2602 
2603     /* Undo VLAN encapsulation */
2604     if (eh->h_proto == htons(ETH_P_8021Q)) {
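        /* Shift the dst/src MAC addresses up over the 4-byte VLAN tag */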
2605         memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
2606         eh = skb_pull(skb, VLAN_HLEN);
2607         skb_reset_mac_header(skb);
2608     }
2609 
2610     /*
2611      * Process either a FIP frame or an FCoE frame based on the
2612      * protocol value.  If it's neither, just drop the
2613      * frame.
2614      */
2615     if (eh->h_proto == htons(ETH_P_FIP)) {
2616         qedf_fip_recv(qedf, skb);
2617         goto out;
2618     } else if (eh->h_proto == htons(ETH_P_FCOE)) {
2619         __skb_pull(skb, ETH_HLEN);
2620         qedf_recv_frame(qedf, skb);
2621         goto out;
2622     } else
2623         goto err_out;
2624 
2625 err_out:
2626     kfree_skb(skb);
2627 out:
2628     kfree(skb_work);
2629     return;
2630 }
2631 
2632 static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
2633     u32 arg1, u32 arg2)
2634 {
2635     struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2636     struct qedf_skb_work *skb_work;
2637 
2638     if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
2639         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2640               "Dropping frame as link state is down.\n");
2641         kfree_skb(skb);
2642         return 0;
2643     }
2644 
2645     skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
2646     if (!skb_work) {
2647         QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
2648                "dropping frame.\n");
2649         kfree_skb(skb);
2650         return 0;
2651     }
2652 
2653     INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
2654     skb_work->skb = skb;
2655     skb_work->qedf = qedf;
2656     queue_work(qedf->ll2_recv_wq, &skb_work->work);
2657 
2658     return 0;
2659 }
2660 
2661 static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
2662     .rx_cb = qedf_ll2_rx,
2663     .tx_cb = NULL,
2664 };
2665 
2666 /* Main thread to process I/O completions */
2667 void qedf_fp_io_handler(struct work_struct *work)
2668 {
2669     struct qedf_io_work *io_work =
2670         container_of(work, struct qedf_io_work, work);
2671     u32 comp_type;
2672 
2673     /*
2674      * Deferred part of unsolicited CQE sends
2675      * frame to libfc.
2676      */
2677     comp_type = (io_work->cqe.cqe_data >>
2678         FCOE_CQE_CQE_TYPE_SHIFT) &
2679         FCOE_CQE_CQE_TYPE_MASK;
2680     if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
2681         io_work->fp)
2682         fc_exch_recv(io_work->qedf->lport, io_work->fp);
2683     else
2684         qedf_process_cqe(io_work->qedf, &io_work->cqe);
2685 
2686     kfree(io_work);
2687 }
2688 
2689 static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
2690     struct qed_sb_info *sb_info, u16 sb_id)
2691 {
2692     struct status_block *sb_virt;
2693     dma_addr_t sb_phys;
2694     int ret;
2695 
2696     sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
2697         sizeof(struct status_block), &sb_phys, GFP_KERNEL);
2698 
2699     if (!sb_virt) {
2700         QEDF_ERR(&qedf->dbg_ctx,
2701              "Status block allocation failed for id = %d.\n",
2702              sb_id);
2703         return -ENOMEM;
2704     }
2705 
2706     ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
2707         sb_id, QED_SB_TYPE_STORAGE);
2708 
2709     if (ret) {
2710         QEDF_ERR(&qedf->dbg_ctx,
2711              "Status block initialization failed (0x%x) for id = %d.\n", ret, sb_id);
2712         dma_free_coherent(&qedf->pdev->dev, sizeof(struct status_block), sb_virt, sb_phys); /* don't leak the SB */
2713         return ret;
2714     }
2715 
2716     return 0;
2717 }
2718 
2719 static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
2720 {
2721     if (sb_info->sb_virt)
2722         dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
2723             (void *)sb_info->sb_virt, sb_info->sb_phys);
2724 }
2725 
2726 static void qedf_destroy_sb(struct qedf_ctx *qedf)
2727 {
2728     int id;
2729     struct qedf_fastpath *fp = NULL;
2730 
2731     for (id = 0; id < qedf->num_queues; id++) {
2732         fp = &(qedf->fp_array[id]);
2733         if (fp->sb_id == QEDF_SB_ID_NULL)
2734             break;
2735         qedf_free_sb(qedf, fp->sb_info);
2736         kfree(fp->sb_info);
2737     }
2738     kfree(qedf->fp_array);
2739 }
2740 
2741 static int qedf_prepare_sb(struct qedf_ctx *qedf)
2742 {
2743     int id;
2744     struct qedf_fastpath *fp;
2745     int ret;
2746 
2747     qedf->fp_array =
2748         kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
2749         GFP_KERNEL);
2750 
2751     if (!qedf->fp_array) {
2752         QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
2753               "failed.\n");
2754         return -ENOMEM;
2755     }
2756 
2757     for (id = 0; id < qedf->num_queues; id++) {
2758         fp = &(qedf->fp_array[id]);
2759         fp->sb_id = QEDF_SB_ID_NULL;
2760         fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
2761         if (!fp->sb_info) {
2762             QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
2763                   "allocation failed.\n");
2764             return -ENOMEM;
2765         }
2766         ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
2767         if (ret) {
2768             QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
2769                   "initialization failed.\n");
2770             return ret;
2771         }
2772         fp->sb_id = id;
2773         fp->qedf = qedf;
2774         fp->cq_num_entries =
2775             qedf->global_queues[id]->cq_mem_size /
2776             sizeof(struct fcoe_cqe);
2777     }
2778 
2779     return 0;
2780 }
2781 
2782 void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
2783 {
2784     u16 xid;
2785     struct qedf_ioreq *io_req;
2786     struct qedf_rport *fcport;
2787     u32 comp_type;
2788 
2789     comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2790         FCOE_CQE_CQE_TYPE_MASK;
2791 
2792     xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2793     io_req = &qedf->cmd_mgr->cmds[xid];
2794 
2795     /* Completion not for a valid I/O anymore so just return */
2796     if (!io_req) {
2797         QEDF_ERR(&qedf->dbg_ctx,
2798              "io_req is NULL for xid=0x%x.\n", xid);
2799         return;
2800     }
2801 
2802     fcport = io_req->fcport;
2803 
2804     if (fcport == NULL) {
2805         QEDF_ERR(&qedf->dbg_ctx,
2806              "fcport is NULL for xid=0x%x io_req=%p.\n",
2807              xid, io_req);
2808         return;
2809     }
2810 
2811     /*
2812      * Check that fcport is offloaded.  If it isn't then the spinlock
2813      * isn't valid and shouldn't be taken. We should just return.
2814      */
2815     if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2816         QEDF_ERR(&qedf->dbg_ctx,
2817              "Session not offloaded yet, fcport = %p.\n", fcport);
2818         return;
2819     }
2820 
2821 
2822     switch (comp_type) {
2823     case FCOE_GOOD_COMPLETION_CQE_TYPE:
2824         atomic_inc(&fcport->free_sqes);
2825         switch (io_req->cmd_type) {
2826         case QEDF_SCSI_CMD:
2827             qedf_scsi_completion(qedf, cqe, io_req);
2828             break;
2829         case QEDF_ELS:
2830             qedf_process_els_compl(qedf, cqe, io_req);
2831             break;
2832         case QEDF_TASK_MGMT_CMD:
2833             qedf_process_tmf_compl(qedf, cqe, io_req);
2834             break;
2835         case QEDF_SEQ_CLEANUP:
2836             qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
2837             break;
2838         }
2839         break;
2840     case FCOE_ERROR_DETECTION_CQE_TYPE:
2841         atomic_inc(&fcport->free_sqes);
2842         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2843             "Error detect CQE.\n");
2844         qedf_process_error_detect(qedf, cqe, io_req);
2845         break;
2846     case FCOE_EXCH_CLEANUP_CQE_TYPE:
2847         atomic_inc(&fcport->free_sqes);
2848         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2849             "Cleanup CQE.\n");
2850         qedf_process_cleanup_compl(qedf, cqe, io_req);
2851         break;
2852     case FCOE_ABTS_CQE_TYPE:
2853         atomic_inc(&fcport->free_sqes);
2854         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2855             "Abort CQE.\n");
2856         qedf_process_abts_compl(qedf, cqe, io_req);
2857         break;
2858     case FCOE_DUMMY_CQE_TYPE:
2859         atomic_inc(&fcport->free_sqes);
2860         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2861             "Dummy CQE.\n");
2862         break;
2863     case FCOE_LOCAL_COMP_CQE_TYPE:
2864         atomic_inc(&fcport->free_sqes);
2865         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2866             "Local completion CQE.\n");
2867         break;
2868     case FCOE_WARNING_CQE_TYPE:
2869         atomic_inc(&fcport->free_sqes);
2870         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2871             "Warning CQE.\n");
2872         qedf_process_warning_compl(qedf, cqe, io_req);
2873         break;
2874     case MAX_FCOE_CQE_TYPE:
2875         atomic_inc(&fcport->free_sqes);
2876         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2877             "Max FCoE CQE.\n");
2878         break;
2879     default:
2880         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2881             "Default CQE.\n");
2882         break;
2883     }
2884 }
2885 
2886 static void qedf_free_bdq(struct qedf_ctx *qedf)
2887 {
2888     int i;
2889 
2890     if (qedf->bdq_pbl_list)
2891         dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2892             qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
2893 
2894     if (qedf->bdq_pbl)
2895         dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
2896             qedf->bdq_pbl, qedf->bdq_pbl_dma);
2897 
2898     for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2899         if (qedf->bdq[i].buf_addr) {
2900             dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
2901                 qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
2902         }
2903     }
2904 }
2905 
2906 static void qedf_free_global_queues(struct qedf_ctx *qedf)
2907 {
2908     int i;
2909     struct global_queue **gl = qedf->global_queues;
2910 
2911     for (i = 0; i < qedf->num_queues; i++) {
2912         if (!gl[i])
2913             continue;
2914 
2915         if (gl[i]->cq)
2916             dma_free_coherent(&qedf->pdev->dev,
2917                 gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
2918         if (gl[i]->cq_pbl)
2919             dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
2920                 gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
2921 
2922         kfree(gl[i]);
2923     }
2924 
2925     qedf_free_bdq(qedf);
2926 }
2927 
2928 static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2929 {
2930     int i;
2931     struct scsi_bd *pbl;
2932     u64 *list;
2934 
2935     /* Alloc dma memory for BDQ buffers */
2936     for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2937         qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
2938             QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
2939         if (!qedf->bdq[i].buf_addr) {
2940             QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
2941                 "buffer %d.\n", i);
2942             return -ENOMEM;
2943         }
2944     }
2945 
2946     /* Alloc dma memory for BDQ page buffer list */
2947     qedf->bdq_pbl_mem_size =
2948         QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
2949     qedf->bdq_pbl_mem_size =
2950         ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
2951 
2952     qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
2953         qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
2954     if (!qedf->bdq_pbl) {
2955         QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
2956         return -ENOMEM;
2957     }
2958 
2959     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2960           "BDQ PBL addr=0x%p dma=%pad\n",
2961           qedf->bdq_pbl, &qedf->bdq_pbl_dma);
2962 
2963     /*
2964      * Populate BDQ PBL with physical and virtual address of individual
2965      * BDQ buffers
2966      */
2967     pbl = (struct scsi_bd *)qedf->bdq_pbl;
2968     for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2969         pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
2970         pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
2971         pbl->opaque.fcoe_opaque.hi = 0;
2972         /* Opaque lo data is an index into the BDQ array */
2973         pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
2974         pbl++;
2975     }
2976 
2977     /* Allocate list of PBL pages */
2978     qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
2979                         QEDF_PAGE_SIZE,
2980                         &qedf->bdq_pbl_list_dma,
2981                         GFP_KERNEL);
2982     if (!qedf->bdq_pbl_list) {
2983         QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
2984         return -ENOMEM;
2985     }
2986 
2987     /*
2988      * Now populate PBL list with pages that contain pointers to the
2989      * individual buffers.
2990      */
2991     qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
2992         QEDF_PAGE_SIZE;
2993     list = (u64 *)qedf->bdq_pbl_list;
2995     for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
2996         *list = qedf->bdq_pbl_dma;
2997         list++;
2999     }
3000 
3001     return 0;
3002 }
3003 
3004 static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
3005 {
3006     u32 *list;
3007     int i;
3008     int status;
3009     u32 *pbl;
3010     dma_addr_t page;
3011     int num_pages;
3012 
3013     /* Allocate and map CQs, RQs */
3014     /*
3015      * Number of global queues (CQ / RQ). This should
3016      * be <= number of available MSIX vectors for the PF
3017      */
3018     if (!qedf->num_queues) {
3019         QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
3020         return -ENOMEM;
3021     }
3022 
3023     /*
3024      * Make sure we allocated the PBL that will contain the physical
3025      * addresses of our queues
3026      */
3027     if (!qedf->p_cpuq) {
3028         status = -EINVAL;
3029         QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
3030         goto mem_alloc_failure;
3031     }
3032 
3033     qedf->global_queues = kcalloc(qedf->num_queues,
3034         sizeof(struct global_queue *), GFP_KERNEL);
3035     if (!qedf->global_queues) {
3036         QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
3037               "queues array ptr memory\n");
3038         return -ENOMEM;
3039     }
3040     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3041            "qedf->global_queues=%p.\n", qedf->global_queues);
3042 
3043     /* Allocate DMA coherent buffers for BDQ */
3044     status = qedf_alloc_bdq(qedf);
3045     if (status) {
3046         QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
3047         goto mem_alloc_failure;
3048     }
3049 
3050     /* Allocate a CQ and an associated PBL for each MSI-X vector */
3051     for (i = 0; i < qedf->num_queues; i++) {
3052         qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
3053             GFP_KERNEL);
3054         if (!qedf->global_queues[i]) {
3055             QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
3056                    "global queue %d.\n", i);
3057             status = -ENOMEM;
3058             goto mem_alloc_failure;
3059         }
3060 
3061         qedf->global_queues[i]->cq_mem_size =
3062             FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
3063         qedf->global_queues[i]->cq_mem_size =
3064             ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
3065 
3066         qedf->global_queues[i]->cq_pbl_size =
3067             (qedf->global_queues[i]->cq_mem_size /
3068             PAGE_SIZE) * sizeof(void *);
3069         qedf->global_queues[i]->cq_pbl_size =
3070             ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
3071 
3072         qedf->global_queues[i]->cq =
3073             dma_alloc_coherent(&qedf->pdev->dev,
3074                        qedf->global_queues[i]->cq_mem_size,
3075                        &qedf->global_queues[i]->cq_dma,
3076                        GFP_KERNEL);
3077 
3078         if (!qedf->global_queues[i]->cq) {
3079             QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
3080             status = -ENOMEM;
3081             goto mem_alloc_failure;
3082         }
3083 
3084         qedf->global_queues[i]->cq_pbl =
3085             dma_alloc_coherent(&qedf->pdev->dev,
3086                        qedf->global_queues[i]->cq_pbl_size,
3087                        &qedf->global_queues[i]->cq_pbl_dma,
3088                        GFP_KERNEL);
3089 
3090         if (!qedf->global_queues[i]->cq_pbl) {
3091             QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
3092             status = -ENOMEM;
3093             goto mem_alloc_failure;
3094         }
3095 
3096         /* Create the PBL: each element is a page address stored as low/high 32-bit words */
3097         num_pages = qedf->global_queues[i]->cq_mem_size /
3098             QEDF_PAGE_SIZE;
3099         page = qedf->global_queues[i]->cq_dma;
3100         pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
3101 
3102         while (num_pages--) {
3103             *pbl = U64_LO(page);
3104             pbl++;
3105             *pbl = U64_HI(page);
3106             pbl++;
3107             page += QEDF_PAGE_SIZE;
3108         }
3109         /* Set the initial consumer index for cq */
3110         qedf->global_queues[i]->cq_cons_idx = 0;
3111     }
3112 
3113     list = (u32 *)qedf->p_cpuq;
3114 
3115     /*
3116      * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
3117      * CQ#1 PBL pointer, RQ#1 PBL pointer, etc.  Each PBL pointer points
3118      * to the physical address which contains an array of pointers to
3119      * the physical addresses of the specific queue pages.
3120      */
3121     for (i = 0; i < qedf->num_queues; i++) {
3122         *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
3123         list++;
3124         *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
3125         list++;
3126         *list = U64_LO(0);
3127         list++;
3128         *list = U64_HI(0);
3129         list++;
3130     }
3131 
3132     return 0;
3133 
3134 mem_alloc_failure:
3135     qedf_free_global_queues(qedf);
3136     return status;
3137 }
3138 
3139 static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
3140 {
3141     u8 sq_num_pbl_pages;
3142     u32 sq_mem_size;
3143     u32 cq_mem_size;
3144     u32 cq_num_entries;
3145     int rval;
3146 
3147     /*
3148      * The number of completion queues/fastpath interrupts/status blocks
3149      * we allocate is the minimum of:
3150      *
3151      * Number of CPUs
3152      * Number allocated by qed for our PCI function
3153      */
3154     qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
3155 
3156     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
3157            qedf->num_queues);
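    /*
     * Example with hypothetical numbers: on a 16-CPU host where qed
     * reserved only 8 MSI-X vectors for this PF, MIN_NUM_CPUS_MSIX()
     * yields 8, so 8 CQs/status blocks are allocated.
     */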
3158 
3159     qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
3160         qedf->num_queues * sizeof(struct qedf_glbl_q_params),
3161         &qedf->hw_p_cpuq, GFP_KERNEL);
3162 
3163     if (!qedf->p_cpuq) {
3164         QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
3165         return 1;
3166     }
3167 
3168     rval = qedf_alloc_global_queues(qedf);
3169     if (rval) {
3170         QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
3171               "failed.\n");
3172         return 1;
3173     }
3174 
3175     /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
3176     sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
3177     sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
3178     sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);
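    /*
     * Illustrative arithmetic with hypothetical sizes: if SQ_NUM_ENTRIES
     * were 128 and sizeof(struct fcoe_wqe) were 64, sq_mem_size would be
     * 8192 bytes, already a multiple of a 4K QEDF_PAGE_SIZE, giving
     * sq_num_pbl_pages == 2.
     */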
3179 
3180     /* Calculate CQ num entries */
3181     cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
3182     cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
3183     cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
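    /*
     * Note that cq_num_entries is recomputed from the page-aligned size:
     * rounding cq_mem_size up to QEDF_PAGE_SIZE can leave room for more
     * CQEs than FCOE_PARAMS_CQ_NUM_ENTRIES, and it is this (possibly
     * larger) capacity that is reported in pf_params below.
     */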
3184 
3185     memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
3186 
3187     /* Setup the value for fcoe PF */
3188     qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
3189     qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
3190     qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
3191         (u64)qedf->hw_p_cpuq;
3192     qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
3193 
3194     qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
3195 
3196     qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
3197     qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
3198 
3199     /* log_page_size: 12 for 4KB pages */
3200     qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
3201 
3202     qedf->pf_params.fcoe_pf_params.mtu = 9000;
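    /*
     * FCoE frames carry up to a 2112-byte FC payload plus FC/FCoE/VLAN
     * headers, so they do not fit in a standard 1500-byte MTU; an MTU of
     * 9000 comfortably covers the roughly 2.5KB "baby jumbo" size usually
     * quoted for FCoE.
     */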
3203     qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
3204     qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
3205 
3206     /* BDQ address and size */
3207     qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
3208         qedf->bdq_pbl_list_dma;
3209     qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
3210         qedf->bdq_pbl_list_num_entries;
3211     qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
3212 
3213     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3214         "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
3215         qedf->bdq_pbl_list,
3216         qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
3217         qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
3218 
3219     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3220         "cq_num_entries=%d.\n",
3221         qedf->pf_params.fcoe_pf_params.cq_num_entries);
3222 
3223     return 0;
3224 }
3225 
3226 /* Free DMA coherent memory for array of queue pointers we pass to qed */
3227 static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
3228 {
3229     size_t size = 0;
3230 
3231     if (qedf->p_cpuq) {
3232         size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
3233         dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
3234             qedf->hw_p_cpuq);
3235     }
3236 
3237     qedf_free_global_queues(qedf);
3238 
3239     kfree(qedf->global_queues);
3240 }
3241 
3242 /*
3243  * PCI driver functions
3244  */
3245 
3246 static const struct pci_device_id qedf_pci_tbl[] = {
3247     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
3248     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
3249     {0}
3250 };
3251 MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);
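/*
 * MODULE_DEVICE_TABLE() exports the PCI ID table to userspace so that
 * udev/modprobe can autoload this module when a device matching one of
 * the IDs above (QLogic 0x165c or 0x8080) is discovered.
 */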
3252 
3253 static struct pci_driver qedf_pci_driver = {
3254     .name = QEDF_MODULE_NAME,
3255     .id_table = qedf_pci_tbl,
3256     .probe = qedf_probe,
3257     .remove = qedf_remove,
3258     .shutdown = qedf_shutdown,
3259 };
3260 
3261 static int __qedf_probe(struct pci_dev *pdev, int mode)
3262 {
3263     int rc = -EINVAL;
3264     struct fc_lport *lport;
3265     struct qedf_ctx *qedf = NULL;
3266     struct Scsi_Host *host;
3267     bool is_vf = false;
3268     struct qed_ll2_params params;
3269     char host_buf[20];
3270     struct qed_link_params link_params;
3271     int status;
3272     void *task_start, *task_end;
3273     struct qed_slowpath_params slowpath_params;
3274     struct qed_probe_params qed_params;
3275     u16 retry_cnt = 10;
3276 
3277     /*
3278      * When doing error recovery we didn't reap the lport, so don't try
3279      * to reallocate it.
3280      */
3281 retry_probe:
3282     if (mode == QEDF_MODE_RECOVERY)
3283         msleep(2000);
3284 
3285     if (mode != QEDF_MODE_RECOVERY) {
3286         lport = libfc_host_alloc(&qedf_host_template,
3287             sizeof(struct qedf_ctx));
3288 
3289         if (!lport) {
3290             QEDF_ERR(NULL, "Could not allocate lport.\n");
3291             rc = -ENOMEM;
3292             goto err0;
3293         }
3294 
3295         fc_disc_init(lport);
3296 
3297         /* Initialize qedf_ctx */
3298         qedf = lport_priv(lport);
3299         set_bit(QEDF_PROBING, &qedf->flags);
3300         qedf->lport = lport;
3301         qedf->ctlr.lp = lport;
3302         qedf->pdev = pdev;
3303         qedf->dbg_ctx.pdev = pdev;
3304         qedf->dbg_ctx.host_no = lport->host->host_no;
3305         spin_lock_init(&qedf->hba_lock);
3306         INIT_LIST_HEAD(&qedf->fcports);
3307         qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
3308         atomic_set(&qedf->num_offloads, 0);
3309         qedf->stop_io_on_error = false;
3310         pci_set_drvdata(pdev, qedf);
3311         init_completion(&qedf->fipvlan_compl);
3312         mutex_init(&qedf->stats_mutex);
3313         mutex_init(&qedf->flush_mutex);
3314         qedf->flogi_pending = 0;
3315 
3316         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
3317            "QLogic FastLinQ FCoE Module qedf %s, "
3318            "FW %d.%d.%d.%d\n", QEDF_VERSION,
3319            FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
3320            FW_ENGINEERING_VERSION);
3321     } else {
3322         /* Init pointers during recovery */
3323         qedf = pci_get_drvdata(pdev);
3324         set_bit(QEDF_PROBING, &qedf->flags);
3325         lport = qedf->lport;
3326     }
3327 
3328     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
3329 
3330     host = lport->host;
3331 
3332     /* Allocate mempool for qedf_io_work structs */
3333     qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
3334         qedf_io_work_cache);
3335     if (qedf->io_mempool == NULL) {
3336         QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
3337         goto err1;
3338     }
3339     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
3340         qedf->io_mempool);
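    /*
     * The mempool keeps at least QEDF_IO_WORK_MIN qedf_io_work objects in
     * reserve, so frame processing can still allocate work items when the
     * slab allocator is under memory pressure.
     */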
3341 
3342     sprintf(host_buf, "qedf_%u_link",
3343         qedf->lport->host->host_no);
3344     qedf->link_update_wq = create_workqueue(host_buf);
3345     INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
3346     INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
3347     INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
3348     INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
3349     qedf->fipvlan_retries = qedf_fipvlan_retries;
3350     /* Set a default prio in case DCBX doesn't converge */
3351     if (qedf_default_prio > -1) {
3352         /*
3353          * This is the case where we pass a modparam in so we want to
3354          * honor it even if dcbx doesn't converge.
3355          */
3356         qedf->prio = qedf_default_prio;
3357     } else
3358         qedf->prio = QEDF_DEFAULT_PRIO;
3359 
3360     /*
3361      * Common probe. Takes care of basic hardware init and pci_*
3362      * functions.
3363      */
3364     memset(&qed_params, 0, sizeof(qed_params));
3365     qed_params.protocol = QED_PROTOCOL_FCOE;
3366     qed_params.dp_module = qedf_dp_module;
3367     qed_params.dp_level = qedf_dp_level;
3368     qed_params.is_vf = is_vf;
3369     qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
3370     if (!qedf->cdev) {
3371         if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
3372             QEDF_ERR(&qedf->dbg_ctx,
3373                 "Retry %d initialize hardware\n", retry_cnt);
3374             retry_cnt--;
3375             goto retry_probe;
3376         }
3377         QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
3378         rc = -ENODEV;
3379         goto err1;
3380     }
3381 
3382     /* Learn information crucial for qedf to progress */
3383     rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3384     if (rc) {
3385         QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
3386         goto err1;
3387     }
3388 
3389     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
3390           "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
3391           qedf->dev_info.common.num_hwfns,
3392           qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
3393 
3394     /*
3395      * Queue allocation code should come here. The order should be:
3396      *  slowpath_start
3397      *  status block allocation
3398      *  interrupt registration (to get min number of queues)
3399      *  set_fcoe_pf_param
3400      *  qed_sp_fcoe_func_start
3401      */
3402     rc = qedf_set_fcoe_pf_param(qedf);
3403     if (rc) {
3404         QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
3405         goto err2;
3406     }
3407     qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3408 
3409     /* Learn information crucial for qedf to progress */
3410     rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3411     if (rc) {
3412         QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
3413         goto err2;
3414     }
3415 
3416     if (mode != QEDF_MODE_RECOVERY) {
3417         qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
3418         if (IS_ERR(qedf->devlink)) {
3419             QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
3420             rc = PTR_ERR(qedf->devlink);
3421             qedf->devlink = NULL;
3422             goto err2;
3423         }
3424     }
3425 
3426     /* Record BDQ producer doorbell addresses */
3427     qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
3428     qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
3429     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3430         "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
3431         qedf->bdq_secondary_prod);
3432 
3433     qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
3434 
3435     rc = qedf_prepare_sb(qedf);
3436     if (rc) {
3437         QEDF_ERR(&(qedf->dbg_ctx),
3438             "Cannot prepare status blocks.\n");
3439         goto err2;
3440     }
3441 
3442     /* Start the Slowpath-process */
3443     slowpath_params.int_mode = QED_INT_MODE_MSIX;
3444     slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
3445     slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
3446     slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
3447     slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
3448     strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
3449     rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
3450     if (rc) {
3451         QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
3452         goto err2;
3453     }
3454 
3455     /*
3456      * update_pf_params needs to be called before and after slowpath
3457      * start
3458      */
3459     qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3460 
3461     /* Setup interrupts */
3462     rc = qedf_setup_int(qedf);
3463     if (rc) {
3464         QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
3465         goto err3;
3466     }
3467 
3468     rc = qed_ops->start(qedf->cdev, &qedf->tasks);
3469     if (rc) {
3470         QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
3471         goto err4;
3472     }
3473     task_start = qedf_get_task_mem(&qedf->tasks, 0);
3474     task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
3475     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
3476            "end=%p block_size=%u.\n", task_start, task_end,
3477            qedf->tasks.size);
3478 
3479     /*
3480      * We need to write the number of BDs in the BDQ we've preallocated so
3481      * the f/w will do a prefetch and we'll get an unsolicited CQE when a
3482      * packet arrives.
3483      */
3484     qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
3485     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3486         "Writing %d to primary and secondary BDQ doorbell registers.\n",
3487         qedf->bdq_prod_idx);
3488     writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
3489     readw(qedf->bdq_primary_prod);
3490     writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
3491     readw(qedf->bdq_secondary_prod);
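    /*
     * Each writew() above is a posted MMIO write; the readw() that
     * follows it flushes the write to the device so the new producer
     * index is visible before we continue. The value read back is
     * intentionally discarded.
     */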
3492 
3493     qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3494 
3495     /* Now that the dev_info struct has been filled in, set the MAC
3496      * address.
3497      */
3498     ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
3499     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
3500            qedf->mac);
3501 
3502     /*
3503      * Set the WWNN and WWPN in the following way:
3504      *
3505      * If the info we get from qed is non-zero then use that to set the
3506      * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
3507      * on the MAC address.
3508      */
3509     if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
3510         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3511             "Setting WWPN and WWNN from qed dev_info.\n");
3512         qedf->wwnn = qedf->dev_info.wwnn;
3513         qedf->wwpn = qedf->dev_info.wwpn;
3514     } else {
3515         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3516             "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
3517         qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
3518         qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
3519     }
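    /*
     * In the fallback case, fcoe_wwn_from_mac() builds NAA-format names
     * from the MAC, with the second argument selecting the naming scheme
     * (the top nibble of the WWN). For a hypothetical MAC of
     * 00:0e:1e:aa:bb:cc this would yield WWNN 0x1000000e1eaabbcc and
     * WWPN 0x2000000e1eaabbcc.
     */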
3520     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,  "WWNN=%016llx "
3521            "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
3522 
3523     sprintf(host_buf, "host_%d", host->host_no);
3524     qed_ops->common->set_name(qedf->cdev, host_buf);
3525 
3526     /* Allocate cmd mgr */
3527     qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
3528     if (!qedf->cmd_mgr) {
3529         QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
3530         rc = -ENOMEM;
3531         goto err5;
3532     }
3533 
3534     if (mode != QEDF_MODE_RECOVERY) {
3535         host->transportt = qedf_fc_transport_template;
3536         host->max_lun = qedf_max_lun;
3537         host->max_cmd_len = QEDF_MAX_CDB_LEN;
3538         host->max_id = QEDF_MAX_SESSIONS;
3539         host->can_queue = FCOE_PARAMS_NUM_TASKS;
3540         rc = scsi_add_host(host, &pdev->dev);
3541         if (rc) {
3542             QEDF_WARN(&qedf->dbg_ctx,
3543                   "Error adding Scsi_Host rc=0x%x.\n", rc);
3544             goto err6;
3545         }
3546     }
3547 
3548     memset(&params, 0, sizeof(params));
3549     params.mtu = QEDF_LL2_BUF_SIZE;
3550     ether_addr_copy(params.ll2_mac_address, qedf->mac);
3551 
3552     /* Start LL2 processing thread */
3553     snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
3554     qedf->ll2_recv_wq =
3555         create_workqueue(host_buf);
3556     if (!qedf->ll2_recv_wq) {
3557         QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
3558         rc = -ENOMEM;
3559         goto err7;
3560     }
3561 
3562 #ifdef CONFIG_DEBUG_FS
3563     qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
3564                 qedf_dbg_fops);
3565 #endif
3566 
3567     /* Start LL2 */
3568     qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
3569     rc = qed_ops->ll2->start(qedf->cdev, &params);
3570     if (rc) {
3571         QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
3572         goto err7;
3573     }
3574     set_bit(QEDF_LL2_STARTED, &qedf->flags);
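    /*
     * From this point the qed "light L2" path is live: raw FIP and FCoE
     * Ethernet frames are delivered through qedf_ll2_cb_ops and processed
     * on the ll2_recv_wq workqueue created above.
     */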
3575 
3576     /* Set initial FIP/FCoE VLAN to 0 (no VLAN discovered yet) */
3577     qedf->vlan_id = 0;
3578 
3579     /*
3580      * No need to setup fcoe_ctlr or fc_lport objects during recovery since
3581      * they were not reaped during the unload process.
3582      */
3583     if (mode != QEDF_MODE_RECOVERY) {
3584         /* Set up embedded fcoe controller */
3585         qedf_fcoe_ctlr_setup(qedf);
3586 
3587         /* Setup lport */
3588         rc = qedf_lport_setup(qedf);
3589         if (rc) {
3590             QEDF_ERR(&(qedf->dbg_ctx),
3591                 "qedf_lport_setup failed.\n");
3592             goto err7;
3593         }
3594     }
3595 
3596     sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
3597     qedf->timer_work_queue =
3598         create_workqueue(host_buf);
3599     if (!qedf->timer_work_queue) {
3600         QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
3601               "workqueue.\n");
3602         rc = -ENOMEM;
3603         goto err7;
3604     }
3605 
3606     /* DPC workqueue is not reaped during recovery unload */
3607     if (mode != QEDF_MODE_RECOVERY) {
3608         sprintf(host_buf, "qedf_%u_dpc",
3609             qedf->lport->host->host_no);
3610         qedf->dpc_wq = create_workqueue(host_buf);
3611     }
3612     INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
3613 
3614     /*
3615      * GRC dump and sysfs parameters are not reaped during the recovery
3616      * unload process.
3617      */
3618     if (mode != QEDF_MODE_RECOVERY) {
3619         qedf->grcdump_size =
3620             qed_ops->common->dbg_all_data_size(qedf->cdev);
3621         if (qedf->grcdump_size) {
3622             rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
3623                 qedf->grcdump_size);
3624             if (rc) {
3625                 QEDF_ERR(&(qedf->dbg_ctx),
3626                     "GRC Dump buffer alloc failed.\n");
3627                 qedf->grcdump = NULL;
3628             }
3629 
3630             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3631                 "grcdump: addr=%p, size=%u.\n",
3632                 qedf->grcdump, qedf->grcdump_size);
3633         }
3634         qedf_create_sysfs_ctx_attr(qedf);
3635 
3636         /* Initialize I/O tracing for this adapter */
3637         spin_lock_init(&qedf->io_trace_lock);
3638         qedf->io_trace_idx = 0;
3639     }
3640 
3641     init_completion(&qedf->flogi_compl);
3642 
3643     status = qed_ops->common->update_drv_state(qedf->cdev, true);
3644     if (status)
3645         QEDF_ERR(&(qedf->dbg_ctx),
3646             "Failed to send drv state to MFW.\n");
3647 
3648     memset(&link_params, 0, sizeof(struct qed_link_params));
3649     link_params.link_up = true;
3650     status = qed_ops->common->set_link(qedf->cdev, &link_params);
3651     if (status)
3652         QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
3653 
3654     /* Start/restart discovery */
3655     if (mode == QEDF_MODE_RECOVERY)
3656         fcoe_ctlr_link_up(&qedf->ctlr);
3657     else
3658         fc_fabric_login(lport);
3659 
3660     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
3661 
3662     clear_bit(QEDF_PROBING, &qedf->flags);
3663 
3664     /* All good */
3665     return 0;
3666 
3667 err7:
3668     if (qedf->ll2_recv_wq)
3669         destroy_workqueue(qedf->ll2_recv_wq);
3670     fc_remove_host(qedf->lport->host);
3671     scsi_remove_host(qedf->lport->host);
3672 #ifdef CONFIG_DEBUG_FS
3673     qedf_dbg_host_exit(&(qedf->dbg_ctx));
3674 #endif
3675 err6:
3676     qedf_cmd_mgr_free(qedf->cmd_mgr);
3677 err5:
3678     qed_ops->stop(qedf->cdev);
3679 err4:
3680     qedf_free_fcoe_pf_param(qedf);
3681     qedf_sync_free_irqs(qedf);
3682 err3:
3683     qed_ops->common->slowpath_stop(qedf->cdev);
3684 err2:
3685     qed_ops->common->remove(qedf->cdev);
3686 err1:
3687     scsi_host_put(lport->host);
3688 err0:
3689     return rc;
3690 }
3691 
3692 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3693 {
3694     return __qedf_probe(pdev, QEDF_MODE_NORMAL);
3695 }
3696 
3697 static void __qedf_remove(struct pci_dev *pdev, int mode)
3698 {
3699     struct qedf_ctx *qedf;
3700     int rc;
3701 
3702     if (!pdev) {
3703         QEDF_ERR(NULL, "pdev is NULL.\n");
3704         return;
3705     }
3706 
3707     qedf = pci_get_drvdata(pdev);
3708 
3709     /*
3710      * Prevent race where we're in board disable work and then try to
3711      * rmmod the module.
3712      */
3713     if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
3714         QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
3715         return;
3716     }
3717 
3718     if (mode != QEDF_MODE_RECOVERY)
3719         set_bit(QEDF_UNLOADING, &qedf->flags);
3720 
3721     /* Logoff the fabric to upload all connections */
3722     if (mode == QEDF_MODE_RECOVERY)
3723         fcoe_ctlr_link_down(&qedf->ctlr);
3724     else
3725         fc_fabric_logoff(qedf->lport);
3726 
3727     if (!qedf_wait_for_upload(qedf))
3728         QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
3729 
3730 #ifdef CONFIG_DEBUG_FS
3731     qedf_dbg_host_exit(&(qedf->dbg_ctx));
3732 #endif
3733 
3734     /* Stop any link update handling */
3735     cancel_delayed_work_sync(&qedf->link_update);
3736     destroy_workqueue(qedf->link_update_wq);
3737     qedf->link_update_wq = NULL;
3738 
3739     if (qedf->timer_work_queue)
3740         destroy_workqueue(qedf->timer_work_queue);
3741 
3742     /* Stop Light L2 */
3743     clear_bit(QEDF_LL2_STARTED, &qedf->flags);
3744     qed_ops->ll2->stop(qedf->cdev);
3745     if (qedf->ll2_recv_wq)
3746         destroy_workqueue(qedf->ll2_recv_wq);
3747 
3748     /* Stop fastpath */
3749     qedf_sync_free_irqs(qedf);
3750     qedf_destroy_sb(qedf);
3751 
3752     /*
3753      * During recovery don't destroy OS constructs that represent the
3754      * physical port.
3755      */
3756     if (mode != QEDF_MODE_RECOVERY) {
3757         qedf_free_grc_dump_buf(&qedf->grcdump);
3758         qedf_remove_sysfs_ctx_attr(qedf);
3759 
3760         /* Remove all SCSI/libfc/libfcoe structures */
3761         fcoe_ctlr_destroy(&qedf->ctlr);
3762         fc_lport_destroy(qedf->lport);
3763         fc_remove_host(qedf->lport->host);
3764         scsi_remove_host(qedf->lport->host);
3765     }
3766 
3767     qedf_cmd_mgr_free(qedf->cmd_mgr);
3768 
3769     if (mode != QEDF_MODE_RECOVERY) {
3770         fc_exch_mgr_free(qedf->lport);
3771         fc_lport_free_stats(qedf->lport);
3772 
3773         /* Wait for all vports to be reaped */
3774         qedf_wait_for_vport_destroy(qedf);
3775     }
3776 
3777     /*
3778      * Now that all connections have been uploaded we can stop the
3779      * rest of the qed operations
3780      */
3781     qed_ops->stop(qedf->cdev);
3782 
3783     if (mode != QEDF_MODE_RECOVERY) {
3784         if (qedf->dpc_wq) {
3785             /* Stop general DPC handling */
3786             destroy_workqueue(qedf->dpc_wq);
3787             qedf->dpc_wq = NULL;
3788         }
3789     }
3790 
3791     /* Final shutdown for the board */
3792     qedf_free_fcoe_pf_param(qedf);
3793     if (mode != QEDF_MODE_RECOVERY) {
3794         qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3795         pci_set_drvdata(pdev, NULL);
3796     }
3797 
3798     rc = qed_ops->common->update_drv_state(qedf->cdev, false);
3799     if (rc)
3800         QEDF_ERR(&(qedf->dbg_ctx),
3801             "Failed to send drv state to MFW.\n");
3802 
3803     if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
3804         qed_ops->common->devlink_unregister(qedf->devlink);
3805         qedf->devlink = NULL;
3806     }
3807 
3808     qed_ops->common->slowpath_stop(qedf->cdev);
3809     qed_ops->common->remove(qedf->cdev);
3810 
3811     mempool_destroy(qedf->io_mempool);
3812 
3813     /* Only reap the Scsi_host on a real removal */
3814     if (mode != QEDF_MODE_RECOVERY)
3815         scsi_host_put(qedf->lport->host);
3816 }
3817 
3818 static void qedf_remove(struct pci_dev *pdev)
3819 {
3820     /* Check to make sure this function wasn't already disabled */
3821     if (!atomic_read(&pdev->enable_cnt))
3822         return;
3823 
3824     __qedf_remove(pdev, QEDF_MODE_NORMAL);
3825 }
3826 
3827 void qedf_wq_grcdump(struct work_struct *work)
3828 {
3829     struct qedf_ctx *qedf =
3830         container_of(work, struct qedf_ctx, grcdump_work.work);
3831 
3832     QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
3833     qedf_capture_grc_dump(qedf);
3834 }
3835 
3836 void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
3837 {
3838     struct qedf_ctx *qedf = dev;
3839 
3840     QEDF_ERR(&(qedf->dbg_ctx),
3841             "Hardware error handler scheduled, event=%d.\n",
3842             err_type);
3843 
3844     if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
3845         QEDF_ERR(&(qedf->dbg_ctx),
3846                 "Already in recovery, not scheduling board disable work.\n");
3847         return;
3848     }
3849 
3850     switch (err_type) {
3851     case QED_HW_ERR_FAN_FAIL:
3852         schedule_delayed_work(&qedf->board_disable_work, 0);
3853         break;
3854     case QED_HW_ERR_MFW_RESP_FAIL:
3855     case QED_HW_ERR_HW_ATTN:
3856     case QED_HW_ERR_DMAE_FAIL:
3857     case QED_HW_ERR_FW_ASSERT:
3858         /* Prevent HW attentions from being reasserted */
3859         qed_ops->common->attn_clr_enable(qedf->cdev, true);
3860         break;
3861     case QED_HW_ERR_RAMROD_FAIL:
3862         /* Prevent HW attentions from being reasserted */
3863         qed_ops->common->attn_clr_enable(qedf->cdev, true);
3864 
3865         if (qedf_enable_recovery && qedf->devlink)
3866             qed_ops->common->report_fatal_error(qedf->devlink,
3867                 err_type);
3868 
3869         break;
3870     default:
3871         break;
3872     }
3873 }
3874 
3875 /*
3876  * Protocol TLV handler
3877  */
3878 void qedf_get_protocol_tlv_data(void *dev, void *data)
3879 {
3880     struct qedf_ctx *qedf = dev;
3881     struct qed_mfw_tlv_fcoe *fcoe = data;
3882     struct fc_lport *lport;
3883     struct Scsi_Host *host;
3884     struct fc_host_attrs *fc_host;
3885     struct fc_host_statistics *hst;
3886 
3887     if (!qedf) {
3888         QEDF_ERR(NULL, "qedf is null.\n");
3889         return;
3890     }
3891 
3892     if (test_bit(QEDF_PROBING, &qedf->flags)) {
3893         QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
3894         return;
3895     }
3896 
3897     lport = qedf->lport;
3898     host = lport->host;
3899     fc_host = shost_to_fc_host(host);
3900 
3901     /* Force a refresh of the fc_host stats including offload stats */
3902     hst = qedf_fc_get_host_stats(host);
3903 
3904     fcoe->qos_pri_set = true;
3905     fcoe->qos_pri = 3; /* Hard coded to 3 in driver */
3906 
3907     fcoe->ra_tov_set = true;
3908     fcoe->ra_tov = lport->r_a_tov;
3909 
3910     fcoe->ed_tov_set = true;
3911     fcoe->ed_tov = lport->e_d_tov;
3912 
3913     fcoe->npiv_state_set = true;
3914     fcoe->npiv_state = 1; /* NPIV always enabled */
3915 
3916     fcoe->num_npiv_ids_set = true;
3917     fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;
3918 
3919     /* Certain attributes we only want to set if we've selected an FCF */
3920     if (qedf->ctlr.sel_fcf) {
3921         fcoe->switch_name_set = true;
3922         u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
3923     }
3924 
3925     fcoe->port_state_set = true;
3926     /* For qedf we're either link down or fabric attach */
3927     if (lport->link_up)
3928         fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
3929     else
3930         fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;
3931 
3932     fcoe->link_failures_set = true;
3933     fcoe->link_failures = (u16)hst->link_failure_count;
3934 
3935     fcoe->fcoe_txq_depth_set = true;
3936     fcoe->fcoe_rxq_depth_set = true;
3937     fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
3938     fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;
3939 
3940     fcoe->fcoe_rx_frames_set = true;
3941     fcoe->fcoe_rx_frames = hst->rx_frames;
3942 
3943     fcoe->fcoe_tx_frames_set = true;
3944     fcoe->fcoe_tx_frames = hst->tx_frames;
3945 
3946     fcoe->fcoe_rx_bytes_set = true;
3947     fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;
3948 
3949     fcoe->fcoe_tx_bytes_set = true;
3950     fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;
3951 
3952     fcoe->crc_count_set = true;
3953     fcoe->crc_count = hst->invalid_crc_count;
3954 
3955     fcoe->tx_abts_set = true;
3956     fcoe->tx_abts = hst->fcp_packet_aborts;
3957 
3958     fcoe->tx_lun_rst_set = true;
3959     fcoe->tx_lun_rst = qedf->lun_resets;
3960 
3961     fcoe->abort_task_sets_set = true;
3962     fcoe->abort_task_sets = qedf->packet_aborts;
3963 
3964     fcoe->scsi_busy_set = true;
3965     fcoe->scsi_busy = qedf->busy;
3966 
3967     fcoe->scsi_tsk_full_set = true;
3968     fcoe->scsi_tsk_full = qedf->task_set_fulls;
3969 }
3970 
3971 /* Deferred work function to perform soft context reset on STAG change */
3972 void qedf_stag_change_work(struct work_struct *work)
3973 {
3974     struct qedf_ctx *qedf =
3975         container_of(work, struct qedf_ctx, stag_work.work);
3976 
3977     printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.\n",
3978             dev_name(&qedf->pdev->dev), __func__, __LINE__,
3979             qedf->dbg_ctx.host_no);
3980     qedf_ctx_soft_reset(qedf->lport);
3981 }
3982 
3983 static void qedf_shutdown(struct pci_dev *pdev)
3984 {
3985     __qedf_remove(pdev, QEDF_MODE_NORMAL);
3986 }
3987 
3988 /*
3989  * Recovery handler code
3990  */
3991 static void qedf_schedule_recovery_handler(void *dev)
3992 {
3993     struct qedf_ctx *qedf = dev;
3994 
3995     QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
3996     schedule_delayed_work(&qedf->recovery_work, 0);
3997 }
3998 
3999 static void qedf_recovery_handler(struct work_struct *work)
4000 {
4001     struct qedf_ctx *qedf =
4002         container_of(work, struct qedf_ctx, recovery_work.work);
4003 
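    /*
     * test_and_set_bit() is atomic, so if QEDF_IN_RECOVERY was already
     * set another recovery pass is in flight and this one backs off
     * rather than tearing the function down twice.
     */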
4004     if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
4005         return;
4006 
4007     /*
4008      * Call qed_ops->common->recovery_prolog() to allow the MFW to quiesce
4009      * any PCI transactions.
4010      */
4011     qed_ops->common->recovery_prolog(qedf->cdev);
4012 
4013     QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
4014     __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
4015     /*
4016      * Reset link and dcbx to down state since we will not get a
4017      * link-down event from the MFW; calling __qedf_remove is
4018      * effectively a link-down event.
4019      */
4020     atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
4021     atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
4022     __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
4023     clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
4024     QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
4025 }
4026 
4027 /* Generic TLV data callback */
4028 void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
4029 {
4030     struct qedf_ctx *qedf;
4031 
4032     if (!dev) {
4033         QEDF_INFO(NULL, QEDF_LOG_EVT,
4034               "dev is NULL so ignoring get_generic_tlv_data request.\n");
4035         return;
4036     }
4037     qedf = (struct qedf_ctx *)dev;
4038 
4039     memset(data, 0, sizeof(struct qed_generic_tlvs));
4040     ether_addr_copy(data->mac[0], qedf->mac);
4041 }
4042 
4043 /*
4044  * Module Init/Remove
4045  */
4046 
4047 static int __init qedf_init(void)
4048 {
4049     int ret;
4050 
4051     /* If debug=1 passed, set the default log mask */
4052     if (qedf_debug == QEDF_LOG_DEFAULT)
4053         qedf_debug = QEDF_DEFAULT_LOG_MASK;
4054 
4055     /*
4056      * Check that default prio for FIP/FCoE traffic is between 0..7 if a
4057      * value has been set
4058      */
4059     if (qedf_default_prio > 7) {
4060         qedf_default_prio = QEDF_DEFAULT_PRIO;
4061         QEDF_ERR(NULL,
4062             "FCoE/FIP priority out of range, resetting to %d.\n",
4063             QEDF_DEFAULT_PRIO);
4064     }
4065 
4066     /* Print driver banner */
4067     QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
4068            QEDF_VERSION);
4069 
4070     /* Create kmem_cache for qedf_io_work structs */
4071     qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
4072         sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
4073     if (qedf_io_work_cache == NULL) {
4074         QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
4075         goto err1;
4076     }
4077     QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
4078         qedf_io_work_cache);
4079 
4080     qed_ops = qed_get_fcoe_ops();
4081     if (!qed_ops) {
4082         QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
4083         goto err1;
4084     }
4085 
4086 #ifdef CONFIG_DEBUG_FS
4087     qedf_dbg_init("qedf");
4088 #endif
4089 
4090     qedf_fc_transport_template =
4091         fc_attach_transport(&qedf_fc_transport_fn);
4092     if (!qedf_fc_transport_template) {
4093         QEDF_ERR(NULL, "Could not register with FC transport\n");
4094         goto err2;
4095     }
4096 
4097     qedf_fc_vport_transport_template =
4098         fc_attach_transport(&qedf_fc_vport_transport_fn);
4099     if (!qedf_fc_vport_transport_template) {
4100         QEDF_ERR(NULL, "Could not register vport template with FC "
4101               "transport\n");
4102         goto err3;
4103     }
4104 
4105     qedf_io_wq = create_workqueue("qedf_io_wq");
4106     if (!qedf_io_wq) {
4107         QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
4108         goto err4;
4109     }
4110 
4111     qedf_cb_ops.get_login_failures = qedf_get_login_failures;
4112 
4113     ret = pci_register_driver(&qedf_pci_driver);
4114     if (ret) {
4115         QEDF_ERR(NULL, "Failed to register driver\n");
4116         goto err5;
4117     }
4118 
4119     return 0;
4120 
4121 err5:
4122     destroy_workqueue(qedf_io_wq);
4123 err4:
4124     fc_release_transport(qedf_fc_vport_transport_template);
4125 err3:
4126     fc_release_transport(qedf_fc_transport_template);
4127 err2:
4128 #ifdef CONFIG_DEBUG_FS
4129     qedf_dbg_exit();
4130 #endif
4131     qed_put_fcoe_ops();
4132 err1:
4133     return -EINVAL;
4134 }
4135 
4136 static void __exit qedf_cleanup(void)
4137 {
4138     pci_unregister_driver(&qedf_pci_driver);
4139 
4140     destroy_workqueue(qedf_io_wq);
4141 
4142     fc_release_transport(qedf_fc_vport_transport_template);
4143     fc_release_transport(qedf_fc_transport_template);
4144 #ifdef CONFIG_DEBUG_FS
4145     qedf_dbg_exit();
4146 #endif
4147     qed_put_fcoe_ops();
4148 
4149     kmem_cache_destroy(qedf_io_work_cache);
4150 }
4151 
4152 MODULE_LICENSE("GPL");
4153 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
4154 MODULE_AUTHOR("QLogic Corporation");
4155 MODULE_VERSION(QEDF_VERSION);
4156 module_init(qedf_init);
4157 module_exit(qedf_cleanup);