Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * This file is part of the Chelsio FCoE driver for Linux.
0003  *
0004  * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
0005  *
0006  * This software is available to you under a choice of one of two
0007  * licenses.  You may choose to be licensed under the terms of the GNU
0008  * General Public License (GPL) Version 2, available from the file
0009  * COPYING in the main directory of this source tree, or the
0010  * OpenIB.org BSD license below:
0011  *
0012  *     Redistribution and use in source and binary forms, with or
0013  *     without modification, are permitted provided that the following
0014  *     conditions are met:
0015  *
0016  *      - Redistributions of source code must retain the above
0017  *        copyright notice, this list of conditions and the following
0018  *        disclaimer.
0019  *
0020  *      - Redistributions in binary form must reproduce the above
0021  *        copyright notice, this list of conditions and the following
0022  *        disclaimer in the documentation and/or other materials
0023  *        provided with the distribution.
0024  *
0025  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
0026  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
0027  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
0028  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
0029  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
0030  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
0031  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0032  * SOFTWARE.
0033  */
0034 
0035 #include <linux/kernel.h>
0036 #include <linux/delay.h>
0037 #include <linux/slab.h>
0038 #include <linux/utsname.h>
0039 #include <scsi/scsi_device.h>
0040 #include <scsi/scsi_transport_fc.h>
0041 #include <asm/unaligned.h>
0042 #include <scsi/fc/fc_els.h>
0043 #include <scsi/fc/fc_fs.h>
0044 #include <scsi/fc/fc_gs.h>
0045 #include <scsi/fc/fc_ms.h>
0046 
0047 #include "csio_hw.h"
0048 #include "csio_mb.h"
0049 #include "csio_lnode.h"
0050 #include "csio_rnode.h"
0051 
/* Maximum number of FCoE remote nodes (rnodes) supported; module tunable. */
int csio_fcoe_rnodes = 1024;
/* Non-zero enables FDMI (Fabric-Device Management Interface) registration. */
int csio_fdmi_enable = 1;

/*
 * Yields a pointer to the low 3 bytes of a 32-bit port-id variable by
 * skipping its first byte.
 * NOTE(review): this assumes _x holds the port-id in big-endian/network
 * byte order — confirm against callers.
 */
#define PORT_ID_PTR(_x)         ((uint8_t *)(&_x) + 1)
0056 
0057 /* Lnode SM declarations */
0058 static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
0059 static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
0060 static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
0061 static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);
0062 
0063 static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
0064         void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
0065         enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);
0066 
0067 /* LN event mapping */
/*
 * Table indexed by the firmware RDEV event code (0..PROTO_ERR_IMPL_LOGO);
 * entries that do not drive the lnode state machine map to CSIO_LNE_NONE.
 * The order of entries must match the firmware event enumeration.
 */
static enum csio_ln_ev fwevt_to_lnevt[] = {
    CSIO_LNE_NONE,      /* None */
    CSIO_LNE_NONE,      /* PLOGI_ACC_RCVD  */
    CSIO_LNE_NONE,      /* PLOGI_RJT_RCVD  */
    CSIO_LNE_NONE,      /* PLOGI_RCVD      */
    CSIO_LNE_NONE,      /* PLOGO_RCVD      */
    CSIO_LNE_NONE,      /* PRLI_ACC_RCVD   */
    CSIO_LNE_NONE,      /* PRLI_RJT_RCVD   */
    CSIO_LNE_NONE,      /* PRLI_RCVD       */
    CSIO_LNE_NONE,      /* PRLO_RCVD       */
    CSIO_LNE_NONE,      /* NPORT_ID_CHGD   */
    CSIO_LNE_LOGO,      /* FLOGO_RCVD      */
    CSIO_LNE_LOGO,      /* CLR_VIRT_LNK_RCVD */
    CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD   */
    CSIO_LNE_NONE,      /* FLOGI_RJT_RCVD   */
    CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD   */
    CSIO_LNE_NONE,      /* FDISC_RJT_RCVD   */
    CSIO_LNE_NONE,      /* FLOGI_TMO_MAX_RETRY */
    CSIO_LNE_NONE,      /* IMPL_LOGO_ADISC_ACC */
    CSIO_LNE_NONE,      /* IMPL_LOGO_ADISC_RJT */
    CSIO_LNE_NONE,      /* IMPL_LOGO_ADISC_CNFLT */
    CSIO_LNE_NONE,      /* PRLI_TMO     */
    CSIO_LNE_NONE,      /* ADISC_TMO        */
    CSIO_LNE_NONE,      /* RSCN_DEV_LOST */
    CSIO_LNE_NONE,      /* SCR_ACC_RCVD */
    CSIO_LNE_NONE,      /* ADISC_RJT_RCVD */
    CSIO_LNE_NONE,      /* LOGO_SNT */
    CSIO_LNE_NONE,      /* PROTO_ERR_IMPL_LOGO */
};
0097 
/*
 * Map a firmware RDEV event code to an lnode event; out-of-range codes
 * (> PROTO_ERR_IMPL_LOGO) map to CSIO_LNE_NONE so the table index is
 * always in bounds.  The argument is parenthesized so expression
 * arguments (e.g. a + b) bind correctly.
 */
#define CSIO_FWE_TO_LNE(_evt)   (((_evt) > PROTO_ERR_IMPL_LOGO) ?     \
                        CSIO_LNE_NONE : \
                        fwevt_to_lnevt[(_evt)])
0101 
/*
 * Accessors into a CT (Common Transport) IU header at @cp.  The parameter
 * is parenthesized: without it, an expression argument such as `p + n`
 * would be cast before the addition ((struct fc_ct_hdr *)p + n).
 */
#define csio_ct_rsp(cp)     (((struct fc_ct_hdr *)(cp))->ct_cmd)
#define csio_ct_reason(cp)  (((struct fc_ct_hdr *)(cp))->ct_reason)
#define csio_ct_expl(cp)    (((struct fc_ct_hdr *)(cp))->ct_explan)
/* CT payload starts immediately after the fixed-size CT header. */
#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)(cp)) + FC_CT_HDR_LEN))
0106 
/*
 * csio_ln_lookup_by_portid - lookup lnode using given portid.
 * @hw: HW module
 * @portid: port-id.
 *
 * If found, returns lnode matching given portid otherwise returns NULL.
 */
0114 static struct csio_lnode *
0115 csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
0116 {
0117     struct csio_lnode *ln;
0118     struct list_head *tmp;
0119 
0120     /* Match siblings lnode with portid */
0121     list_for_each(tmp, &hw->sln_head) {
0122         ln = (struct csio_lnode *) tmp;
0123         if (ln->portid == portid)
0124             return ln;
0125     }
0126 
0127     return NULL;
0128 }
0129 
0130 /*
0131  * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
0132  * @hw - HW module
0133  * @vnpi - vnp index.
0134  * Returns - If found, returns lnode matching given vnp id
0135  * otherwise returns NULL.
0136  */
0137 static struct csio_lnode *
0138 csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
0139 {
0140     struct list_head *tmp1, *tmp2;
0141     struct csio_lnode *sln = NULL, *cln = NULL;
0142 
0143     if (list_empty(&hw->sln_head)) {
0144         CSIO_INC_STATS(hw, n_lnlkup_miss);
0145         return NULL;
0146     }
0147     /* Traverse sibling lnodes */
0148     list_for_each(tmp1, &hw->sln_head) {
0149         sln = (struct csio_lnode *) tmp1;
0150 
0151         /* Match sibling lnode */
0152         if (sln->vnp_flowid == vnp_id)
0153             return sln;
0154 
0155         if (list_empty(&sln->cln_head))
0156             continue;
0157 
0158         /* Traverse children lnodes */
0159         list_for_each(tmp2, &sln->cln_head) {
0160             cln = (struct csio_lnode *) tmp2;
0161 
0162             if (cln->vnp_flowid == vnp_id)
0163                 return cln;
0164         }
0165     }
0166     CSIO_INC_STATS(hw, n_lnlkup_miss);
0167     return NULL;
0168 }
0169 
0170 /**
0171  * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
0172  * @hw:     HW module.
0173  * @wwpn:   WWPN.
0174  *
0175  * If found, returns lnode matching given wwpn, returns NULL otherwise.
0176  */
0177 struct csio_lnode *
0178 csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
0179 {
0180     struct list_head *tmp1, *tmp2;
0181     struct csio_lnode *sln = NULL, *cln = NULL;
0182 
0183     if (list_empty(&hw->sln_head)) {
0184         CSIO_INC_STATS(hw, n_lnlkup_miss);
0185         return NULL;
0186     }
0187     /* Traverse sibling lnodes */
0188     list_for_each(tmp1, &hw->sln_head) {
0189         sln = (struct csio_lnode *) tmp1;
0190 
0191         /* Match sibling lnode */
0192         if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
0193             return sln;
0194 
0195         if (list_empty(&sln->cln_head))
0196             continue;
0197 
0198         /* Traverse children lnodes */
0199         list_for_each(tmp2, &sln->cln_head) {
0200             cln = (struct csio_lnode *) tmp2;
0201 
0202             if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
0203                 return cln;
0204         }
0205     }
0206     return NULL;
0207 }
0208 
0209 /* FDMI */
0210 static void
0211 csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
0212 {
0213     struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
0214     cmd->ct_rev = FC_CT_REV;
0215     cmd->ct_fs_type = type;
0216     cmd->ct_fs_subtype = sub_type;
0217     cmd->ct_cmd = htons(op);
0218 }
0219 
0220 static int
0221 csio_hostname(uint8_t *buf, size_t buf_len)
0222 {
0223     if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
0224         return 0;
0225     return -1;
0226 }
0227 
0228 static int
0229 csio_osname(uint8_t *buf, size_t buf_len)
0230 {
0231     if (snprintf(buf, buf_len, "%s %s %s",
0232              init_utsname()->sysname,
0233              init_utsname()->release,
0234              init_utsname()->version) > 0)
0235         return 0;
0236 
0237     return -1;
0238 }
0239 
0240 static inline void
0241 csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len)
0242 {
0243     uint16_t len;
0244     struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
0245 
0246     if (WARN_ON(val_len > U16_MAX))
0247         return;
0248 
0249     len = val_len;
0250 
0251     ae->type = htons(type);
0252     len += 4;       /* includes attribute type and length */
0253     len = (len + 3) & ~3;   /* should be multiple of 4 bytes */
0254     ae->len = htons(len);
0255     memcpy(ae->value, val, val_len);
0256     if (len > val_len)
0257         memset(ae->value + val_len, 0, len - val_len);
0258     *ptr += len;
0259 }
0260 
/*
 * csio_ln_fdmi_done - FDMI registration completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
0266 static void
0267 csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
0268 {
0269     void *cmd;
0270     struct csio_lnode *ln = fdmi_req->lnode;
0271 
0272     if (fdmi_req->wr_status != FW_SUCCESS) {
0273         csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
0274                 fdmi_req->wr_status);
0275         CSIO_INC_STATS(ln, n_fdmi_err);
0276     }
0277 
0278     cmd = fdmi_req->dma_buf.vaddr;
0279     if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
0280         csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
0281                 csio_ct_reason(cmd), csio_ct_expl(cmd));
0282     }
0283 }
0284 
0285 /*
0286  * csio_ln_fdmi_rhba_cbfn - RHBA completion
0287  * @hw: HW context
0288  * @fdmi_req: fdmi request
0289  */
static void
csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
    void *cmd;
    uint8_t *pld;
    uint32_t len = 0;
    __be32 val;
    __be16 mfs;
    uint32_t numattrs = 0;
    struct csio_lnode *ln = fdmi_req->lnode;
    struct fs_fdmi_attrs *attrib_blk;
    struct fc_fdmi_port_name *port_name;
    uint8_t buf[64];
    uint8_t *fc4_type;
    unsigned long flags;

    /* A WR failure is logged/counted but does not abort processing. */
    if (fdmi_req->wr_status != FW_SUCCESS) {
        csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
                fdmi_req->wr_status);
        CSIO_INC_STATS(ln, n_fdmi_err);
    }

    cmd = fdmi_req->dma_buf.vaddr;
    if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
        csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
                csio_ct_reason(cmd), csio_ct_expl(cmd));
    }

    /* The FDMI chain stops if the management rnode went away. */
    if (!csio_is_rnode_ready(fdmi_req->rnode)) {
        CSIO_INC_STATS(ln, n_fdmi_err);
        return;
    }

    /* Prepare CT hdr for RPA cmd (reuses the same DMA buffer). */
    memset(cmd, 0, FC_CT_HDR_LEN);
    csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);

    /* Prepare RPA payload: port name followed by an attribute block. */
    pld = (uint8_t *)csio_ct_get_pld(cmd);
    port_name = (struct fc_fdmi_port_name *)pld;
    memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
    pld += sizeof(*port_name);

    /* Start appending Port attributes; the count is patched in later. */
    attrib_blk = (struct fs_fdmi_attrs *)pld;
    attrib_blk->numattrs = 0;
    len += sizeof(attrib_blk->numattrs);
    pld += sizeof(attrib_blk->numattrs);

    /* FC-4 types bitmap: bytes 2 and 7 set (per this driver's usage —
     * NOTE(review): confirm which FC-4 types these bits advertise).
     */
    fc4_type = &buf[0];
    memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
    fc4_type[2] = 1;
    fc4_type[7] = 1;
    csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
               fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
    numattrs++;
    val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
    csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
               &val,
               FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
    numattrs++;

    /* NOTE(review): the 1G/10G cases compare against FW_PORT_CAP_* while
     * 25G..100G use FW_PORT_CAP32_* — confirm link_speed's encoding
     * covers both macro families.
     */
    if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
        val = htonl(FC_PORTSPEED_1GBIT);
    else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
        val = htonl(FC_PORTSPEED_10GBIT);
    else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G)
        val = htonl(FC_PORTSPEED_25GBIT);
    else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G)
        val = htonl(FC_PORTSPEED_40GBIT);
    else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G)
        val = htonl(FC_PORTSPEED_50GBIT);
    else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G)
        val = htonl(FC_PORTSPEED_100GBIT);
    else
        val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
    csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
               &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
    numattrs++;

    /* Max frame size from the common service parameters (2 bytes —
     * NOTE(review): the FDMI attribute is nominally 4 bytes; confirm
     * the fabric accepts the short form).
     */
    mfs = ln->ln_sparm.csp.sp_bb_data;
    csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
               &mfs, sizeof(mfs));
    numattrs++;

    strcpy(buf, "csiostor");
    csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
               strlen(buf));
    numattrs++;

    if (!csio_hostname(buf, sizeof(buf))) {
        csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
                   buf, strlen(buf));
        numattrs++;
    }
    attrib_blk->numattrs = htonl(numattrs);
    /* Final payload length is the cursor distance from the CT header;
     * the earlier len accumulation is superseded here.
     */
    len = (uint32_t)(pld - (uint8_t *)cmd);

    /* Submit FDMI RPA request; csio_ln_mgmt_submit_req() needs hw->lock. */
    spin_lock_irqsave(&hw->lock, flags);
    if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
                FCOE_CT, &fdmi_req->dma_buf, len)) {
        CSIO_INC_STATS(ln, n_fdmi_err);
        csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
    }
    spin_unlock_irqrestore(&hw->lock, flags);
}
0397 
0398 /*
0399  * csio_ln_fdmi_dprt_cbfn - DPRT completion
0400  * @hw: HW context
0401  * @fdmi_req: fdmi request
0402  */
static void
csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
    void *cmd;
    uint8_t *pld;
    uint32_t len = 0;
    uint32_t numattrs = 0;
    __be32  maxpayload = htonl(65536);  /* max CT payload we advertise */
    struct fc_fdmi_hba_identifier *hbaid;
    struct csio_lnode *ln = fdmi_req->lnode;
    struct fc_fdmi_rpl *reg_pl;
    struct fs_fdmi_attrs *attrib_blk;
    uint8_t buf[64];
    unsigned long flags;

    /* A WR failure is logged/counted but does not abort processing. */
    if (fdmi_req->wr_status != FW_SUCCESS) {
        csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
                fdmi_req->wr_status);
        CSIO_INC_STATS(ln, n_fdmi_err);
    }

    /* The FDMI chain stops if the management rnode went away. */
    if (!csio_is_rnode_ready(fdmi_req->rnode)) {
        CSIO_INC_STATS(ln, n_fdmi_err);
        return;
    }
    cmd = fdmi_req->dma_buf.vaddr;
    if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
        csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
                csio_ct_reason(cmd), csio_ct_expl(cmd));
    }

    /* Prepare CT hdr for RHBA cmd (reuses the same DMA buffer). */
    memset(cmd, 0, FC_CT_HDR_LEN);
    csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
    len = FC_CT_HDR_LEN;

    /* Prepare RHBA payload */
    pld = (uint8_t *)csio_ct_get_pld(cmd);
    hbaid = (struct fc_fdmi_hba_identifier *)pld;
    memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
    pld += sizeof(*hbaid);

    /* Register one port per hba */
    reg_pl = (struct fc_fdmi_rpl *)pld;
    reg_pl->numport = htonl(1);
    memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
    pld += sizeof(*reg_pl);

    /* Start appending HBA attributes; the count is patched in later. */
    attrib_blk = (struct fs_fdmi_attrs *)pld;
    attrib_blk->numattrs = 0;
    len += sizeof(attrib_blk->numattrs);
    pld += sizeof(attrib_blk->numattrs);

    csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
               FC_FDMI_HBA_ATTR_NODENAME_LEN);
    numattrs++;

    memset(buf, 0, sizeof(buf));

    strcpy(buf, "Chelsio Communications");
    csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
               strlen(buf));
    numattrs++;
    csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
               hw->vpd.sn, sizeof(hw->vpd.sn));
    numattrs++;
    csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
               sizeof(hw->vpd.id));
    numattrs++;
    csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
               hw->model_desc, strlen(hw->model_desc));
    numattrs++;
    csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
               hw->hw_ver, sizeof(hw->hw_ver));
    numattrs++;
    csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
               hw->fwrev_str, strlen(hw->fwrev_str));
    numattrs++;

    if (!csio_osname(buf, sizeof(buf))) {
        csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
                   buf, strlen(buf));
        numattrs++;
    }

    csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
               &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
    /* Final payload length is the cursor distance from the CT header;
     * the earlier len accumulation is superseded here.
     */
    len = (uint32_t)(pld - (uint8_t *)cmd);
    numattrs++;
    attrib_blk->numattrs = htonl(numattrs);

    /* Submit FDMI RHBA request; csio_ln_mgmt_submit_req() needs hw->lock. */
    spin_lock_irqsave(&hw->lock, flags);
    if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
                FCOE_CT, &fdmi_req->dma_buf, len)) {
        CSIO_INC_STATS(ln, n_fdmi_err);
        csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
    }
    spin_unlock_irqrestore(&hw->lock, flags);
}
0504 
0505 /*
0506  * csio_ln_fdmi_dhba_cbfn - DHBA completion
0507  * @hw: HW context
0508  * @fdmi_req: fdmi request
0509  */
static void
csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
    struct csio_lnode *ln = fdmi_req->lnode;
    void *cmd;
    struct fc_fdmi_port_name *port_name;
    uint32_t len;
    unsigned long flags;

    /* A WR failure is logged/counted but does not abort processing. */
    if (fdmi_req->wr_status != FW_SUCCESS) {
        csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
                fdmi_req->wr_status);
        CSIO_INC_STATS(ln, n_fdmi_err);
    }

    /* The FDMI chain stops if the management rnode went away. */
    if (!csio_is_rnode_ready(fdmi_req->rnode)) {
        CSIO_INC_STATS(ln, n_fdmi_err);
        return;
    }
    cmd = fdmi_req->dma_buf.vaddr;
    if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
        csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
                csio_ct_reason(cmd), csio_ct_expl(cmd));
    }

    /* Send FDMI cmd to de-register any Port attributes if registered
     * before
     */

    /* Prepare FDMI DPRT cmd: CT header + the port name to de-register. */
    memset(cmd, 0, FC_CT_HDR_LEN);
    csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
    len = FC_CT_HDR_LEN;
    port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
    memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
    len += sizeof(*port_name);

    /* Submit FDMI request; csio_ln_mgmt_submit_req() needs hw->lock. */
    spin_lock_irqsave(&hw->lock, flags);
    if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
                FCOE_CT, &fdmi_req->dma_buf, len)) {
        CSIO_INC_STATS(ln, n_fdmi_err);
        csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
    }
    spin_unlock_irqrestore(&hw->lock, flags);
}
0556 
0557 /**
0558  * csio_ln_fdmi_start - Start an FDMI request.
0559  * @ln:     lnode
0560  * @context:    session context
0561  *
0562  * Issued with lock held.
0563  */
int
csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
{
    struct csio_ioreq *fdmi_req;
    struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
    void *cmd;
    struct fc_fdmi_hba_identifier *hbaid;
    uint32_t len;

    /* FDMI must have been enabled on this lnode. */
    if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
        return -EPROTONOSUPPORT;

    /* NOTE(review): only the error counter is bumped here; the request
     * is still issued even when the rnode is not ready — confirm this
     * is intentional (the completion callbacks do bail out then).
     */
    if (!csio_is_rnode_ready(fdmi_rn))
        CSIO_INC_STATS(ln, n_fdmi_err);

    /* Send FDMI cmd to de-register any HBA attributes if registered
     * before
     */

    fdmi_req = ln->mgmt_req;
    fdmi_req->lnode = ln;
    fdmi_req->rnode = fdmi_rn;

    /* Prepare FDMI DHBA cmd: CT header + HBA identifier (our WWPN). */
    cmd = fdmi_req->dma_buf.vaddr;
    memset(cmd, 0, FC_CT_HDR_LEN);
    csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
    len = FC_CT_HDR_LEN;

    hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
    memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
    len += sizeof(*hbaid);

    /* Submit FDMI request (caller already holds the lock — see header
     * comment above); completion continues in csio_ln_fdmi_dhba_cbfn().
     */
    if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
                    FCOE_CT, &fdmi_req->dma_buf, len)) {
        CSIO_INC_STATS(ln, n_fdmi_err);
        csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
    }

    return 0;
}
0606 
/*
 * csio_ln_vnp_read_cbfn - vnp read completion handler.
 * @hw: HW module.
 * @mbp: Mailbox command/response buffer.
 *
 * Reads vnp response and updates ln parameters.
 */
static void
csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
    struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
    struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
    struct fc_els_csp *csp;
    struct fc_els_cssp *clsp;
    enum fw_retval retval;
    __be32 nport_id = 0;

    retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
    if (retval != FW_SUCCESS) {
        csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
        mempool_free(mbp, hw->mb_mempool);
        return;
    }

    /* Updates below touch shared lnode state. */
    spin_lock_irq(&hw->lock);

    memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
    /* The N_Port ID is carried in the low 3 bytes of the VN-Port MAC
     * (FPMA scheme); ntohl + >>8 drops the unused 4th byte.
     */
    memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
    ln->nport_id = ntohl(nport_id);
    ln->nport_id = ln->nport_id >> 8;

    /* Update WWNs */
    /*
     * This may look like a duplication of what csio_fcoe_enable_link()
     * does, but is absolutely necessary if the vnpi changes between
     * a FCOE LINK UP and FCOE LINK DOWN.
     */
    memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
    memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);

    /* Copy common sparam */
    csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
    ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
    ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
    ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
    ln->ln_sparm.csp.sp_features = csp->sp_features;
    ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
    ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
    ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;

    /* Copy word 0 & word 1 of class sparam (class 3 -> clsp[2]). */
    clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
    ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
    ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
    ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
    ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;

    spin_unlock_irq(&hw->lock);

    /* Mailbox buffer is owned by this callback; release it. */
    mempool_free(mbp, hw->mb_mempool);

    /* Send an event to update local attribs */
    csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
}
0671 
0672 /*
0673  * csio_ln_vnp_read - Read vnp params.
0674  * @ln: lnode
0675  * @cbfn: Completion handler.
0676  *
0677  * Issued with lock held.
0678  */
0679 static int
0680 csio_ln_vnp_read(struct csio_lnode *ln,
0681         void (*cbfn) (struct csio_hw *, struct csio_mb *))
0682 {
0683     struct csio_hw *hw = ln->hwp;
0684     struct csio_mb  *mbp;
0685 
0686     /* Allocate Mbox request */
0687     mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
0688     if (!mbp) {
0689         CSIO_INC_STATS(hw, n_err_nomem);
0690         return -ENOMEM;
0691     }
0692 
0693     /* Prepare VNP Command */
0694     csio_fcoe_vnp_read_init_mb(ln, mbp,
0695                     CSIO_MB_DEFAULT_TMO,
0696                     ln->fcf_flowid,
0697                     ln->vnp_flowid,
0698                     cbfn);
0699 
0700     /* Issue MBOX cmd */
0701     if (csio_mb_issue(hw, mbp)) {
0702         csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
0703         mempool_free(mbp, hw->mb_mempool);
0704         return -EINVAL;
0705     }
0706 
0707     return 0;
0708 }
0709 
0710 /*
0711  * csio_fcoe_enable_link - Enable fcoe link.
0712  * @ln: lnode
0713  * @enable: enable/disable
0714  * Issued with lock held.
0715  * Issues mbox cmd to bring up FCOE link on port associated with given ln.
0716  */
static int
csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
{
    struct csio_hw *hw = ln->hwp;
    struct csio_mb  *mbp;
    enum fw_retval retval;
    uint8_t portid;
    uint8_t sub_op;
    struct fw_fcoe_link_cmd *lcmd;
    int i;

    mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
    if (!mbp) {
        CSIO_INC_STATS(hw, n_err_nomem);
        return -ENOMEM;
    }

    portid = ln->portid;
    sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;

    /* NOTE(review): the "%s" below tests sub_op's truthiness, which
     * matches @enable only if FCOE_LINK_UP != 0 and FCOE_LINK_DOWN == 0
     * — confirm the enum values.
     */
    csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
         sub_op ? "UP" : "DOWN", portid);

    /* NULL cbfn: the command is issued and completed synchronously. */
    csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
                      portid, sub_op, 0, 0, 0, NULL);

    if (csio_mb_issue(hw, mbp)) {
        csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
            portid);
        mempool_free(mbp, hw->mb_mempool);
        return -EINVAL;
    }

    retval = csio_mb_fw_retval(mbp);
    if (retval != FW_SUCCESS) {
        csio_err(hw,
             "FCOE LINK %s cmd on port[%d] failed with "
             "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
        mempool_free(mbp, hw->mb_mempool);
        return -EINVAL;
    }

    /* WWNs/MAC in the response are only valid on LINK UP. */
    if (!enable)
        goto out;

    lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;

    memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
    memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);

    /* Record the physical MAC against the matching physical port. */
    for (i = 0; i < CSIO_MAX_PPORTS; i++)
        if (hw->pport[i].portid == portid)
            memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);

out:
    mempool_free(mbp, hw->mb_mempool);
    return 0;
}
0775 
/*
 * csio_ln_read_fcf_cbfn - Read fcf parameters
 * @hw: HW module.
 * @mbp: Mailbox command/response buffer.
 *
 * Reads the fcf response and updates the lnode's fcf information.
 */
static void
csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
    struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
    struct csio_fcf_info    *fcf_info;
    struct fw_fcoe_fcf_cmd *rsp =
                (struct fw_fcoe_fcf_cmd *)(mbp->mb);
    enum fw_retval retval;

    retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
    if (retval != FW_SUCCESS) {
        csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
                retval);
        mempool_free(mbp, hw->mb_mempool);
        return;
    }

    /* Copy the firmware's FCF record into shared lnode state. */
    spin_lock_irq(&hw->lock);
    fcf_info = ln->fcfinfo;
    fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
                    ntohs(rsp->priority_pkd));
    fcf_info->vf_id = ntohs(rsp->vf_id);
    fcf_info->vlan_id = rsp->vlan_id;
    fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
    fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
    fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
    /* fpma/spma/login/portid are packed into one response byte. */
    fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
    fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
    fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
    fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
    memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
    memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
    memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
    memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
    memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));

    spin_unlock_irq(&hw->lock);

    /* Mailbox buffer is owned by this callback; release it. */
    mempool_free(mbp, hw->mb_mempool);
}
0822 
0823 /*
0824  * csio_ln_read_fcf_entry - Read fcf entry.
0825  * @ln: lnode
0826  * @cbfn: Completion handler.
0827  *
0828  * Issued with lock held.
0829  */
0830 static int
0831 csio_ln_read_fcf_entry(struct csio_lnode *ln,
0832             void (*cbfn) (struct csio_hw *, struct csio_mb *))
0833 {
0834     struct csio_hw *hw = ln->hwp;
0835     struct csio_mb  *mbp;
0836 
0837     mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
0838     if (!mbp) {
0839         CSIO_INC_STATS(hw, n_err_nomem);
0840         return -ENOMEM;
0841     }
0842 
0843     /* Get FCoE FCF information */
0844     csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
0845                       ln->portid, ln->fcf_flowid, cbfn);
0846 
0847     if (csio_mb_issue(hw, mbp)) {
0848         csio_err(hw, "failed to issue FCOE FCF cmd\n");
0849         mempool_free(mbp, hw->mb_mempool);
0850         return -EINVAL;
0851     }
0852 
0853     return 0;
0854 }
0855 
0856 /*
0857  * csio_handle_link_up - Logical Linkup event.
0858  * @hw - HW module.
0859  * @portid - Physical port number
0860  * @fcfi - FCF index.
0861  * @vnpi - VNP index.
0862  * Returns - none.
0863  *
0864  * This event is received from FW, when virtual link is established between
0865  * Physical port[ENode] and FCF. If its new vnpi, then local node object is
0866  * created on this FCF and set to [ONLINE] state.
0867  * Lnode waits for FW_RDEV_CMD event to be received indicating that
0868  * Fabric login is completed and lnode moves to [READY] state.
0869  *
0870  * This called with hw lock held
0871  */
0872 static void
0873 csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
0874             uint32_t vnpi)
0875 {
0876     struct csio_lnode *ln = NULL;
0877 
0878     /* Lookup lnode based on vnpi */
0879     ln = csio_ln_lookup_by_vnpi(hw, vnpi);
0880     if (!ln) {
0881         /* Pick lnode based on portid */
0882         ln = csio_ln_lookup_by_portid(hw, portid);
0883         if (!ln) {
0884             csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
0885                 portid);
0886             CSIO_DB_ASSERT(0);
0887             return;
0888         }
0889 
0890         /* Check if lnode has valid vnp flowid */
0891         if (ln->vnp_flowid != CSIO_INVALID_IDX) {
0892             /* New VN-Port */
0893             spin_unlock_irq(&hw->lock);
0894             csio_lnode_alloc(hw);
0895             spin_lock_irq(&hw->lock);
0896             if (!ln) {
0897                 csio_err(hw,
0898                      "failed to allocate fcoe lnode"
0899                      "for port:%d vnpi:x%x\n",
0900                      portid, vnpi);
0901                 CSIO_DB_ASSERT(0);
0902                 return;
0903             }
0904             ln->portid = portid;
0905         }
0906         ln->vnp_flowid = vnpi;
0907         ln->dev_num &= ~0xFFFF;
0908         ln->dev_num |= vnpi;
0909     }
0910 
0911     /*Initialize fcfi */
0912     ln->fcf_flowid = fcfi;
0913 
0914     csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);
0915 
0916     CSIO_INC_STATS(ln, n_link_up);
0917 
0918     /* Send LINKUP event to SM */
0919     csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
0920 }
0921 
0922 /*
0923  * csio_post_event_rns
0924  * @ln - FCOE lnode
0925  * @evt - Given rnode event
0926  * Returns - none
0927  *
0928  * Posts given rnode event to all FCOE rnodes connected with given Lnode.
0929  * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
0930  * event.
0931  *
0932  * This called with hw lock held
0933  */
0934 static void
0935 csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
0936 {
0937     struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
0938     struct list_head *tmp, *next;
0939     struct csio_rnode *rn;
0940 
0941     list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
0942         rn = (struct csio_rnode *) tmp;
0943         csio_post_event(&rn->sm, evt);
0944     }
0945 }
0946 
0947 /*
0948  * csio_cleanup_rns
0949  * @ln - FCOE lnode
0950  * Returns - none
0951  *
0952  * Frees all FCOE rnodes connected with given Lnode.
0953  *
0954  * This called with hw lock held
0955  */
0956 static void
0957 csio_cleanup_rns(struct csio_lnode *ln)
0958 {
0959     struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
0960     struct list_head *tmp, *next_rn;
0961     struct csio_rnode *rn;
0962 
0963     list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
0964         rn = (struct csio_rnode *) tmp;
0965         csio_put_rnode(ln, rn);
0966     }
0967 
0968 }
0969 
0970 /*
0971  * csio_post_event_lns
0972  * @ln - FCOE lnode
0973  * @evt - Given lnode event
0974  * Returns - none
0975  *
0976  * Posts given lnode event to all FCOE lnodes connected with given Lnode.
0977  * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
0978  * event.
0979  *
0980  * This called with hw lock held
0981  */
0982 static void
0983 csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
0984 {
0985     struct list_head *tmp;
0986     struct csio_lnode *cln, *sln;
0987 
0988     /* If NPIV lnode, send evt only to that and return */
0989     if (csio_is_npiv_ln(ln)) {
0990         csio_post_event(&ln->sm, evt);
0991         return;
0992     }
0993 
0994     sln = ln;
0995     /* Traverse children lnodes list and send evt */
0996     list_for_each(tmp, &sln->cln_head) {
0997         cln = (struct csio_lnode *) tmp;
0998         csio_post_event(&cln->sm, evt);
0999     }
1000 
1001     /* Send evt to parent lnode */
1002     csio_post_event(&ln->sm, evt);
1003 }
1004 
1005 /*
 * csio_ln_down - Local nport is down
1007  * @ln - FCOE Lnode
1008  * Returns - none
1009  *
1010  * Sends LINK_DOWN events to Lnode and its associated NPIVs lnodes.
1011  *
1012  * This called with hw lock held
1013  */
static void
csio_ln_down(struct csio_lnode *ln)
{
    /* LINK_DOWN is fanned out to this lnode and all of its NPIV
     * children by csio_post_event_lns().
     */
    csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
}
1019 
1020 /*
1021  * csio_handle_link_down - Logical Linkdown event.
1022  * @hw - HW module.
1023  * @portid - Physical port number
1024  * @fcfi - FCF index.
1025  * @vnpi - VNP index.
1026  * Returns - none
1027  *
1028  * This event is received from FW, when virtual link goes down between
1029  * Physical port[ENode] and FCF. Lnode and its associated NPIVs lnode hosted on
1030  * this vnpi[VN-Port] will be de-instantiated.
1031  *
1032  * This called with hw lock held
1033  */
1034 static void
1035 csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
1036               uint32_t vnpi)
1037 {
1038     struct csio_fcf_info *fp;
1039     struct csio_lnode *ln;
1040 
1041     /* Lookup lnode based on vnpi */
1042     ln = csio_ln_lookup_by_vnpi(hw, vnpi);
1043     if (ln) {
1044         fp = ln->fcfinfo;
1045         CSIO_INC_STATS(ln, n_link_down);
1046 
1047         /*Warn if linkdown received if lnode is not in ready state */
1048         if (!csio_is_lnode_ready(ln)) {
1049             csio_ln_warn(ln,
1050                 "warn: FCOE link is already in offline "
1051                 "Ignoring Fcoe linkdown event on portid %d\n",
1052                  portid);
1053             CSIO_INC_STATS(ln, n_evt_drop);
1054             return;
1055         }
1056 
1057         /* Verify portid */
1058         if (fp->portid != portid) {
1059             csio_ln_warn(ln,
1060                 "warn: FCOE linkdown recv with "
1061                 "invalid port %d\n", portid);
1062             CSIO_INC_STATS(ln, n_evt_drop);
1063             return;
1064         }
1065 
1066         /* verify fcfi */
1067         if (ln->fcf_flowid != fcfi) {
1068             csio_ln_warn(ln,
1069                 "warn: FCOE linkdown recv with "
1070                 "invalid fcfi x%x\n", fcfi);
1071             CSIO_INC_STATS(ln, n_evt_drop);
1072             return;
1073         }
1074 
1075         csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);
1076 
1077         /* Send LINK_DOWN event to lnode s/m */
1078         csio_ln_down(ln);
1079 
1080         return;
1081     } else {
1082         csio_warn(hw,
1083               "warn: FCOE linkdown recv with invalid vnpi x%x\n",
1084               vnpi);
1085         CSIO_INC_STATS(hw, n_evt_drop);
1086     }
1087 }
1088 
1089 /*
1090  * csio_is_lnode_ready - Checks FCOE lnode is in ready state.
1091  * @ln: Lnode module
1092  *
1093  * Returns True if FCOE lnode is in ready state.
1094  */
1095 int
1096 csio_is_lnode_ready(struct csio_lnode *ln)
1097 {
1098     return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
1099 }
1100 
1101 /*****************************************************************************/
1102 /* START: Lnode SM                                                           */
1103 /*****************************************************************************/
1104 /*
1105  * csio_lns_uninit - The request in uninit state.
1106  * @ln - FCOE lnode.
1107  * @evt - Event to be processed.
1108  *
1109  * Process the given lnode event which is currently in "uninit" state.
1110  * Invoked with HW lock held.
1111  * Return - none.
1112  */
static void
csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
{
    struct csio_hw *hw = csio_lnode_to_hw(ln);
    struct csio_lnode *rln = hw->rln;  /* root lnode holds the FCF list */
    int rv;

    CSIO_INC_STATS(ln, n_evt_sm[evt]);
    switch (evt) {
    case CSIO_LNE_LINKUP:
        /* Transition to online first; the mailbox completions below
         * (FCF read, VNP read) arrive asynchronously in that state.
         */
        csio_set_state(&ln->sm, csio_lns_online);
        /* Read FCF only for physical lnode */
        if (csio_is_phys_ln(ln)) {
            rv = csio_ln_read_fcf_entry(ln,
                    csio_ln_read_fcf_cbfn);
            if (rv != 0) {
                /* TODO: Send HW RESET event */
                CSIO_INC_STATS(ln, n_err);
                break;
            }

            /* Add FCF record */
            list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
        }

        /* Kick off the VNP read; its completion drives fabric login. */
        rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
        if (rv != 0) {
            /* TODO: Send HW RESET event */
            CSIO_INC_STATS(ln, n_err);
        }
        break;

    case CSIO_LNE_DOWN_LINK:
        /* Already uninitialized - nothing to tear down. */
        break;

    default:
        csio_ln_dbg(ln,
                "unexp ln event %d recv from did:x%x in "
                "ln state[uninit].\n", evt, ln->nport_id);
        CSIO_INC_STATS(ln, n_evt_unexp);
        break;
    } /* switch event */
}
1156 
1157 /*
1158  * csio_lns_online - The request in online state.
1159  * @ln - FCOE lnode.
1160  * @evt - Event to be processed.
1161  *
1162  * Process the given lnode event which is currently in "online" state.
1163  * Invoked with HW lock held.
1164  * Return - none.
1165  */
1166 static void
1167 csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
1168 {
1169     struct csio_hw *hw = csio_lnode_to_hw(ln);
1170 
1171     CSIO_INC_STATS(ln, n_evt_sm[evt]);
1172     switch (evt) {
1173     case CSIO_LNE_LINKUP:
1174         csio_ln_warn(ln,
1175                  "warn: FCOE link is up already "
1176                  "Ignoring linkup on port:%d\n", ln->portid);
1177         CSIO_INC_STATS(ln, n_evt_drop);
1178         break;
1179 
1180     case CSIO_LNE_FAB_INIT_DONE:
1181         csio_set_state(&ln->sm, csio_lns_ready);
1182 
1183         spin_unlock_irq(&hw->lock);
1184         csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
1185         spin_lock_irq(&hw->lock);
1186 
1187         break;
1188 
1189     case CSIO_LNE_LINK_DOWN:
1190     case CSIO_LNE_DOWN_LINK:
1191         csio_set_state(&ln->sm, csio_lns_uninit);
1192         if (csio_is_phys_ln(ln)) {
1193             /* Remove FCF entry */
1194             list_del_init(&ln->fcfinfo->list);
1195         }
1196         break;
1197 
1198     default:
1199         csio_ln_dbg(ln,
1200                 "unexp ln event %d recv from did:x%x in "
1201                 "ln state[uninit].\n", evt, ln->nport_id);
1202         CSIO_INC_STATS(ln, n_evt_unexp);
1203 
1204         break;
1205     } /* switch event */
1206 }
1207 
1208 /*
1209  * csio_lns_ready - The request in ready state.
1210  * @ln - FCOE lnode.
1211  * @evt - Event to be processed.
1212  *
1213  * Process the given lnode event which is currently in "ready" state.
1214  * Invoked with HW lock held.
1215  * Return - none.
1216  */
1217 static void
1218 csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
1219 {
1220     struct csio_hw *hw = csio_lnode_to_hw(ln);
1221 
1222     CSIO_INC_STATS(ln, n_evt_sm[evt]);
1223     switch (evt) {
1224     case CSIO_LNE_FAB_INIT_DONE:
1225         csio_ln_dbg(ln,
1226                 "ignoring event %d recv from did x%x"
1227                 "in ln state[ready].\n", evt, ln->nport_id);
1228         CSIO_INC_STATS(ln, n_evt_drop);
1229         break;
1230 
1231     case CSIO_LNE_LINK_DOWN:
1232         csio_set_state(&ln->sm, csio_lns_offline);
1233         csio_post_event_rns(ln, CSIO_RNFE_DOWN);
1234 
1235         spin_unlock_irq(&hw->lock);
1236         csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
1237         spin_lock_irq(&hw->lock);
1238 
1239         if (csio_is_phys_ln(ln)) {
1240             /* Remove FCF entry */
1241             list_del_init(&ln->fcfinfo->list);
1242         }
1243         break;
1244 
1245     case CSIO_LNE_DOWN_LINK:
1246         csio_set_state(&ln->sm, csio_lns_offline);
1247         csio_post_event_rns(ln, CSIO_RNFE_DOWN);
1248 
1249         /* Host need to issue aborts in case if FW has not returned
1250          * WRs with status "ABORTED"
1251          */
1252         spin_unlock_irq(&hw->lock);
1253         csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
1254         spin_lock_irq(&hw->lock);
1255 
1256         if (csio_is_phys_ln(ln)) {
1257             /* Remove FCF entry */
1258             list_del_init(&ln->fcfinfo->list);
1259         }
1260         break;
1261 
1262     case CSIO_LNE_CLOSE:
1263         csio_set_state(&ln->sm, csio_lns_uninit);
1264         csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
1265         break;
1266 
1267     case CSIO_LNE_LOGO:
1268         csio_set_state(&ln->sm, csio_lns_offline);
1269         csio_post_event_rns(ln, CSIO_RNFE_DOWN);
1270         break;
1271 
1272     default:
1273         csio_ln_dbg(ln,
1274                 "unexp ln event %d recv from did:x%x in "
1275                 "ln state[uninit].\n", evt, ln->nport_id);
1276         CSIO_INC_STATS(ln, n_evt_unexp);
1277         CSIO_DB_ASSERT(0);
1278         break;
1279     } /* switch event */
1280 }
1281 
1282 /*
1283  * csio_lns_offline - The request in offline state.
1284  * @ln - FCOE lnode.
1285  * @evt - Event to be processed.
1286  *
1287  * Process the given lnode event which is currently in "offline" state.
1288  * Invoked with HW lock held.
1289  * Return - none.
1290  */
static void
csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
{
    struct csio_hw *hw = csio_lnode_to_hw(ln);
    struct csio_lnode *rln = hw->rln;  /* root lnode holds the FCF list */
    int rv;

    CSIO_INC_STATS(ln, n_evt_sm[evt]);
    switch (evt) {
    case CSIO_LNE_LINKUP:
        /* Same re-login sequence as the uninit-state LINKUP handler:
         * go online, (re)read FCF on the physical lnode, then read VNP.
         */
        csio_set_state(&ln->sm, csio_lns_online);
        /* Read FCF only for physical lnode */
        if (csio_is_phys_ln(ln)) {
            rv = csio_ln_read_fcf_entry(ln,
                    csio_ln_read_fcf_cbfn);
            if (rv != 0) {
                /* TODO: Send HW RESET event */
                CSIO_INC_STATS(ln, n_err);
                break;
            }

            /* Add FCF record */
            list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
        }

        rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
        if (rv != 0) {
            /* TODO: Send HW RESET event */
            CSIO_INC_STATS(ln, n_err);
        }
        break;

    case CSIO_LNE_LINK_DOWN:
    case CSIO_LNE_DOWN_LINK:
    case CSIO_LNE_LOGO:
        /* Already offline - these down-type events are redundant. */
        csio_ln_dbg(ln,
                "ignoring event %d recv from did x%x"
                "in ln state[offline].\n", evt, ln->nport_id);
        CSIO_INC_STATS(ln, n_evt_drop);
        break;

    case CSIO_LNE_CLOSE:
        csio_set_state(&ln->sm, csio_lns_uninit);
        csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
        break;

    default:
        csio_ln_dbg(ln,
                "unexp ln event %d recv from did:x%x in "
                "ln state[offline]\n", evt, ln->nport_id);
        CSIO_INC_STATS(ln, n_evt_unexp);
        CSIO_DB_ASSERT(0);
        break;
    } /* switch event */
}
1346 
1347 /*****************************************************************************/
1348 /* END: Lnode SM                                                             */
1349 /*****************************************************************************/
1350 
1351 static void
1352 csio_free_fcfinfo(struct kref *kref)
1353 {
1354     struct csio_fcf_info *fcfinfo = container_of(kref,
1355                         struct csio_fcf_info, kref);
1356     kfree(fcfinfo);
1357 }
1358 
1359 /* Helper routines for attributes  */
1360 /*
1361  * csio_lnode_state_to_str - Get current state of FCOE lnode.
1362  * @ln - lnode
1363  * @str - state of lnode.
1364  *
1365  */
1366 void
1367 csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
1368 {
1369     if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
1370         strcpy(str, "UNINIT");
1371         return;
1372     }
1373     if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
1374         strcpy(str, "READY");
1375         return;
1376     }
1377     if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
1378         strcpy(str, "OFFLINE");
1379         return;
1380     }
1381     strcpy(str, "UNKNOWN");
1382 } /* csio_lnode_state_to_str */
1383 
1384 
1385 int
1386 csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
1387             struct fw_fcoe_port_stats *port_stats)
1388 {
1389     struct csio_mb  *mbp;
1390     struct fw_fcoe_port_cmd_params portparams;
1391     enum fw_retval retval;
1392     int idx;
1393 
1394     mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1395     if (!mbp) {
1396         csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");
1397         return -EINVAL;
1398     }
1399     portparams.portid = portid;
1400 
1401     for (idx = 1; idx <= 3; idx++) {
1402         portparams.idx = (idx-1)*6 + 1;
1403         portparams.nstats = 6;
1404         if (idx == 3)
1405             portparams.nstats = 4;
1406         csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
1407                             &portparams, NULL);
1408         if (csio_mb_issue(hw, mbp)) {
1409             csio_err(hw, "Issue of FCoE port params failed!\n");
1410             mempool_free(mbp, hw->mb_mempool);
1411             return -EINVAL;
1412         }
1413         csio_mb_process_portparams_rsp(hw, mbp, &retval,
1414                         &portparams, port_stats);
1415     }
1416 
1417     mempool_free(mbp, hw->mb_mempool);
1418     return 0;
1419 }
1420 
1421 /*
1422  * csio_ln_mgmt_wr_handler -Mgmt Work Request handler.
1423  * @wr - WR.
1424  * @len - WR len.
1425  * This handler is invoked when an outstanding mgmt WR is completed.
1426  * Its invoked in the context of FW event worker thread for every
1427  * mgmt event received.
1428  * Return - none.
1429  */
1430 
1431 static void
1432 csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
1433 {
1434     struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
1435     struct csio_ioreq *io_req = NULL;
1436     struct fw_fcoe_els_ct_wr *wr_cmd;
1437 
1438 
1439     wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;
1440 
1441     if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
1442         csio_err(mgmtm->hw,
1443              "Invalid ELS CT WR length recvd, len:%x\n", len);
1444         mgmtm->stats.n_err++;
1445         return;
1446     }
1447 
1448     io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
1449     io_req->wr_status = csio_wr_status(wr_cmd);
1450 
1451     /* lookup ioreq exists in our active Q */
1452     spin_lock_irq(&hw->lock);
1453     if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
1454         csio_err(mgmtm->hw,
1455             "Error- Invalid IO handle recv in WR. handle: %p\n",
1456             io_req);
1457         mgmtm->stats.n_err++;
1458         spin_unlock_irq(&hw->lock);
1459         return;
1460     }
1461 
1462     mgmtm = csio_hw_to_mgmtm(hw);
1463 
1464     /* Dequeue from active queue */
1465     list_del_init(&io_req->sm.sm_list);
1466     mgmtm->stats.n_active--;
1467     spin_unlock_irq(&hw->lock);
1468 
1469     /* io_req will be freed by completion handler */
1470     if (io_req->io_cbfn)
1471         io_req->io_cbfn(hw, io_req);
1472 }
1473 
1474 /**
1475  * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
1476  * @hw:     HW module
1477  * @cpl_op: CPL opcode
1478  * @cmd:    FW cmd/WR.
1479  *
1480  * Process received FCoE cmd/WR event from FW.
1481  */
void
csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
{
    struct csio_lnode *ln;
    struct csio_rnode *rn;
    uint8_t portid, opcode = *(uint8_t *)cmd;
    struct fw_fcoe_link_cmd *lcmd;
    struct fw_wr_hdr *wr;
    struct fw_rdev_wr *rdev_wr;
    enum fw_fcoe_link_status lstatus;
    uint32_t fcfi, rdev_flowid, vnpi;
    enum csio_ln_ev evt;

    /* Dispatch on CPL opcode + embedded FW command opcode. */
    if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {

        /* Virtual-link state change: decode port/FCF/VNP ids. */
        lcmd = (struct fw_fcoe_link_cmd *)cmd;
        lstatus = lcmd->lstatus;
        portid = FW_FCOE_LINK_CMD_PORTID_GET(
                    ntohl(lcmd->op_to_portid));
        fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
        vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));

        if (lstatus == FCOE_LINKUP) {

            /* HW lock here */
            spin_lock_irq(&hw->lock);
            csio_handle_link_up(hw, portid, fcfi, vnpi);
            spin_unlock_irq(&hw->lock);
            /* HW un lock here */

        } else if (lstatus == FCOE_LINKDOWN) {

            /* HW lock here */
            spin_lock_irq(&hw->lock);
            csio_handle_link_down(hw, portid, fcfi, vnpi);
            spin_unlock_irq(&hw->lock);
            /* HW un lock here */
        } else {
            csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
                  lcmd->lstatus);
            CSIO_INC_STATS(hw, n_cpl_unexp);
        }
    } else if (cpl_op == CPL_FW6_PLD) {
        /* cmd + 4: skip four 64-bit flits to reach the WR header -
         * NOTE(review): assumed CPL_FW6_PLD header size; confirm.
         */
        wr = (struct fw_wr_hdr *) (cmd + 4);
        if (FW_WR_OP_G(be32_to_cpu(wr->hi))
            == FW_RDEV_WR) {

            /* Remote-device (rnode) event from the fabric. */
            rdev_wr = (struct fw_rdev_wr *) (cmd + 4);

            rdev_flowid = FW_RDEV_WR_FLOWID_GET(
                    ntohl(rdev_wr->alloc_to_len16));
            vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
                    ntohl(rdev_wr->flags_to_assoc_flowid));

            csio_dbg(hw,
                "FW_RDEV_WR: flowid:x%x ev_cause:x%x "
                "vnpi:0x%x\n", rdev_flowid,
                rdev_wr->event_cause, vnpi);

            if (rdev_wr->protocol != PROT_FCOE) {
                csio_err(hw,
                    "FW_RDEV_WR: invalid proto:x%x "
                    "received with flowid:x%x\n",
                    rdev_wr->protocol,
                    rdev_flowid);
                CSIO_INC_STATS(hw, n_evt_drop);
                return;
            }

            /* HW lock here */
            spin_lock_irq(&hw->lock);
            ln = csio_ln_lookup_by_vnpi(hw, vnpi);
            if (!ln) {
                csio_err(hw,
                    "FW_DEV_WR: invalid vnpi:x%x received "
                    "with flowid:x%x\n", vnpi, rdev_flowid);
                CSIO_INC_STATS(hw, n_evt_drop);
                goto out_pld;
            }

            /* Find or create the rnode for this flow id. */
            rn = csio_confirm_rnode(ln, rdev_flowid,
                    &rdev_wr->u.fcoe_rdev);
            if (!rn) {
                csio_ln_dbg(ln,
                    "Failed to confirm rnode "
                    "for flowid:x%x\n", rdev_flowid);
                CSIO_INC_STATS(hw, n_evt_drop);
                goto out_pld;
            }

            /* save previous event for debugging */
            ln->prev_evt = ln->cur_evt;
            ln->cur_evt = rdev_wr->event_cause;
            CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);

            /* Translate all the fabric events to lnode SM events */
            evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
            if (evt) {
                csio_ln_dbg(ln,
                    "Posting event to lnode event:%d "
                    "cause:%d flowid:x%x\n", evt,
                    rdev_wr->event_cause, rdev_flowid);
                csio_post_event(&ln->sm, evt);
            }

            /* Handover event to rn SM here. */
            csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
out_pld:
            spin_unlock_irq(&hw->lock);
            return;
        } else {
            csio_warn(hw, "unexpected WR op(0x%x) recv\n",
                  FW_WR_OP_G(be32_to_cpu((wr->hi))));
            CSIO_INC_STATS(hw, n_cpl_unexp);
        }
    } else if (cpl_op == CPL_FW6_MSG) {
        /* ELS/CT management WR completion. */
        wr = (struct fw_wr_hdr *) (cmd);
        if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
            csio_ln_mgmt_wr_handler(hw, wr,
                    sizeof(struct fw_fcoe_els_ct_wr));
        } else {
            csio_warn(hw, "unexpected WR op(0x%x) recv\n",
                  FW_WR_OP_G(be32_to_cpu((wr->hi))));
            CSIO_INC_STATS(hw, n_cpl_unexp);
        }
    } else {
        csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
        CSIO_INC_STATS(hw, n_cpl_unexp);
    }
}
1612 
1613 /**
1614  * csio_lnode_start - Kickstart lnode discovery.
1615  * @ln:     lnode
1616  *
1617  * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
1618  */
1619 int
1620 csio_lnode_start(struct csio_lnode *ln)
1621 {
1622     int rv = 0;
1623     if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
1624         rv = csio_fcoe_enable_link(ln, 1);
1625         ln->flags |= CSIO_LNF_LINK_ENABLE;
1626     }
1627 
1628     return rv;
1629 }
1630 
1631 /**
1632  * csio_lnode_stop - Stop the lnode.
1633  * @ln:     lnode
1634  *
1635  * This routine is invoked by HW module to stop lnode and its associated NPIV
1636  * lnodes.
1637  */
void
csio_lnode_stop(struct csio_lnode *ln)
{
    /* Take this lnode and all of its NPIV children down first. */
    csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
    if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
        /* Only the physical lnode owns the FCoE link; disable it. */
        csio_fcoe_enable_link(ln, 0);
        ln->flags &= ~CSIO_LNF_LINK_ENABLE;
    }
    csio_ln_dbg(ln, "stopping ln :%p\n", ln);
}
1648 
1649 /**
1650  * csio_lnode_close - Close an lnode.
1651  * @ln:     lnode
1652  *
1653  * This routine is invoked by HW module to close an lnode and its
1654  * associated NPIV lnodes. Lnode and its associated NPIV lnodes are
1655  * set to uninitialized state.
1656  */
void
csio_lnode_close(struct csio_lnode *ln)
{
    /* CLOSE is fanned out to this lnode and its NPIV children. */
    csio_post_event_lns(ln, CSIO_LNE_CLOSE);
    /* Invalidate the VN-Port flowid so a stale id is never reused. */
    if (csio_is_phys_ln(ln))
        ln->vnp_flowid = CSIO_INVALID_IDX;

    csio_ln_dbg(ln, "closed ln :%p\n", ln);
}
1666 
1667 /*
1668  * csio_ln_prep_ecwr - Prepare ELS/CT WR.
1669  * @io_req - IO request.
1670  * @wr_len - WR len
1671  * @immd_len - WR immediate data
1672  * @sub_op - Sub opcode
1673  * @sid - source portid.
1674  * @did - destination portid
1675  * @flow_id - flowid
1676  * @fw_wr - ELS/CT WR to be prepared.
1677  * Returns: 0 - on success
1678  */
static int
csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
              uint32_t immd_len, uint8_t sub_op, uint32_t sid,
              uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
{
    struct fw_fcoe_els_ct_wr *wr;
    __be32 port_id;

    wr  = (struct fw_fcoe_els_ct_wr *)fw_wr;
    wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) |
                     FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));

    /* Firmware expects the WR length in 16-byte units. */
    wr_len =  DIV_ROUND_UP(wr_len, 16);
    wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) |
                       FW_WR_LEN16_V(wr_len));
    wr->els_ct_type = sub_op;
    wr->ctl_pri = 0;
    wr->cp_en_class = 0;
    /* Cookie is echoed back in the completion; it carries the ioreq
     * pointer (see csio_ln_mgmt_wr_handler()).
     */
    wr->cookie = io_req->fw_handle;
    wr->iqid = cpu_to_be16(csio_q_physiqid(
                    io_req->lnode->hwp, io_req->iq_idx));
    wr->fl_to_sp =  FW_FCOE_ELS_CT_WR_SP(1);
    wr->tmo_val = (uint8_t) io_req->tmo;
    /* FC port ids are 24 bits: copy 3 bytes of the big-endian value. */
    port_id = htonl(sid);
    memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
    port_id = htonl(did);
    memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);

    /* Prepare RSP SGL */
    wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
    wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
    return 0;
}
1712 
1713 /*
1714  * csio_ln_mgmt_submit_wr - Post elsct work request.
1715  * @mgmtm - mgmtm
1716  * @io_req - io request.
1717  * @sub_op - ELS or CT request type
1718  * @pld - Dma Payload buffer
1719  * @pld_len - Payload len
 * Prepares an ELSCT work request and sends it to the FW.
1721  * Returns: 0 - on success
1722  */
static int
csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
        uint8_t sub_op, struct csio_dma_buf *pld,
        uint32_t pld_len)
{
    struct csio_wr_pair wrp;
    struct csio_lnode *ln = io_req->lnode;
    struct csio_rnode *rn = io_req->rnode;
    struct  csio_hw *hw = mgmtm->hw;
    uint8_t fw_wr[64];          /* staging area for the WR header */
    struct ulptx_sgl dsgl;
    uint32_t wr_size = 0;
    uint8_t im_len = 0;         /* immediate-data length, 0 => use SGL */
    uint32_t wr_off = 0;        /* running copy offset into the WR */

    int ret = 0;

    /* Calculate WR Size for this ELS REQ */
    wr_size = sizeof(struct fw_fcoe_els_ct_wr);

    /* Send as immediate data if pld < 256 */
    if (pld_len < 256) {
        wr_size += ALIGN(pld_len, 8);
        im_len = (uint8_t)pld_len;
    } else
        wr_size += sizeof(struct ulptx_sgl);

    /* Roundup WR size in units of 16 bytes */
    wr_size = ALIGN(wr_size, 16);

    /* Get WR to send ELS REQ */
    ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
    if (ret != 0) {
        csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
            io_req, ret);
        return ret;
    }

    /* Prepare Generic WR used by all ELS/CT cmd */
    csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
                ln->nport_id, rn->nport_id,
                csio_rn_flowid(rn),
                &fw_wr[0]);

    /* Copy ELS/CT WR CMD */
    csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
            sizeof(struct fw_fcoe_els_ct_wr));
    wr_off += sizeof(struct fw_fcoe_els_ct_wr);

    /* Copy payload to Immediate section of WR */
    if (im_len)
        csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
    else {
        /* Program DSGL to dma payload */
        dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                    ULPTX_MORE_F | ULPTX_NSGE_V(1));
        dsgl.len0 = cpu_to_be32(pld_len);
        dsgl.addr0 = cpu_to_be64(pld->paddr);
        /* SGL must land on an 8-byte boundary within the WR. */
        csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
                   sizeof(struct ulptx_sgl));
    }

    /* Issue work request to xmit ELS/CT req to FW */
    csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
    return ret;
}
1789 
1790 /*
1791  * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
1792  * @io_req - IO Request
1793  * @io_cbfn - Completion handler.
1794  * @req_type - ELS or CT request type
1795  * @pld - Dma Payload buffer
1796  * @pld_len - Payload len
1797  *
1798  *
 * This API is used to submit a management ELS/CT request.
1800  * This called with hw lock held
1801  * Returns: 0 - on success
1802  *      -ENOMEM - on error.
1803  */
1804 static int
1805 csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
1806         void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
1807         enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
1808         uint32_t pld_len)
1809 {
1810     struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
1811     struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
1812     int rv;
1813 
1814     BUG_ON(pld_len > pld->len);
1815 
1816     io_req->io_cbfn = io_cbfn;  /* Upper layer callback handler */
1817     io_req->fw_handle = (uintptr_t) (io_req);
1818     io_req->eq_idx = mgmtm->eq_idx;
1819     io_req->iq_idx = mgmtm->iq_idx;
1820 
1821     rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
1822     if (rv == 0) {
1823         list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
1824         mgmtm->stats.n_active++;
1825     }
1826     return rv;
1827 }
1828 
1829 /*
1830  * csio_ln_fdmi_init - FDMI Init entry point.
1831  * @ln: lnode
1832  */
1833 static int
1834 csio_ln_fdmi_init(struct csio_lnode *ln)
1835 {
1836     struct csio_hw *hw = csio_lnode_to_hw(ln);
1837     struct csio_dma_buf *dma_buf;
1838 
1839     /* Allocate MGMT request required for FDMI */
1840     ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
1841     if (!ln->mgmt_req) {
1842         csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
1843         CSIO_INC_STATS(hw, n_err_nomem);
1844         return -ENOMEM;
1845     }
1846 
1847     /* Allocate Dma buffers for FDMI response Payload */
1848     dma_buf = &ln->mgmt_req->dma_buf;
1849     dma_buf->len = 2048;
1850     dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
1851                         &dma_buf->paddr, GFP_KERNEL);
1852     if (!dma_buf->vaddr) {
1853         csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
1854         kfree(ln->mgmt_req);
1855         ln->mgmt_req = NULL;
1856         return -ENOMEM;
1857     }
1858 
1859     ln->flags |= CSIO_LNF_FDMI_ENABLE;
1860     return 0;
1861 }
1862 
1863 /*
1864  * csio_ln_fdmi_exit - FDMI exit entry point.
1865  * @ln: lnode
1866  */
1867 static int
1868 csio_ln_fdmi_exit(struct csio_lnode *ln)
1869 {
1870     struct csio_dma_buf *dma_buf;
1871     struct csio_hw *hw = csio_lnode_to_hw(ln);
1872 
1873     if (!ln->mgmt_req)
1874         return 0;
1875 
1876     dma_buf = &ln->mgmt_req->dma_buf;
1877     if (dma_buf->vaddr)
1878         dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
1879                     dma_buf->paddr);
1880 
1881     kfree(ln->mgmt_req);
1882     return 0;
1883 }
1884 
1885 int
1886 csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
1887         unsigned long time, unsigned long max_scan_ticks,
1888         unsigned long delta_scan_ticks)
1889 {
1890     int rv = 0;
1891 
1892     if (time >= max_scan_ticks)
1893         return 1;
1894 
1895     if (!ln->tgt_scan_tick)
1896         ln->tgt_scan_tick = ticks;
1897 
1898     if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) {
1899         if (!ln->last_scan_ntgts)
1900             ln->last_scan_ntgts = ln->n_scsi_tgts;
1901         else {
1902             if (ln->last_scan_ntgts == ln->n_scsi_tgts)
1903                 return 1;
1904 
1905             ln->last_scan_ntgts = ln->n_scsi_tgts;
1906         }
1907         ln->tgt_scan_tick = ticks;
1908     }
1909     return rv;
1910 }
1911 
1912 /*
1913  * csio_notify_lnodes:
1914  * @hw: HW module
1915  * @note: Notification
1916  *
1917  * Called from the HW SM to fan out notifications to the
1918  * Lnode SM. Since the HW SM is entered with lock held,
1919  * there is no need to hold locks here.
1920  *
1921  */
1922 void
1923 csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
1924 {
1925     struct list_head *tmp;
1926     struct csio_lnode *ln;
1927 
1928     csio_dbg(hw, "Notifying all nodes of event %d\n", note);
1929 
1930     /* Traverse children lnodes list and send evt */
1931     list_for_each(tmp, &hw->sln_head) {
1932         ln = (struct csio_lnode *) tmp;
1933 
1934         switch (note) {
1935         case CSIO_LN_NOTIFY_HWREADY:
1936             csio_lnode_start(ln);
1937             break;
1938 
1939         case CSIO_LN_NOTIFY_HWRESET:
1940         case CSIO_LN_NOTIFY_HWREMOVE:
1941             csio_lnode_close(ln);
1942             break;
1943 
1944         case CSIO_LN_NOTIFY_HWSTOP:
1945             csio_lnode_stop(ln);
1946             break;
1947 
1948         default:
1949             break;
1950 
1951         }
1952     }
1953 }
1954 
1955 /*
1956  * csio_disable_lnodes:
1957  * @hw: HW module
1958  * @portid:port id
1959  * @disable: disable/enable flag.
1960  * If disable=1, disables all lnode hosted on given physical port.
1961  * otherwise enables all the lnodes on given phsysical port.
1962  * This routine need to called with hw lock held.
1963  */
1964 void
1965 csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
1966 {
1967     struct list_head *tmp;
1968     struct csio_lnode *ln;
1969 
1970     csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);
1971 
1972     /* Traverse sibling lnodes list and send evt */
1973     list_for_each(tmp, &hw->sln_head) {
1974         ln = (struct csio_lnode *) tmp;
1975         if (ln->portid != portid)
1976             continue;
1977 
1978         if (disable)
1979             csio_lnode_stop(ln);
1980         else
1981             csio_lnode_start(ln);
1982     }
1983 }
1984 
1985 /*
1986  * csio_ln_init - Initialize an lnode.
1987  * @ln:     lnode
1988  *
1989  */
1990 static int
1991 csio_ln_init(struct csio_lnode *ln)
1992 {
1993     int rv = -EINVAL;
1994     struct csio_lnode *pln;
1995     struct csio_hw *hw = csio_lnode_to_hw(ln);
1996 
1997     csio_init_state(&ln->sm, csio_lns_uninit);
1998     ln->vnp_flowid = CSIO_INVALID_IDX;
1999     ln->fcf_flowid = CSIO_INVALID_IDX;
2000 
2001     if (csio_is_root_ln(ln)) {
2002 
2003         /* This is the lnode used during initialization */
2004 
2005         ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
2006         if (!ln->fcfinfo) {
2007             csio_ln_err(ln, "Failed to alloc FCF record\n");
2008             CSIO_INC_STATS(hw, n_err_nomem);
2009             goto err;
2010         }
2011 
2012         INIT_LIST_HEAD(&ln->fcf_lsthead);
2013         kref_init(&ln->fcfinfo->kref);
2014 
2015         if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
2016             goto err;
2017 
2018     } else { /* Either a non-root physical or a virtual lnode */
2019 
2020         /*
2021          * THe rest is common for non-root physical and NPIV lnodes.
2022          * Just get references to all other modules
2023          */
2024 
2025         if (csio_is_npiv_ln(ln)) {
2026             /* NPIV */
2027             pln = csio_parent_lnode(ln);
2028             kref_get(&pln->fcfinfo->kref);
2029             ln->fcfinfo = pln->fcfinfo;
2030         } else {
2031             /* Another non-root physical lnode (FCF) */
2032             ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
2033                                 GFP_KERNEL);
2034             if (!ln->fcfinfo) {
2035                 csio_ln_err(ln, "Failed to alloc FCF info\n");
2036                 CSIO_INC_STATS(hw, n_err_nomem);
2037                 goto err;
2038             }
2039 
2040             kref_init(&ln->fcfinfo->kref);
2041 
2042             if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
2043                 goto err;
2044         }
2045 
2046     } /* if (!csio_is_root_ln(ln)) */
2047 
2048     return 0;
2049 err:
2050     return rv;
2051 }
2052 
2053 static void
2054 csio_ln_exit(struct csio_lnode *ln)
2055 {
2056     struct csio_lnode *pln;
2057 
2058     csio_cleanup_rns(ln);
2059     if (csio_is_npiv_ln(ln)) {
2060         pln = csio_parent_lnode(ln);
2061         kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
2062     } else {
2063         kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
2064         if (csio_fdmi_enable)
2065             csio_ln_fdmi_exit(ln);
2066     }
2067     ln->fcfinfo = NULL;
2068 }
2069 
2070 /*
2071  * csio_lnode_init - Initialize the members of an lnode.
2072  * @ln:     lnode
2073  */
2074 int
2075 csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
2076         struct csio_lnode *pln)
2077 {
2078     int rv = -EINVAL;
2079 
2080     /* Link this lnode to hw */
2081     csio_lnode_to_hw(ln)    = hw;
2082 
2083     /* Link child to parent if child lnode */
2084     if (pln)
2085         ln->pln = pln;
2086     else
2087         ln->pln = NULL;
2088 
2089     /* Initialize scsi_tgt and timers to zero */
2090     ln->n_scsi_tgts = 0;
2091     ln->last_scan_ntgts = 0;
2092     ln->tgt_scan_tick = 0;
2093 
2094     /* Initialize rnode list */
2095     INIT_LIST_HEAD(&ln->rnhead);
2096     INIT_LIST_HEAD(&ln->cln_head);
2097 
2098     /* Initialize log level for debug */
2099     ln->params.log_level    = hw->params.log_level;
2100 
2101     if (csio_ln_init(ln))
2102         goto err;
2103 
2104     /* Add lnode to list of sibling or children lnodes */
2105     spin_lock_irq(&hw->lock);
2106     list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
2107     if (pln)
2108         pln->num_vports++;
2109     spin_unlock_irq(&hw->lock);
2110 
2111     hw->num_lns++;
2112 
2113     return 0;
2114 err:
2115     csio_lnode_to_hw(ln) = NULL;
2116     return rv;
2117 }
2118 
/**
 * csio_lnode_exit - De-instantiate an lnode.
 * @ln:     lnode
 *
 * Reverses csio_lnode_init(): tears down the lnode's internals via
 * csio_ln_exit(), unlinks it from the hw's lnode lists under the hw
 * lock, fixes up the parent vport count and the root-lnode pointer,
 * and finally severs the lnode's link back to the hw.
 */
void
csio_lnode_exit(struct csio_lnode *ln)
{
    struct csio_hw *hw = csio_lnode_to_hw(ln);

    /* Release rnodes, FCF record reference and FDMI resources */
    csio_ln_exit(ln);

    /* Remove this lnode from hw->sln_head */
    spin_lock_irq(&hw->lock);

    list_del_init(&ln->sm.sm_list);

    /* If it is children lnode, decrement the
     * counter in its parent lnode
     */
    if (ln->pln)
        ln->pln->num_vports--;

    /* Update root lnode pointer: the new head of the sibling list
     * becomes root, or NULL if this was the last lnode.
     */
    if (list_empty(&hw->sln_head))
        hw->rln = NULL;
    else
        hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);

    spin_unlock_irq(&hw->lock);

    csio_lnode_to_hw(ln)    = NULL;
    /* NOTE(review): num_lns is updated outside hw->lock here (and in
     * csio_lnode_init) — presumably serialized by the caller; confirm.
     */
    hw->num_lns--;
}