0001 /* bnx2fc_hwi.c: QLogic Linux FCoE offload driver.
0002  * This file contains the low level functions that interact
0003  * with 57712 FCoE firmware.
0004  *
0005  * Copyright (c) 2008-2013 Broadcom Corporation
0006  * Copyright (c) 2014-2016 QLogic Corporation
0007  * Copyright (c) 2016-2017 Cavium Inc.
0008  *
0009  * This program is free software; you can redistribute it and/or modify
0010  * it under the terms of the GNU General Public License as published by
0011  * the Free Software Foundation.
0012  *
0013  * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
0014  */
0015 
0016 #include "bnx2fc.h"
0017 
0018 DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
0019 
0020 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
0021                     struct fcoe_kcqe *new_cqe_kcqe);
0022 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
0023                     struct fcoe_kcqe *ofld_kcqe);
0024 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
0025                         struct fcoe_kcqe *ofld_kcqe);
0026 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
0027 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
0028                     struct fcoe_kcqe *destroy_kcqe);
0029 
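/**
 * bnx2fc_send_stat_req - sends a statistics request KWQE to the firmware
 *
 * @hba:	adapter structure pointer
 *
 * Builds a single FCOE_KWQE_OPCODE_STAT work queue entry pointing at the
 * DMA-able statistics buffer (hba->stats_buf_dma) and submits it through
 * the cnic device, if one is registered.
 */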
0030 int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
0031 {
0032     struct fcoe_kwqe_stat stat_req;
0033     struct kwqe *kwqe_arr[2];
0034     int num_kwqes = 1;
0035     int rc = 0;
0036 
0037     memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
0038     stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
0039     stat_req.hdr.flags =
0040         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0041 
0042     stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
0043     stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
0044 
0045     kwqe_arr[0] = (struct kwqe *) &stat_req;
0046 
0047     if (hba->cnic && hba->cnic->submit_kwqes)
0048         rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
0049 
0050     return rc;
0051 }
0052 
0053 /**
0054  * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
0055  *
0056  * @hba:    adapter structure pointer
0057  *
0058  * Send down the FCoE firmware init KWQEs, which initiate the initial
0059  * handshake with the f/w.
0060  *
0061  */
0062 int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
0063 {
0064     struct fcoe_kwqe_init1 fcoe_init1;
0065     struct fcoe_kwqe_init2 fcoe_init2;
0066     struct fcoe_kwqe_init3 fcoe_init3;
0067     struct kwqe *kwqe_arr[3];
0068     int num_kwqes = 3;
0069     int rc = 0;
0070 
0071     if (!hba->cnic) {
0072         printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
0073         return -ENODEV;
0074     }
0075 
0076     /* fill init1 KWQE */
0077     memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
0078     fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
0079     fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
0080                     FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0081 
0082     fcoe_init1.num_tasks = hba->max_tasks;
0083     fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
0084     fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
0085     fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
0086     fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
0087     fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
0088     fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
0089     fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
0090     fcoe_init1.task_list_pbl_addr_hi =
0091                 (u32) ((u64) hba->task_ctx_bd_dma >> 32);
0092     fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
0093 
0094     fcoe_init1.flags = (PAGE_SHIFT <<
0095                 FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
0096 
0097     fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
0098 
0099     /* fill init2 KWQE */
0100     memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
0101     fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
0102     fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
0103                     FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0104 
0105     fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
0106     fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
0107 
0108 
0109     fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
0110     fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
0111                        ((u64) hba->hash_tbl_pbl_dma >> 32);
0112 
0113     fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
0114     fcoe_init2.t2_hash_tbl_addr_hi = (u32)
0115                       ((u64) hba->t2_hash_tbl_dma >> 32);
0116 
0117     fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
0118     fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
0119                     ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
0120 
0121     fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
0122 
0123     /* fill init3 KWQE */
0124     memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
0125     fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
0126     fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
0127                     FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0128     fcoe_init3.error_bit_map_lo = 0xffffffff;
0129     fcoe_init3.error_bit_map_hi = 0xffffffff;
0130 
0131     /*
0132      * enable both cached connection and cached tasks
0133      * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
0134      */
0135     fcoe_init3.perf_config = 3;
0136 
0137     kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
0138     kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
0139     kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
0140 
0141     if (hba->cnic && hba->cnic->submit_kwqes)
0142         rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
0143 
0144     return rc;
0145 }
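
/**
 * bnx2fc_send_fw_fcoe_destroy_msg - sends the FCoE function destroy request
 *
 * @hba:	adapter structure pointer
 *
 * Sends a single FCOE_KWQE_OPCODE_DESTROY KWQE to the firmware to undo the
 * function-level initialization done by bnx2fc_send_fw_fcoe_init_msg().
 */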
0146 int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
0147 {
0148     struct fcoe_kwqe_destroy fcoe_destroy;
0149     struct kwqe *kwqe_arr[2];
0150     int num_kwqes = 1;
0151     int rc = -1;
0152 
0153     /* fill destroy KWQE */
0154     memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
0155     fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
0156     fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
0157                     FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0158     kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
0159 
0160     if (hba->cnic && hba->cnic->submit_kwqes)
0161         rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
0162     return rc;
0163 }
0164 
0165 /**
0166  * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
0167  *
0168  * @port:       port structure pointer
0169  * @tgt:        bnx2fc_rport structure pointer
0170  */
0171 int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
0172                     struct bnx2fc_rport *tgt)
0173 {
0174     struct fc_lport *lport = port->lport;
0175     struct bnx2fc_interface *interface = port->priv;
0176     struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
0177     struct bnx2fc_hba *hba = interface->hba;
0178     struct kwqe *kwqe_arr[4];
0179     struct fcoe_kwqe_conn_offload1 ofld_req1;
0180     struct fcoe_kwqe_conn_offload2 ofld_req2;
0181     struct fcoe_kwqe_conn_offload3 ofld_req3;
0182     struct fcoe_kwqe_conn_offload4 ofld_req4;
0183     struct fc_rport_priv *rdata = tgt->rdata;
0184     struct fc_rport *rport = tgt->rport;
0185     int num_kwqes = 4;
0186     u32 port_id;
0187     int rc = 0;
0188     u16 conn_id;
0189 
0190     /* Initialize offload request 1 structure */
0191     memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
0192 
0193     ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
0194     ofld_req1.hdr.flags =
0195         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0196 
0197 
0198     conn_id = (u16)tgt->fcoe_conn_id;
0199     ofld_req1.fcoe_conn_id = conn_id;
0200 
0201 
0202     ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
0203     ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
0204 
0205     ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
0206     ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
0207 
0208     ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
0209     ofld_req1.rq_first_pbe_addr_hi =
0210                 (u32)((u64) tgt->rq_dma >> 32);
0211 
0212     ofld_req1.rq_prod = 0x8000;
0213 
0214     /* Initialize offload request 2 structure */
0215     memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
0216 
0217     ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
0218     ofld_req2.hdr.flags =
0219         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0220 
0221     ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
0222 
0223     ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
0224     ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
0225 
0226     ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
0227     ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
0228 
0229     ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
0230     ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
0231 
0232     /* Initialize offload request 3 structure */
0233     memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
0234 
0235     ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
0236     ofld_req3.hdr.flags =
0237         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0238 
0239     ofld_req3.vlan_tag = interface->vlan_id <<
0240                 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
0241     ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
0242 
0243     port_id = fc_host_port_id(lport->host);
0244     if (port_id == 0) {
0245         BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
0246         return -EINVAL;
0247     }
0248 
0249     /*
0250      * Store the s_id of the initiator for later reference. It is needed
0251      * during disable/destroy and during link-down processing, because
0252      * when the lport is reset its port_id is also reset to 0.
0253      */
0254     tgt->sid = port_id;
0255     ofld_req3.s_id[0] = (port_id & 0x000000FF);
0256     ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
0257     ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
0258 
0259     port_id = rport->port_id;
0260     ofld_req3.d_id[0] = (port_id & 0x000000FF);
0261     ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
0262     ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
0263 
0264     ofld_req3.tx_total_conc_seqs = rdata->max_seq;
0265 
0266     ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
0267     ofld_req3.rx_max_fc_pay_len  = lport->mfs;
0268 
0269     ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
0270     ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
0271     ofld_req3.rx_open_seqs_exch_c3 = 1;
0272 
0273     ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
0274     ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
0275 
0276     /* set mul_n_port_ids supported flag to 0, until it is supported */
0277     ofld_req3.flags = 0;
0278     /*
0279     ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
0280                 FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
0281     */
0282     /* Info from PLOGI response */
0283     ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
0284                  FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
0285 
0286     ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
0287                  FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
0288 
0289     /*
0290      * Info from PRLI response, this info is used for sequence level error
0291      * recovery support
0292      */
0293     if (tgt->dev_type == TYPE_TAPE) {
0294         ofld_req3.flags |= 1 <<
0295                     FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
0296         ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
0297                     ? 1 : 0) <<
0298                     FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
0299     }
0300 
0301     /* vlan flag */
0302     ofld_req3.flags |= (interface->vlan_enabled <<
0303                 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
0304 
0305     /* C2_VALID and ACK flags are not set as they are not supported */
0306 
0307 
0308     /* Initialize offload request 4 structure */
0309     memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
0310     ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
0311     ofld_req4.hdr.flags =
0312         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0313 
0314     ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
0315 
0316 
0317     ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
0318                             /* local mac */
0319     ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
0320     ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
0321     ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
0322     ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
0323     ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
0324     ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
0325                             /* fcf mac */
0326     ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
0327     ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
0328     ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
0329     ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
0330     ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
0331 
0332     ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
0333     ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
0334 
0335     ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
0336     ofld_req4.confq_pbl_base_addr_hi =
0337                     (u32)((u64) tgt->confq_pbl_dma >> 32);
0338 
0339     kwqe_arr[0] = (struct kwqe *) &ofld_req1;
0340     kwqe_arr[1] = (struct kwqe *) &ofld_req2;
0341     kwqe_arr[2] = (struct kwqe *) &ofld_req3;
0342     kwqe_arr[3] = (struct kwqe *) &ofld_req4;
0343 
0344     if (hba->cnic && hba->cnic->submit_kwqes)
0345         rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
0346 
0347     return rc;
0348 }
0349 
0350 /**
0351  * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
0352  *
0353  * @port:       port structure pointer
0354  * @tgt:        bnx2fc_rport structure pointer
0355  */
0356 int bnx2fc_send_session_enable_req(struct fcoe_port *port,
0357                     struct bnx2fc_rport *tgt)
0358 {
0359     struct kwqe *kwqe_arr[2];
0360     struct bnx2fc_interface *interface = port->priv;
0361     struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
0362     struct bnx2fc_hba *hba = interface->hba;
0363     struct fcoe_kwqe_conn_enable_disable enbl_req;
0364     struct fc_lport *lport = port->lport;
0365     struct fc_rport *rport = tgt->rport;
0366     int num_kwqes = 1;
0367     int rc = 0;
0368     u32 port_id;
0369 
0370     memset(&enbl_req, 0x00,
0371            sizeof(struct fcoe_kwqe_conn_enable_disable));
0372     enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
0373     enbl_req.hdr.flags =
0374         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0375 
0376     enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
0377                             /* local mac */
0378     enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
0379     enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
0380     enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
0381     enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
0382     enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
0383     memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
0384 
0385     enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
0386     enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
0387     enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
0388     enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
0389     enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
0390     enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
0391 
0392     port_id = fc_host_port_id(lport->host);
0393     if (port_id != tgt->sid) {
0394         printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
0395                 "sid = 0x%x\n", port_id, tgt->sid);
0396         port_id = tgt->sid;
0397     }
0398     enbl_req.s_id[0] = (port_id & 0x000000FF);
0399     enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
0400     enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
0401 
0402     port_id = rport->port_id;
0403     enbl_req.d_id[0] = (port_id & 0x000000FF);
0404     enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
0405     enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
0406     enbl_req.vlan_tag = interface->vlan_id <<
0407                 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
0408     enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
0409     enbl_req.vlan_flag = interface->vlan_enabled;
0410     enbl_req.context_id = tgt->context_id;
0411     enbl_req.conn_id = tgt->fcoe_conn_id;
0412 
0413     kwqe_arr[0] = (struct kwqe *) &enbl_req;
0414 
0415     if (hba->cnic && hba->cnic->submit_kwqes)
0416         rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
0417     return rc;
0418 }
0419 
0420 /**
0421  * bnx2fc_send_session_disable_req - initiates FCoE Session disable
0422  *
0423  * @port:       port structure pointer
0424  * @tgt:        bnx2fc_rport structure pointer
0425  */
0426 int bnx2fc_send_session_disable_req(struct fcoe_port *port,
0427                     struct bnx2fc_rport *tgt)
0428 {
0429     struct bnx2fc_interface *interface = port->priv;
0430     struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
0431     struct bnx2fc_hba *hba = interface->hba;
0432     struct fcoe_kwqe_conn_enable_disable disable_req;
0433     struct kwqe *kwqe_arr[2];
0434     struct fc_rport *rport = tgt->rport;
0435     int num_kwqes = 1;
0436     int rc = 0;
0437     u32 port_id;
0438 
0439     memset(&disable_req, 0x00,
0440            sizeof(struct fcoe_kwqe_conn_enable_disable));
0441     disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
0442     disable_req.hdr.flags =
0443         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0444 
0445     disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
0446     disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
0447     disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
0448     disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
0449     disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
0450     disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
0451 
0452     disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
0453     disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
0454     disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
0455     disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
0456     disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
0457     disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
0458 
0459     port_id = tgt->sid;
0460     disable_req.s_id[0] = (port_id & 0x000000FF);
0461     disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
0462     disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
0463 
0464 
0465     port_id = rport->port_id;
0466     disable_req.d_id[0] = (port_id & 0x000000FF);
0467     disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
0468     disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
0469     disable_req.context_id = tgt->context_id;
0470     disable_req.conn_id = tgt->fcoe_conn_id;
0471     disable_req.vlan_tag = interface->vlan_id <<
0472                 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
0473     disable_req.vlan_tag |=
0474             3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
0475     disable_req.vlan_flag = interface->vlan_enabled;
0476 
0477     kwqe_arr[0] = (struct kwqe *) &disable_req;
0478 
0479     if (hba->cnic && hba->cnic->submit_kwqes)
0480         rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
0481 
0482     return rc;
0483 }
0484 
0485 /**
0486  * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
0487  *
0488  * @hba:        adapter structure pointer
0489  * @tgt:        bnx2fc_rport structure pointer
0490  */
0491 int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
0492                     struct bnx2fc_rport *tgt)
0493 {
0494     struct fcoe_kwqe_conn_destroy destroy_req;
0495     struct kwqe *kwqe_arr[2];
0496     int num_kwqes = 1;
0497     int rc = 0;
0498 
0499     memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
0500     destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
0501     destroy_req.hdr.flags =
0502         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
0503 
0504     destroy_req.context_id = tgt->context_id;
0505     destroy_req.conn_id = tgt->fcoe_conn_id;
0506 
0507     kwqe_arr[0] = (struct kwqe *) &destroy_req;
0508 
0509     if (hba->cnic && hba->cnic->submit_kwqes)
0510         rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
0511 
0512     return rc;
0513 }
0514 
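/*
 * is_valid_lport - check whether @lport is still registered with @hba
 *
 * Walks hba->vports under hba_lock and returns true only if the lport is
 * found. Used by the unsolicited ELS work handler before handing a frame
 * to fc_exch_recv().
 */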
0515 static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
0516 {
0517     struct bnx2fc_lport *blport;
0518 
0519     spin_lock_bh(&hba->hba_lock);
0520     list_for_each_entry(blport, &hba->vports, list) {
0521         if (blport->lport == lport) {
0522             spin_unlock_bh(&hba->hba_lock);
0523             return true;
0524         }
0525     }
0526     spin_unlock_bh(&hba->hba_lock);
0527     return false;
0528 
0529 }
0530 
0531 
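/*
 * bnx2fc_unsol_els_work - work handler for unsolicited L2 frames
 *
 * Runs from bnx2fc_wq; re-validates the lport against the hba and, if it
 * is still valid, hands the reconstructed fc_frame to libfc via
 * fc_exch_recv(). The bnx2fc_unsol_els container is freed either way.
 */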
0532 static void bnx2fc_unsol_els_work(struct work_struct *work)
0533 {
0534     struct bnx2fc_unsol_els *unsol_els;
0535     struct fc_lport *lport;
0536     struct bnx2fc_hba *hba;
0537     struct fc_frame *fp;
0538 
0539     unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
0540     lport = unsol_els->lport;
0541     fp = unsol_els->fp;
0542     hba = unsol_els->hba;
0543     if (is_valid_lport(hba, lport))
0544         fc_exch_recv(lport, fp);
0545     kfree(unsol_els);
0546 }
0547 
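/**
 * bnx2fc_process_l2_frame_compl - handles a received unsolicited L2 frame
 *
 * @tgt:	rport the frame was received on
 * @buf:	raw frame (FC header plus payload) copied out of the RQ
 * @frame_len:	total frame length in bytes
 * @l2_oxid:	OX_ID to patch into the header, or FC_XID_UNKNOWN
 *
 * Allocates an fc_frame, copies the header and payload into it, and defers
 * delivery to libfc through a work item so that fc_exch_recv() is not
 * called from the fast-path context. ELS opcodes that need no reply
 * (TEST, ESTC, FAN, CSU) are dropped here.
 */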
0548 void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
0549                    unsigned char *buf,
0550                    u32 frame_len, u16 l2_oxid)
0551 {
0552     struct fcoe_port *port = tgt->port;
0553     struct fc_lport *lport = port->lport;
0554     struct bnx2fc_interface *interface = port->priv;
0555     struct bnx2fc_unsol_els *unsol_els;
0556     struct fc_frame_header *fh;
0557     struct fc_frame *fp;
0558     struct sk_buff *skb;
0559     u32 payload_len;
0560     u32 crc;
0561     u8 op;
0562 
0563 
0564     unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
0565     if (!unsol_els) {
0566         BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
0567         return;
0568     }
0569 
0570     BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
0571         l2_oxid, frame_len);
0572 
0573     payload_len = frame_len - sizeof(struct fc_frame_header);
0574 
0575     fp = fc_frame_alloc(lport, payload_len);
0576     if (!fp) {
0577         printk(KERN_ERR PFX "fc_frame_alloc failure\n");
0578         kfree(unsol_els);
0579         return;
0580     }
0581 
0582     fh = (struct fc_frame_header *) fc_frame_header_get(fp);
0583     /* Copy FC Frame header and payload into the frame */
0584     memcpy(fh, buf, frame_len);
0585 
0586     if (l2_oxid != FC_XID_UNKNOWN)
0587         fh->fh_ox_id = htons(l2_oxid);
0588 
0589     skb = fp_skb(fp);
0590 
0591     if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
0592         (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
0593 
0594         if (fh->fh_type == FC_TYPE_ELS) {
0595             op = fc_frame_payload_op(fp);
0596             if ((op == ELS_TEST) || (op == ELS_ESTC) ||
0597                 (op == ELS_FAN) || (op == ELS_CSU)) {
0598                 /*
0599                  * No need to reply for these
0600                  * ELS requests
0601                  */
0602                 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
0603                 kfree_skb(skb);
0604                 kfree(unsol_els);
0605                 return;
0606             }
0607         }
0608         crc = fcoe_fc_crc(fp);
0609         fc_frame_init(fp);
0610         fr_dev(fp) = lport;
0611         fr_sof(fp) = FC_SOF_I3;
0612         fr_eof(fp) = FC_EOF_T;
0613         fr_crc(fp) = cpu_to_le32(~crc);
0614         unsol_els->lport = lport;
0615         unsol_els->hba = interface->hba;
0616         unsol_els->fp = fp;
0617         INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
0618         queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
0619     } else {
0620         BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
0621         kfree_skb(skb);
0622         kfree(unsol_els);
0623     }
0624 }
0625 
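/*
 * bnx2fc_process_unsol_compl - handles unsolicited CQEs
 *
 * Three CQE subtypes are handled: unsolicited frames (copied out of one or
 * more RQ entries and passed to bnx2fc_process_l2_frame_compl()), error
 * detection entries (which may trigger REC for tape devices or an ABTS on
 * the affected xid), and warning detection entries (logged only).
 */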
0626 static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
0627 {
0628     u8 num_rq;
0629     struct fcoe_err_report_entry *err_entry;
0630     unsigned char *rq_data;
0631     unsigned char *buf = NULL, *buf1;
0632     int i;
0633     u16 xid;
0634     u32 frame_len, len;
0635     struct bnx2fc_cmd *io_req = NULL;
0636     struct bnx2fc_interface *interface = tgt->port->priv;
0637     struct bnx2fc_hba *hba = interface->hba;
0638     int rc = 0;
0639     u64 err_warn_bit_map;
0640     u8 err_warn = 0xff;
0641 
0642 
0643     BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
0644     switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
0645     case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
0646         frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
0647                  FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
0648 
0649         num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
0650 
0651         spin_lock_bh(&tgt->tgt_lock);
0652         rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
0653         spin_unlock_bh(&tgt->tgt_lock);
0654 
0655         if (rq_data) {
0656             buf = rq_data;
0657         } else {
0658             buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
0659                           GFP_ATOMIC);
0660 
0661             if (!buf1) {
0662                 BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
0663                 break;
0664             }
0665 
0666             for (i = 0; i < num_rq; i++) {
0667                 spin_lock_bh(&tgt->tgt_lock);
0668                 rq_data = (unsigned char *)
0669                        bnx2fc_get_next_rqe(tgt, 1);
0670                 spin_unlock_bh(&tgt->tgt_lock);
0671                 len = BNX2FC_RQ_BUF_SZ;
0672                 memcpy(buf1, rq_data, len);
0673                 buf1 += len;
0674             }
0675         }
0676         bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
0677                           FC_XID_UNKNOWN);
0678 
0679         if (buf != rq_data)
0680             kfree(buf);
0681         spin_lock_bh(&tgt->tgt_lock);
0682         bnx2fc_return_rqe(tgt, num_rq);
0683         spin_unlock_bh(&tgt->tgt_lock);
0684         break;
0685 
0686     case FCOE_ERROR_DETECTION_CQE_TYPE:
0687         /*
0688          * In case of error reporting CQE a single RQ entry
0689          * is consumed.
0690          */
0691         spin_lock_bh(&tgt->tgt_lock);
0692         num_rq = 1;
0693         err_entry = (struct fcoe_err_report_entry *)
0694                  bnx2fc_get_next_rqe(tgt, 1);
0695         xid = err_entry->fc_hdr.ox_id;
0696         BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
0697         BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
0698             err_entry->data.err_warn_bitmap_hi,
0699             err_entry->data.err_warn_bitmap_lo);
0700         BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
0701             err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
0702 
0703         if (xid > hba->max_xid) {
0704             BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
0705                    xid);
0706             goto ret_err_rqe;
0707         }
0708 
0709 
0710         io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
0711         if (!io_req)
0712             goto ret_err_rqe;
0713 
0714         if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
0715             printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
0716             goto ret_err_rqe;
0717         }
0718 
0719         if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
0720                        &io_req->req_flags)) {
0721             BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
0722                         "progress; ignore unsol err\n");
0723             goto ret_err_rqe;
0724         }
0725 
0726         err_warn_bit_map = (u64)
0727             ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
0728             (u64)err_entry->data.err_warn_bitmap_lo;
0729         for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
0730             if (err_warn_bit_map & (u64)((u64)1 << i)) {
0731                 err_warn = i;
0732                 break;
0733             }
0734         }
0735 
0736         /*
0737          * If ABTS is already in progress, and FW error is
0738          * received after that, do not cancel the timeout_work
0739          * and let the error recovery continue by explicitly
0740          * logging out the target, when the ABTS eventually
0741          * times out.
0742          */
0743         if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
0744             printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
0745                         "in ABTS processing\n", xid);
0746             goto ret_err_rqe;
0747         }
0748         BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
0749         if (tgt->dev_type != TYPE_TAPE)
0750             goto skip_rec;
0751         switch (err_warn) {
0752         case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
0753         case FCOE_ERROR_CODE_DATA_OOO_RO:
0754         case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
0755         case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
0756         case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
0757         case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
0758             BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
0759                    xid);
0760             memcpy(&io_req->err_entry, err_entry,
0761                    sizeof(struct fcoe_err_report_entry));
0762             if (!test_bit(BNX2FC_FLAG_SRR_SENT,
0763                       &io_req->req_flags)) {
0764                 spin_unlock_bh(&tgt->tgt_lock);
0765                 rc = bnx2fc_send_rec(io_req);
0766                 spin_lock_bh(&tgt->tgt_lock);
0767 
0768                 if (rc)
0769                     goto skip_rec;
0770             } else
0771                 printk(KERN_ERR PFX "SRR in progress\n");
0772             goto ret_err_rqe;
0773         default:
0774             break;
0775         }
0776 
0777 skip_rec:
0778         set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
0779         /*
0780          * Cancel the timeout_work, as we received IO
0781          * completion with FW error.
0782          */
0783         if (cancel_delayed_work(&io_req->timeout_work))
0784             kref_put(&io_req->refcount, bnx2fc_cmd_release);
0785 
0786         rc = bnx2fc_initiate_abts(io_req);
0787         if (rc != SUCCESS) {
0788             printk(KERN_ERR PFX "err_warn: initiate_abts "
0789                 "failed xid = 0x%x. issue cleanup\n",
0790                 io_req->xid);
0791             bnx2fc_initiate_cleanup(io_req);
0792         }
0793 ret_err_rqe:
0794         bnx2fc_return_rqe(tgt, 1);
0795         spin_unlock_bh(&tgt->tgt_lock);
0796         break;
0797 
0798     case FCOE_WARNING_DETECTION_CQE_TYPE:
0799         /*
0800          * In case of a warning reporting CQE, a single RQ entry
0801          * is consumed.
0802          */
0803         spin_lock_bh(&tgt->tgt_lock);
0804         num_rq = 1;
0805         err_entry = (struct fcoe_err_report_entry *)
0806                  bnx2fc_get_next_rqe(tgt, 1);
0807         xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
0808         BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
0809         BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
0810             err_entry->data.err_warn_bitmap_hi,
0811             err_entry->data.err_warn_bitmap_lo);
0812         BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
0813             err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
0814 
0815         if (xid > hba->max_xid) {
0816             BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
0817             goto ret_warn_rqe;
0818         }
0819 
0820         err_warn_bit_map = (u64)
0821             ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
0822             (u64)err_entry->data.err_warn_bitmap_lo;
0823         for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
0824             if (err_warn_bit_map & ((u64)1 << i)) {
0825                 err_warn = i;
0826                 break;
0827             }
0828         }
0829         BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
0830 
0831         io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
0832         if (!io_req)
0833             goto ret_warn_rqe;
0834 
0835         if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
0836             printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
0837             goto ret_warn_rqe;
0838         }
0839 
0840         memcpy(&io_req->err_entry, err_entry,
0841                sizeof(struct fcoe_err_report_entry));
0842 
0843         if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
0844             /* REC_TOV is not a warning code */
0845             BUG_ON(1);
0846         else
0847             BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
0848 ret_warn_rqe:
0849         bnx2fc_return_rqe(tgt, 1);
0850         spin_unlock_bh(&tgt->tgt_lock);
0851         break;
0852 
0853     default:
0854         printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
0855         break;
0856     }
0857 }
0858 
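/*
 * bnx2fc_process_cq_compl - completes a single solicited CQE
 *
 * Looks up the bnx2fc_cmd by xid and dispatches on the command type and
 * the rx_state encoded in the task context: SCSI, task management, ABTS,
 * ELS, cleanup and sequence-cleanup completions are each routed to their
 * dedicated handler. Runs with tgt->tgt_lock held for the duration.
 */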
0859 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
0860                  unsigned char *rq_data, u8 num_rq,
0861                  struct fcoe_task_ctx_entry *task)
0862 {
0863     struct fcoe_port *port = tgt->port;
0864     struct bnx2fc_interface *interface = port->priv;
0865     struct bnx2fc_hba *hba = interface->hba;
0866     struct bnx2fc_cmd *io_req;
0867 
0868     u16 xid;
0869     u8  cmd_type;
0870     u8 rx_state = 0;
0871 
0872     spin_lock_bh(&tgt->tgt_lock);
0873 
0874     xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
0875     io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
0876 
0877     if (io_req == NULL) {
0878         printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
0879         spin_unlock_bh(&tgt->tgt_lock);
0880         return;
0881     }
0882 
0883     /* Note the command type so the completion can be dispatched */
0884     cmd_type = io_req->cmd_type;
0885 
0886     rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
0887             FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
0888             FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
0889 
0890     /* Process other IO completion types */
0891     switch (cmd_type) {
0892     case BNX2FC_SCSI_CMD:
0893         if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
0894             bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
0895                               rq_data);
0896             spin_unlock_bh(&tgt->tgt_lock);
0897             return;
0898         }
0899 
0900         if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
0901             bnx2fc_process_abts_compl(io_req, task, num_rq);
0902         else if (rx_state ==
0903              FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
0904             bnx2fc_process_cleanup_compl(io_req, task, num_rq);
0905         else
0906             printk(KERN_ERR PFX "Invalid rx state - %d\n",
0907                 rx_state);
0908         break;
0909 
0910     case BNX2FC_TASK_MGMT_CMD:
0911         BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
0912         bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
0913         break;
0914 
0915     case BNX2FC_ABTS:
0916         /*
0917          * ABTS request received by firmware. ABTS response
0918          * will be delivered to the task belonging to the IO
0919          * that was aborted
0920          */
0921         BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
0922         kref_put(&io_req->refcount, bnx2fc_cmd_release);
0923         break;
0924 
0925     case BNX2FC_ELS:
0926         if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
0927             bnx2fc_process_els_compl(io_req, task, num_rq);
0928         else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
0929             bnx2fc_process_abts_compl(io_req, task, num_rq);
0930         else if (rx_state ==
0931              FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
0932             bnx2fc_process_cleanup_compl(io_req, task, num_rq);
0933         else
0934             printk(KERN_ERR PFX "Invalid rx state =  %d\n",
0935                 rx_state);
0936         break;
0937 
0938     case BNX2FC_CLEANUP:
0939         BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
0940         kref_put(&io_req->refcount, bnx2fc_cmd_release);
0941         break;
0942 
0943     case BNX2FC_SEQ_CLEANUP:
0944         BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
0945                   io_req->xid);
0946         bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
0947         kref_put(&io_req->refcount, bnx2fc_cmd_release);
0948         break;
0949 
0950     default:
0951         printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
0952         break;
0953     }
0954     spin_unlock_bh(&tgt->tgt_lock);
0955 }
0956 
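/*
 * bnx2fc_arm_cq - re-arms the connection's completion queue
 *
 * Publishes the current CQ consumer index and toggle bit through the
 * per-connection rx doorbell mapped at tgt->ctx_base so the chip can post
 * further completion events.
 */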
0957 void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
0958 {
0959     struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
0960     u32 msg;
0961 
0962     wmb();
0963     rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
0964             FCOE_CQE_TOGGLE_BIT_SHIFT);
0965     msg = *((u32 *)rx_db);
0966     writel(cpu_to_le32(msg), tgt->ctx_base);
0967 
0968 }
0969 
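/*
 * bnx2fc_alloc_work - allocates a deferred fast-path work item
 *
 * Packages the wqe, task context pointer and (optionally) one RQ buffer's
 * worth of data into a bnx2fc_work so the completion can be handled by the
 * per-CPU I/O thread instead of in the fast-path context. Returns NULL on
 * allocation failure; the caller then processes the CQE inline.
 */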
0970 static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
0971                          unsigned char *rq_data, u8 num_rq,
0972                          struct fcoe_task_ctx_entry *task)
0973 {
0974     struct bnx2fc_work *work;
0975     work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
0976     if (!work)
0977         return NULL;
0978 
0979     INIT_LIST_HEAD(&work->list);
0980     work->tgt = tgt;
0981     work->wqe = wqe;
0982     work->num_rq = num_rq;
0983     work->task = task;
0984     if (rq_data)
0985         memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
0986 
0987     return work;
0988 }
0989 
0990 /* Queue a solicited CQE to the per-CPU I/O thread, or process it inline */
0991 static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
0992 {
0993     unsigned int cpu = wqe % num_possible_cpus();
0994     struct bnx2fc_percpu_s *fps;
0995     struct bnx2fc_work *work;
0996     struct fcoe_task_ctx_entry *task;
0997     struct fcoe_task_ctx_entry *task_page;
0998     struct fcoe_port *port = tgt->port;
0999     struct bnx2fc_interface *interface = port->priv;
1000     struct bnx2fc_hba *hba = interface->hba;
1001     unsigned char *rq_data = NULL;
1002     unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
1003     int task_idx, index;
1004     u16 xid;
1005     u8 num_rq;
1006     int i;
1007 
1008     xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
1009     if (xid >= hba->max_tasks) {
1010         pr_err(PFX "ERROR:xid out of range\n");
1011         return false;
1012     }
1013 
1014     task_idx = xid / BNX2FC_TASKS_PER_PAGE;
1015     index = xid % BNX2FC_TASKS_PER_PAGE;
1016     task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
1017     task = &task_page[index];
1018 
1019     num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
1020            FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
1021           FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
1022 
1023     memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
1024 
1025     if (!num_rq)
1026         goto num_rq_zero;
1027 
1028     rq_data = bnx2fc_get_next_rqe(tgt, 1);
1029 
1030     if (num_rq > 1) {
1031         /* We do not need extra sense data */
1032         for (i = 1; i < num_rq; i++)
1033             bnx2fc_get_next_rqe(tgt, 1);
1034     }
1035 
1036     if (rq_data)
1037         memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
1038 
1039     /* return RQ entries */
1040     for (i = 0; i < num_rq; i++)
1041         bnx2fc_return_rqe(tgt, 1);
1042 
1043 num_rq_zero:
1044 
1045     fps = &per_cpu(bnx2fc_percpu, cpu);
1046     spin_lock_bh(&fps->fp_work_lock);
1047     if (fps->iothread) {
1048         work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
1049                      num_rq, task);
1050         if (work) {
1051             list_add_tail(&work->list, &fps->work_list);
1052             wake_up_process(fps->iothread);
1053             spin_unlock_bh(&fps->fp_work_lock);
1054             return true;
1055         }
1056     }
1057     spin_unlock_bh(&fps->fp_work_lock);
1058     bnx2fc_process_cq_compl(tgt, wqe,
1059                 rq_data_buff, num_rq, task);
1060 
1061     return true;
1062 }
1063 
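/*
 * bnx2fc_process_new_cqes - drains the connection's CQ
 *
 * Walks CQ entries whose toggle bit matches the driver's current toggle,
 * routing unsolicited entries to bnx2fc_process_unsol_compl() and solicited
 * ones to bnx2fc_pending_work(). Wraps the consumer index at
 * BNX2FC_CQ_WQES_MAX, flips the toggle bit on wrap, re-arms the CQ when the
 * doorbell is mapped, and credits freed SQ entries back to tgt->free_sqes.
 */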
1064 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
1065 {
1066     struct fcoe_cqe *cq;
1067     u32 cq_cons;
1068     struct fcoe_cqe *cqe;
1069     u32 num_free_sqes = 0;
1070     u32 num_cqes = 0;
1071     u16 wqe;
1072 
1073     /*
1074      * cq_lock is a low contention lock used to protect
1075      * the CQ data structure from being freed up during
1076      * the upload operation
1077      */
1078     spin_lock_bh(&tgt->cq_lock);
1079 
1080     if (!tgt->cq) {
1081         printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
1082         spin_unlock_bh(&tgt->cq_lock);
1083         return 0;
1084     }
1085     cq = tgt->cq;
1086     cq_cons = tgt->cq_cons_idx;
1087     cqe = &cq[cq_cons];
1088 
1089     while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
1090            (tgt->cq_curr_toggle_bit <<
1091            FCOE_CQE_TOGGLE_BIT_SHIFT)) {
1092 
1093         /* new entry on the cq */
1094         if (wqe & FCOE_CQE_CQE_TYPE) {
1095             /* Unsolicited event notification */
1096             bnx2fc_process_unsol_compl(tgt, wqe);
1097         } else {
1098             if (bnx2fc_pending_work(tgt, wqe))
1099                 num_free_sqes++;
1100         }
1101         cqe++;
1102         tgt->cq_cons_idx++;
1103         num_cqes++;
1104 
1105         if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
1106             tgt->cq_cons_idx = 0;
1107             cqe = cq;
1108             tgt->cq_curr_toggle_bit =
1109                 1 - tgt->cq_curr_toggle_bit;
1110         }
1111     }
1112     if (num_cqes) {
1113         /* Arm CQ only if doorbell is mapped */
1114         if (tgt->ctx_base)
1115             bnx2fc_arm_cq(tgt);
1116         atomic_add(num_free_sqes, &tgt->free_sqes);
1117     }
1118     spin_unlock_bh(&tgt->cq_lock);
1119     return 0;
1120 }
1121 
1122 /**
1123  * bnx2fc_fastpath_notification - process global event queue (KCQ)
1124  *
1125  * @hba:        adapter structure pointer
1126  * @new_cqe_kcqe:   pointer to newly DMA'd KCQ entry
1127  *
1128  * Fast path event notification handler
1129  */
1130 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
1131                     struct fcoe_kcqe *new_cqe_kcqe)
1132 {
1133     u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
1134     struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
1135 
1136     if (!tgt) {
1137         printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
1138         return;
1139     }
1140 
1141     bnx2fc_process_new_cqes(tgt);
1142 }
1143 
1144 /**
1145  * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
1146  *
1147  * @hba:    adapter structure pointer
1148  * @ofld_kcqe:  connection offload kcqe pointer
1149  *
1150  * handle session offload completion, enable the session if offload is
1151  * successful.
1152  */
1153 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1154                     struct fcoe_kcqe *ofld_kcqe)
1155 {
1156     struct bnx2fc_rport     *tgt;
1157     struct bnx2fc_interface     *interface;
1158     u32             conn_id;
1159     u32             context_id;
1160 
1161     conn_id = ofld_kcqe->fcoe_conn_id;
1162     context_id = ofld_kcqe->fcoe_conn_context_id;
1163     tgt = hba->tgt_ofld_list[conn_id];
1164     if (!tgt) {
1165         printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
1166         return;
1167     }
1168     BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1169         ofld_kcqe->fcoe_conn_context_id);
1170     interface = tgt->port->priv;
1171     if (hba != interface->hba) {
1172         printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
1173         goto ofld_cmpl_err;
1174     }
1175     /*
1176      * cnic has allocated a context_id for this session; use this
1177      * while enabling the session.
1178      */
1179     tgt->context_id = context_id;
1180     if (ofld_kcqe->completion_status) {
1181         if (ofld_kcqe->completion_status ==
1182                 FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
1183             printk(KERN_ERR PFX "unable to allocate FCoE context "
1184                 "resources\n");
1185             set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
1186         }
1187     } else {
1188         /* FW offload request successfully completed */
1189         set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1190     }
1191 ofld_cmpl_err:
1192     set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1193     wake_up_interruptible(&tgt->ofld_wait);
1194 }
1195 
1196 /**
1197  * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1198  *
1199  * @hba:    adapter structure pointer
1200  * @ofld_kcqe:  connection offload kcqe pointer
1201  *
1202  * handle session enable completion, mark the rport as ready
1203  */
1204 
1205 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1206                         struct fcoe_kcqe *ofld_kcqe)
1207 {
1208     struct bnx2fc_rport     *tgt;
1209     struct bnx2fc_interface     *interface;
1210     u32             conn_id;
1211     u32             context_id;
1212 
1213     context_id = ofld_kcqe->fcoe_conn_context_id;
1214     conn_id = ofld_kcqe->fcoe_conn_id;
1215     tgt = hba->tgt_ofld_list[conn_id];
1216     if (!tgt) {
1217         printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1218         return;
1219     }
1220 
1221     BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1222         ofld_kcqe->fcoe_conn_context_id);
1223 
1224     /*
1225      * context_id should be the same for this target during offload
1226      * and enable
1227      */
1228     if (tgt->context_id != context_id) {
1229         printk(KERN_ERR PFX "context id mismatch\n");
1230         return;
1231     }
1232     interface = tgt->port->priv;
1233     if (hba != interface->hba) {
1234         printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
1235         goto enbl_cmpl_err;
1236     }
1237     if (!ofld_kcqe->completion_status)
1238         /* enable successful - rport ready for issuing IOs */
1239         set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1240 
1241 enbl_cmpl_err:
1242     set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1243     wake_up_interruptible(&tgt->ofld_wait);
1244 }
1245 
1246 static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1247                     struct fcoe_kcqe *disable_kcqe)
1248 {
1249 
1250     struct bnx2fc_rport     *tgt;
1251     u32             conn_id;
1252 
1253     conn_id = disable_kcqe->fcoe_conn_id;
1254     tgt = hba->tgt_ofld_list[conn_id];
1255     if (!tgt) {
1256         printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
1257         return;
1258     }
1259 
1260     BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1261 
1262     if (disable_kcqe->completion_status) {
1263         printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
1264             disable_kcqe->completion_status);
1265         set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
1266         set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1267         wake_up_interruptible(&tgt->upld_wait);
1268     } else {
1269         /* disable successful */
1270         BNX2FC_TGT_DBG(tgt, "disable successful\n");
1271         clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1272         clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1273         set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1274         set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1275         wake_up_interruptible(&tgt->upld_wait);
1276     }
1277 }
1278 
1279 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1280                     struct fcoe_kcqe *destroy_kcqe)
1281 {
1282     struct bnx2fc_rport     *tgt;
1283     u32             conn_id;
1284 
1285     conn_id = destroy_kcqe->fcoe_conn_id;
1286     tgt = hba->tgt_ofld_list[conn_id];
1287     if (!tgt) {
1288         printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
1289         return;
1290     }
1291 
1292     BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1293 
1294     if (destroy_kcqe->completion_status) {
1295         printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
1296             destroy_kcqe->completion_status);
1297         return;
1298     } else {
1299         /* destroy successful */
1300         BNX2FC_TGT_DBG(tgt, "upload successful\n");
1301         clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1302         set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1303         set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1304         wake_up_interruptible(&tgt->upld_wait);
1305     }
1306 }
1307 
1308 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1309 {
1310     switch (err_code) {
1311     case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1312         printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1313         break;
1314 
1315     case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1316         printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1317         break;
1318 
1319     case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1320         printk(KERN_ERR PFX "init_failure due to NIC error\n");
1321         break;
1322     case FCOE_KCQE_COMPLETION_STATUS_ERROR:
1323         printk(KERN_ERR PFX "init failure due to compl status err\n");
1324         break;
1325     case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1326         printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1327         break;
1328     default:
1329         printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1330     }
1331 }
1332 
1333 /**
1334  * bnx2fc_indicate_kcqe() - process KCQE
1335  *
1336  * @context:    adapter structure pointer
1337  * @kcq:    kcqe pointer
1338  * @num_cqe:    Number of completion queue elements
1339  *
1340  * Generic KCQ event handler
1341  */
1342 void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1343                     u32 num_cqe)
1344 {
1345     struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1346     int i = 0;
1347     struct fcoe_kcqe *kcqe = NULL;
1348 
1349     while (i < num_cqe) {
1350         kcqe = (struct fcoe_kcqe *) kcq[i++];
1351 
1352         switch (kcqe->op_code) {
1353         case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1354             bnx2fc_fastpath_notification(hba, kcqe);
1355             break;
1356 
1357         case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1358             bnx2fc_process_ofld_cmpl(hba, kcqe);
1359             break;
1360 
1361         case FCOE_KCQE_OPCODE_ENABLE_CONN:
1362             bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1363             break;
1364 
1365         case FCOE_KCQE_OPCODE_INIT_FUNC:
1366             if (kcqe->completion_status !=
1367                     FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1368                 bnx2fc_init_failure(hba,
1369                         kcqe->completion_status);
1370             } else {
1371                 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1372                 bnx2fc_get_link_state(hba);
1373                 printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1374                     (u8)hba->pcidev->bus->number);
1375             }
1376             break;
1377 
1378         case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1379             if (kcqe->completion_status !=
1380                     FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1381 
1382                 printk(KERN_ERR PFX "DESTROY failed\n");
1383             } else {
1384                 printk(KERN_ERR PFX "DESTROY success\n");
1385             }
1386             set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1387             wake_up_interruptible(&hba->destroy_wait);
1388             break;
1389 
1390         case FCOE_KCQE_OPCODE_DISABLE_CONN:
1391             bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1392             break;
1393 
1394         case FCOE_KCQE_OPCODE_DESTROY_CONN:
1395             bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1396             break;
1397 
1398         case FCOE_KCQE_OPCODE_STAT_FUNC:
1399             if (kcqe->completion_status !=
1400                 FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1401                 printk(KERN_ERR PFX "STAT failed\n");
1402             complete(&hba->stat_req_done);
1403             break;
1404 
1405         case FCOE_KCQE_OPCODE_FCOE_ERROR:
1406         default:
1407             printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1408                                 kcqe->op_code);
1409         }
1410     }
1411 }
1412 
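/*
 * bnx2fc_add_2_sq - posts one SQ work queue entry for @xid
 *
 * Writes the task id and current toggle bit into the next SQ slot and
 * advances the producer index, flipping the toggle bit when the ring wraps
 * at BNX2FC_SQ_WQES_MAX. The doorbell is rung separately via
 * bnx2fc_ring_doorbell().
 */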
1413 void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1414 {
1415     struct fcoe_sqe *sqe;
1416 
1417     sqe = &tgt->sq[tgt->sq_prod_idx];
1418 
1419     /* Fill SQ WQE */
1420     sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1421     sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1422 
1423     /* Advance SQ Prod Idx */
1424     if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1425         tgt->sq_prod_idx = 0;
1426         tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1427     }
1428 }
1429 
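/*
 * bnx2fc_ring_doorbell - notifies the hardware of new SQ entries
 *
 * Publishes the SQ producer index (with the toggle bit in bit 15) through
 * the per-connection doorbell register mapped at tgt->ctx_base. The wmb()
 * ensures the SQEs are visible in memory before the doorbell write.
 */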
1430 void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1431 {
1432     struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
1433     u32 msg;
1434 
1435     wmb();
1436     sq_db->prod = tgt->sq_prod_idx |
1437                 (tgt->sq_curr_toggle_bit << 15);
1438     msg = *((u32 *)sq_db);
1439     writel(cpu_to_le32(msg), tgt->ctx_base);
1440 
1441 }
1442 
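/*
 * bnx2fc_map_doorbell - ioremaps the per-connection doorbell register
 *
 * Computes the doorbell offset from the connection's context_id within the
 * BNX2X doorbell PCI BAR and maps 4 bytes of it into tgt->ctx_base.
 * Returns -ENOMEM if the mapping fails.
 */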
1443 int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1444 {
1445     u32 context_id = tgt->context_id;
1446     struct fcoe_port *port = tgt->port;
1447     u32 reg_off;
1448     resource_size_t reg_base;
1449     struct bnx2fc_interface *interface = port->priv;
1450     struct bnx2fc_hba *hba = interface->hba;
1451 
1452     reg_base = pci_resource_start(hba->pcidev,
1453                     BNX2X_DOORBELL_PCI_BAR);
1454     reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
1455     tgt->ctx_base = ioremap(reg_base + reg_off, 4);
1456     if (!tgt->ctx_base)
1457         return -ENOMEM;
1458     return 0;
1459 }
1460 
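/*
 * bnx2fc_get_next_rqe - returns a pointer into the RQ at the consumer index
 *
 * Hands back @num_items contiguous RQ buffers starting at rq_cons_idx and
 * advances the consumer index, wrapping at BNX2FC_RQ_WQES_MAX. Returns NULL
 * if the request would run past the end of the ring, where the buffers
 * would no longer be contiguous.
 */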
1461 char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1462 {
1463     char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1464 
1465     if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1466         return NULL;
1467 
1468     tgt->rq_cons_idx += num_items;
1469 
1470     if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1471         tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1472 
1473     return buf;
1474 }
1475 
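/*
 * bnx2fc_return_rqe - returns @num_items RQ buffers to the firmware
 *
 * Advances the RQ producer index (with the firmware's 0x8000-based wrap
 * handling, matching the initial rq_prod programmed in the offload request)
 * and writes the new value into the connection doorbell page.
 */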
1476 void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1477 {
1478     /* return the rq buffer */
1479     u32 next_prod_idx = tgt->rq_prod_idx + num_items;
1480     if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1481         /* Wrap around RQ */
1482         next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1483     }
1484     tgt->rq_prod_idx = next_prod_idx;
1485     tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1486 }
1487 
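/**
 * bnx2fc_init_seq_cleanup_task - initializes a sequence cleanup task context
 *
 * @seq_clnp_req:	the sequence cleanup command being set up
 * @task:		task context entry to fill in
 * @orig_io_req:	the original SCSI command whose sequence is cleaned up
 * @offset:		relative offset into the IO at which to resume
 *
 * Fills the firmware task context for a sequence-level cleanup: records the
 * original xid and offset, then walks the original command's BD table to
 * locate the SGE and intra-SGE offset corresponding to @offset so the
 * transfer can be restarted from that point.
 */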
1488 void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1489                   struct fcoe_task_ctx_entry *task,
1490                   struct bnx2fc_cmd *orig_io_req,
1491                   u32 offset)
1492 {
1493     struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1494     struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1495     struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1496     struct fcoe_ext_mul_sges_ctx *sgl;
1497     u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1498     u8 orig_task_type;
1499     u16 orig_xid = orig_io_req->xid;
1500     u32 context_id = tgt->context_id;
1501     u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1502     u32 orig_offset = offset;
1503     int bd_count;
1504     int i;
1505 
1506     memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1507 
1508     if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1509         orig_task_type = FCOE_TASK_TYPE_WRITE;
1510     else
1511         orig_task_type = FCOE_TASK_TYPE_READ;
1512 
1513     /* Tx flags */
1514     task->txwr_rxrd.const_ctx.tx_flags =
1515                 FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1516                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1517     /* init flags */
1518     task->txwr_rxrd.const_ctx.init_flags = task_type <<
1519                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1520     task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1521                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1522     task->rxwr_txrd.const_ctx.init_flags = context_id <<
1523                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1526 
1527     task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1528 
1529     task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1530     task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1531 
1532     bd_count = orig_io_req->bd_tbl->bd_valid;
1533 
1534     /* obtain the appropriate bd entry from relative offset */
1535     for (i = 0; i < bd_count; i++) {
1536         if (offset < bd[i].buf_len)
1537             break;
1538         offset -= bd[i].buf_len;
1539     }
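         /*
          * i now indexes the BD containing the requested offset, and
          * 'offset' holds the remainder within that BD; advance phys_addr
          * to that BD entry.
          */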
1540     phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1541 
1542     if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1543         task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1544                 (u32)phys_addr;
1545         task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1546                 (u32)((u64)phys_addr >> 32);
1547         task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1548                 bd_count;
1549         task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1550                 offset; /* adjusted offset */
1551         task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1552     } else {
1553 
1554         /* Multiple SGEs were used for this IO */
1555         sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1556         sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1557         sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1558         sgl->mul_sgl.sgl_size = bd_count;
1559         sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
1560         sgl->mul_sgl.cur_sge_idx = i;
1561 
1562         memset(&task->rxwr_only.rx_seq_ctx, 0,
1563                sizeof(struct fcoe_rx_seq_ctx));
1564         task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1565         task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1566     }
1567 }
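
 /*
  * Build an exchange-cleanup task: it carries the XID of the task being
  * cleaned up (orig_xid) so the firmware can invalidate that exchange.
  */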
1568 void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1569                   struct fcoe_task_ctx_entry *task,
1570                   u16 orig_xid)
1571 {
1572     u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1573     struct bnx2fc_rport *tgt = io_req->tgt;
1574     u32 context_id = tgt->context_id;
1575 
1576     memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1577 
1578     /* Tx Write Rx Read */
1579     /* init flags */
1580     task->txwr_rxrd.const_ctx.init_flags = task_type <<
1581                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1582     task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1583                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1584     if (tgt->dev_type == TYPE_TAPE)
1585         task->txwr_rxrd.const_ctx.init_flags |=
1586                 FCOE_TASK_DEV_TYPE_TAPE <<
1587                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1588     else
1589         task->txwr_rxrd.const_ctx.init_flags |=
1590                 FCOE_TASK_DEV_TYPE_DISK <<
1591                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1592     task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1593 
1594     /* Tx flags */
1595     task->txwr_rxrd.const_ctx.tx_flags =
1596                 FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1597                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1598 
1599     /* Rx Read Tx Write */
1600     task->rxwr_txrd.const_ctx.init_flags = context_id <<
1601                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1602     task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1603                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1604 }
1605 
1606 void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1607                 struct fcoe_task_ctx_entry *task)
1608 {
1609     struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1610     struct bnx2fc_rport *tgt = io_req->tgt;
1611     struct fc_frame_header *fc_hdr;
1612     struct fcoe_ext_mul_sges_ctx *sgl;
1613     u8 task_type = 0;
1614     u64 *hdr;
1615     u64 temp_hdr[3];
1616     u32 context_id;
1617 
1618 
1619     /* Obtain task_type */
1620     if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1621         (io_req->cmd_type == BNX2FC_ELS)) {
1622         task_type = FCOE_TASK_TYPE_MIDPATH;
1623     } else if (io_req->cmd_type == BNX2FC_ABTS) {
1624         task_type = FCOE_TASK_TYPE_ABTS;
1625     }
1626 
1627     memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1628 
1629     /* Setup the task from io_req for easy reference */
1630     io_req->task = task;
1631 
1632     BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1633         io_req->cmd_type, task_type);
1634 
1635     /* Tx only */
1636     if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1637         (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1638         task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1639                 (u32)mp_req->mp_req_bd_dma;
1640         task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1641                 (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1642         task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1643     }
1644 
1645     /* Tx Write Rx Read */
1646     /* init flags */
1647     task->txwr_rxrd.const_ctx.init_flags = task_type <<
1648                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1649     if (tgt->dev_type == TYPE_TAPE)
1650         task->txwr_rxrd.const_ctx.init_flags |=
1651                 FCOE_TASK_DEV_TYPE_TAPE <<
1652                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1653     else
1654         task->txwr_rxrd.const_ctx.init_flags |=
1655                 FCOE_TASK_DEV_TYPE_DISK <<
1656                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1657     task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1658                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1659 
1660     /* tx flags */
1661     task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1662                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1663 
1664     /* Rx Write Tx Read */
1665     task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1666 
1667     /* rx flags */
1668     task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1669                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1670 
1671     context_id = tgt->context_id;
1672     task->rxwr_txrd.const_ctx.init_flags = context_id <<
1673                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1674 
1675     fc_hdr = &(mp_req->req_fc_hdr);
1676     if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1677         fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1678         fc_hdr->fh_rx_id = cpu_to_be16(0xffff);
1679         task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1680     } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1681         fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1682     }
1683 
1684     /* Fill FC Header into middle path buffer */
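         /*
          * The header is staged in temp_hdr and written as three 64-bit
          * words converted with cpu_to_be64, which appears to be the
          * layout the firmware expects for the FC header in the task
          * context.
          */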
1685     hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1686     memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1687     hdr[0] = cpu_to_be64(temp_hdr[0]);
1688     hdr[1] = cpu_to_be64(temp_hdr[1]);
1689     hdr[2] = cpu_to_be64(temp_hdr[2]);
1690 
1691     /* Rx Only */
1692     if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1693         sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1694 
1695         sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1696         sgl->mul_sgl.cur_sge_addr.hi =
1697                 (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1698         sgl->mul_sgl.sgl_size = 1;
1699     }
1700 }
1701 
1702 void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1703                  struct fcoe_task_ctx_entry *task)
1704 {
1705     u8 task_type;
1706     struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1707     struct io_bdt *bd_tbl = io_req->bd_tbl;
1708     struct bnx2fc_rport *tgt = io_req->tgt;
1709     struct fcoe_cached_sge_ctx *cached_sge;
1710     struct fcoe_ext_mul_sges_ctx *sgl;
1711     int dev_type = tgt->dev_type;
1712     u64 *fcp_cmnd;
1713     u64 tmp_fcp_cmnd[4];
1714     u32 context_id;
1715     int cnt, i;
1716     int bd_count;
1717 
1718     memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1719 
1720     /* Setup the task from io_req for easy reference */
1721     io_req->task = task;
1722 
1723     if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1724         task_type = FCOE_TASK_TYPE_WRITE;
1725     else
1726         task_type = FCOE_TASK_TYPE_READ;
1727 
1728     /* Tx only */
1729     bd_count = bd_tbl->bd_valid;
1730     cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
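         /*
          * For single-BD disk I/O the buffer descriptor is cached right
          * in the task context (CACHED_SGE flag below), so the firmware
          * need not fetch a separate SGL; otherwise the SGL address is
          * programmed instead.
          */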
1731     if (task_type == FCOE_TASK_TYPE_WRITE) {
1732         if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
1733             struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1734 
1735             task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
1736             cached_sge->cur_buf_addr.lo =
1737                     fcoe_bd_tbl->buf_addr_lo;
1738             task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
1739             cached_sge->cur_buf_addr.hi =
1740                     fcoe_bd_tbl->buf_addr_hi;
1741             task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
1742             cached_sge->cur_buf_rem =
1743                     fcoe_bd_tbl->buf_len;
1744 
1745             task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1746                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1747         } else {
1748             task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1749                     (u32)bd_tbl->bd_tbl_dma;
1750             task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1751                     (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1752             task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1753                     bd_tbl->bd_valid;
1754         }
1755     }
1756 
1757     /*Tx Write Rx Read */
1758     /* Init state to NORMAL */
1759     task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1760                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1761     if (dev_type == TYPE_TAPE) {
1762         task->txwr_rxrd.const_ctx.init_flags |=
1763                 FCOE_TASK_DEV_TYPE_TAPE <<
1764                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1765         io_req->rec_retry = 0;
1767     } else
1768         task->txwr_rxrd.const_ctx.init_flags |=
1769                 FCOE_TASK_DEV_TYPE_DISK <<
1770                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1771     task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1772                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1773     /* tx flags */
1774     task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1775                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1776 
1777     /* Set initial seq counter */
1778     task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1779 
1780     /* Fill FCP_CMND IU */
1781     fcp_cmnd = (u64 *)
1782             task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1783     bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1784 
1785     /* Copy the FCP_CMND IU into the task context as big-endian u64 words */
1786     cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1787 
1788     for (i = 0; i < cnt; i++) {
1789         *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1790         fcp_cmnd++;
1791     }
1792 
1793     /* Rx Write Tx Read */
1794     task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1795 
1796     context_id = tgt->context_id;
1797     task->rxwr_txrd.const_ctx.init_flags = context_id <<
1798                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1799 
1800     /* rx flags */
1801     /* Set state to "waiting for the first packet" */
1802     task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1803                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1804 
1805     task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1806 
1807     /* Rx Only */
1808     if (task_type != FCOE_TASK_TYPE_READ)
1809         return;
1810 
1811     sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1812     bd_count = bd_tbl->bd_valid;
1813 
1814     if (dev_type == TYPE_DISK) {
1815         if (bd_count == 1) {
1816 
1817             struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1818 
1819             cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1820             cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1821             cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1822             task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1823                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1824         } else if (bd_count == 2) {
1825             struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1826 
1827             cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1828             cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1829             cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1830 
1831             fcoe_bd_tbl++;
1832             cached_sge->second_buf_addr.lo =
1833                          fcoe_bd_tbl->buf_addr_lo;
1834             cached_sge->second_buf_addr.hi =
1835                         fcoe_bd_tbl->buf_addr_hi;
1836             cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1837             task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1838                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1839         } else {
1840 
1841             sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1842             sgl->mul_sgl.cur_sge_addr.hi =
1843                     (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1844             sgl->mul_sgl.sgl_size = bd_count;
1845         }
1846     } else {
1847         sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1848         sgl->mul_sgl.cur_sge_addr.hi =
1849                 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1850         sgl->mul_sgl.sgl_size = bd_count;
1851     }
1852 }
1853 
1854 /**
1855  * bnx2fc_setup_task_ctx - allocate and map task context memory
1856  *
1857  * @hba:    pointer to adapter structure
1858  *
1859  * Allocate DMA-coherent memory for the task context pages and the
1860  * associated BD table through which the firmware locates them.
1861  *
1862  */
1863 int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1864 {
1865     int rc = 0;
1866     struct regpair *task_ctx_bdt;
1867     dma_addr_t addr;
1868     int task_ctx_arr_sz;
1869     int i;
1870 
1871     /*
1872      * Allocate the task context BD table. A single page of BD table
1873      * can map 256 buffers, and each buffer holds 32 task context
1874      * entries, so one page supports up to 256 * 32 = 8192 task
1875      * context entries.
1876      */
1877     hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1878                           PAGE_SIZE,
1879                           &hba->task_ctx_bd_dma,
1880                           GFP_KERNEL);
1881     if (!hba->task_ctx_bd_tbl) {
1882         printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1883         rc = -1;
1884         goto out;
1885     }
1886 
1887     /*
1888      * Allocate task_ctx, an array of pointers, each pointing to a
1889      * page that holds 32 task contexts.
1890      */
1891     task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1892     hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
1893                  GFP_KERNEL);
1894     if (!hba->task_ctx) {
1895         printk(KERN_ERR PFX "unable to allocate task context array\n");
1896         rc = -1;
1897         goto out1;
1898     }
1899 
1900     /*
1901      * Allocate task_ctx_dma, an array of per-page DMA addresses
1902      */
1903     hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
1904                     sizeof(dma_addr_t)), GFP_KERNEL);
1905     if (!hba->task_ctx_dma) {
1906         printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1907         rc = -1;
1908         goto out2;
1909     }
1910 
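         /*
          * Allocate one DMA-coherent page per group of task contexts and
          * record each page's address in the BD table as a little-endian
          * lo/hi pair.
          */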
1911     task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1912     for (i = 0; i < task_ctx_arr_sz; i++) {
1913 
1914         hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1915                               PAGE_SIZE,
1916                               &hba->task_ctx_dma[i],
1917                               GFP_KERNEL);
1918         if (!hba->task_ctx[i]) {
1919             printk(KERN_ERR PFX "unable to alloc task context\n");
1920             rc = -1;
1921             goto out3;
1922         }
1923         addr = (u64)hba->task_ctx_dma[i];
1924         task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1925         task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1926         task_ctx_bdt++;
1927     }
1928     return 0;
1929 
1930 out3:
1931     for (i = 0; i < task_ctx_arr_sz; i++) {
1932         if (hba->task_ctx[i]) {
1933 
1934             dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1935                 hba->task_ctx[i], hba->task_ctx_dma[i]);
1936             hba->task_ctx[i] = NULL;
1937         }
1938     }
1939 
1940     kfree(hba->task_ctx_dma);
1941     hba->task_ctx_dma = NULL;
1942 out2:
1943     kfree(hba->task_ctx);
1944     hba->task_ctx = NULL;
1945 out1:
1946     dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1947             hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1948     hba->task_ctx_bd_tbl = NULL;
1949 out:
1950     return rc;
1951 }
1952 
1953 void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1954 {
1955     int task_ctx_arr_sz;
1956     int i;
1957 
1958     if (hba->task_ctx_bd_tbl) {
1959         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1960                     hba->task_ctx_bd_tbl,
1961                     hba->task_ctx_bd_dma);
1962         hba->task_ctx_bd_tbl = NULL;
1963     }
1964 
1965     task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1966     if (hba->task_ctx) {
1967         for (i = 0; i < task_ctx_arr_sz; i++) {
1968             if (hba->task_ctx[i]) {
1969                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1970                             hba->task_ctx[i],
1971                             hba->task_ctx_dma[i]);
1972                 hba->task_ctx[i] = NULL;
1973             }
1974         }
1975         kfree(hba->task_ctx);
1976         hba->task_ctx = NULL;
1977     }
1978 
1979     kfree(hba->task_ctx_dma);
1980     hba->task_ctx_dma = NULL;
1981 }
1982 
1983 static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1984 {
1985     int i;
1986     int segment_count;
1987     u32 *pbl;
1988 
1989     if (hba->hash_tbl_segments) {
1990 
1991         pbl = hba->hash_tbl_pbl;
1992         if (pbl) {
1993             segment_count = hba->hash_tbl_segment_count;
1994             for (i = 0; i < segment_count; ++i) {
1995                 dma_addr_t dma_address;
1996 
1997                 dma_address = le32_to_cpu(*pbl);
1998                 ++pbl;
1999                 dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
2000                 ++pbl;
2001                 dma_free_coherent(&hba->pcidev->dev,
2002                           BNX2FC_HASH_TBL_CHUNK_SIZE,
2003                           hba->hash_tbl_segments[i],
2004                           dma_address);
2005             }
2006         }
2007 
2008         kfree(hba->hash_tbl_segments);
2009         hba->hash_tbl_segments = NULL;
2010     }
2011 
2012     if (hba->hash_tbl_pbl) {
2013         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2014                     hba->hash_tbl_pbl,
2015                     hba->hash_tbl_pbl_dma);
2016         hba->hash_tbl_pbl = NULL;
2017     }
2018 }
2019 
2020 static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2021 {
2022     int i;
2023     int hash_table_size;
2024     int segment_count;
2025     int segment_array_size;
2026     int dma_segment_array_size;
2027     dma_addr_t *dma_segment_array;
2028     u32 *pbl;
2029 
2030     hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
2031         sizeof(struct fcoe_hash_table_entry);
2032 
2033     segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
2034     segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
2035     hba->hash_tbl_segment_count = segment_count;
2036 
2037     segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
2038     hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
2039     if (!hba->hash_tbl_segments) {
2040         printk(KERN_ERR PFX "hash table pointers alloc failed\n");
2041         return -ENOMEM;
2042     }
2043     dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
2044     dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
2045     if (!dma_segment_array) {
2046         printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
2047         goto cleanup_ht;
2048     }
2049 
2050     for (i = 0; i < segment_count; ++i) {
2051         hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
2052                                    BNX2FC_HASH_TBL_CHUNK_SIZE,
2053                                    &dma_segment_array[i],
2054                                    GFP_KERNEL);
2055         if (!hba->hash_tbl_segments[i]) {
2056             printk(KERN_ERR PFX "hash segment alloc failed\n");
2057             goto cleanup_dma;
2058         }
2059     }
2060 
2061     hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2062                            &hba->hash_tbl_pbl_dma,
2063                            GFP_KERNEL);
2064     if (!hba->hash_tbl_pbl) {
2065         printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2066         goto cleanup_dma;
2067     }
2068 
2069     pbl = hba->hash_tbl_pbl;
2070     for (i = 0; i < segment_count; ++i) {
2071         u64 paddr = dma_segment_array[i];
2072         *pbl = cpu_to_le32((u32) paddr);
2073         ++pbl;
2074         *pbl = cpu_to_le32((u32) (paddr >> 32));
2075         ++pbl;
2076     }
2077     pbl = hba->hash_tbl_pbl;
2078     i = 0;
2079     while (*pbl && *(pbl + 1)) {
2080         ++pbl;
2081         ++pbl;
2082         ++i;
2083     }
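         /* The walk above only counts populated PBL entries; the count is
          * not used beyond this point.
          */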
2084     kfree(dma_segment_array);
2085     return 0;
2086 
2087 cleanup_dma:
2088     for (i = 0; i < segment_count; ++i) {
2089         if (hba->hash_tbl_segments[i])
2090             dma_free_coherent(&hba->pcidev->dev,
2091                         BNX2FC_HASH_TBL_CHUNK_SIZE,
2092                         hba->hash_tbl_segments[i],
2093                         dma_segment_array[i]);
2094     }
2095 
2096     kfree(dma_segment_array);
2097 
2098 cleanup_ht:
2099     kfree(hba->hash_tbl_segments);
2100     hba->hash_tbl_segments = NULL;
2101     return -ENOMEM;
2102 }
2103 
2104 /**
2105  * bnx2fc_setup_fw_resc - Allocate hash tables, dummy and stats buffers
2106  *
2107  * @hba:    Pointer to adapter structure
2108  *
2109  */
2110 int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2111 {
2112     u64 addr;
2113     u32 mem_size;
2114     int i;
2115 
2116     if (bnx2fc_allocate_hash_table(hba))
2117         return -ENOMEM;
2118 
2119     mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2120     hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2121                           &hba->t2_hash_tbl_ptr_dma,
2122                           GFP_KERNEL);
2123     if (!hba->t2_hash_tbl_ptr) {
2124         printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2125         bnx2fc_free_fw_resc(hba);
2126         return -ENOMEM;
2127     }
2128 
2129     mem_size = BNX2FC_NUM_MAX_SESS *
2130                 sizeof(struct fcoe_t2_hash_table_entry);
2131     hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2132                           &hba->t2_hash_tbl_dma,
2133                           GFP_KERNEL);
2134     if (!hba->t2_hash_tbl) {
2135         printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2136         bnx2fc_free_fw_resc(hba);
2137         return -ENOMEM;
2138     }
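         /*
          * Chain the T2 hash table entries: each entry's 'next' pointer
          * is set to the DMA address of the entry that follows it.
          */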
2139     for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
2140         addr = (u64) hba->t2_hash_tbl_dma +
2141              ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
2142         hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
2143         hba->t2_hash_tbl[i].next.hi = addr >> 32;
2144     }
2145 
2146     hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2147                            PAGE_SIZE, &hba->dummy_buf_dma,
2148                            GFP_KERNEL);
2149     if (!hba->dummy_buffer) {
2150         printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
2151         bnx2fc_free_fw_resc(hba);
2152         return -ENOMEM;
2153     }
2154 
2155     hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2156                            &hba->stats_buf_dma,
2157                            GFP_KERNEL);
2158     if (!hba->stats_buffer) {
2159         printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2160         bnx2fc_free_fw_resc(hba);
2161         return -ENOMEM;
2162     }
2163 
2164     return 0;
2165 }
2166 
2167 void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
2168 {
2169     u32 mem_size;
2170 
2171     if (hba->stats_buffer) {
2172         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2173                   hba->stats_buffer, hba->stats_buf_dma);
2174         hba->stats_buffer = NULL;
2175     }
2176 
2177     if (hba->dummy_buffer) {
2178         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2179                   hba->dummy_buffer, hba->dummy_buf_dma);
2180         hba->dummy_buffer = NULL;
2181     }
2182 
2183     if (hba->t2_hash_tbl_ptr) {
2184         mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2185         dma_free_coherent(&hba->pcidev->dev, mem_size,
2186                     hba->t2_hash_tbl_ptr,
2187                     hba->t2_hash_tbl_ptr_dma);
2188         hba->t2_hash_tbl_ptr = NULL;
2189     }
2190 
2191     if (hba->t2_hash_tbl) {
2192         mem_size = BNX2FC_NUM_MAX_SESS *
2193                 sizeof(struct fcoe_t2_hash_table_entry);
2194         dma_free_coherent(&hba->pcidev->dev, mem_size,
2195                     hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
2196         hba->t2_hash_tbl = NULL;
2197     }
2198     bnx2fc_free_hash_table(hba);
2199 }