// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

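/**
 * fnic_handle_link() - handle link up/down notifications from the firmware.
 * @work: the fnic link_work work_struct.
 *
 * Reads the current link state, link-down count and port speed from the
 * vNIC device, updates the FC host speed attributes, and drives the FCoE
 * controller (and FIP VLAN discovery, when enabled) through the resulting
 * link transition.
 */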
void fnic_handle_link(struct work_struct *work)
{
    struct fnic *fnic = container_of(work, struct fnic, link_work);
    unsigned long flags;
    int old_link_status;
    u32 old_link_down_cnt;
    u64 old_port_speed, new_port_speed;

    spin_lock_irqsave(&fnic->fnic_lock, flags);

    fnic->link_events = 1;      /* less work to just set every time */

    if (fnic->stop_rx_link_events) {
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        return;
    }

    old_link_down_cnt = fnic->link_down_cnt;
    old_link_status = fnic->link_status;
    old_port_speed = atomic64_read(
            &fnic->fnic_stats.misc_stats.current_port_speed);

    fnic->link_status = vnic_dev_link_status(fnic->vdev);
    fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

    new_port_speed = vnic_dev_port_speed(fnic->vdev);
    atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
            new_port_speed);
    if (old_port_speed != new_port_speed)
        FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
                "Current vnic speed set to :  %llu\n",
                new_port_speed);

    switch (vnic_dev_port_speed(fnic->vdev)) {
    case DCEM_PORTSPEED_10G:
        fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_10GBIT;
        fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
        break;
    case DCEM_PORTSPEED_20G:
        fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_20GBIT;
        fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
        break;
    case DCEM_PORTSPEED_25G:
        fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_25GBIT;
        fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
        break;
    case DCEM_PORTSPEED_40G:
    case DCEM_PORTSPEED_4x10G:
        fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_40GBIT;
        fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
        break;
    case DCEM_PORTSPEED_100G:
        fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_100GBIT;
        fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
        break;
    default:
        fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_UNKNOWN;
        fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
        break;
    }

    if (old_link_status == fnic->link_status) {
        if (!fnic->link_status) {
            /* DOWN -> DOWN */
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            fnic_fc_trace_set_data(fnic->lport->host->host_no,
                FNIC_FC_LE, "Link Status: DOWN->DOWN",
                strlen("Link Status: DOWN->DOWN"));
        } else {
            if (old_link_down_cnt != fnic->link_down_cnt) {
                /* UP -> DOWN -> UP */
                fnic->lport->host_stats.link_failure_count++;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                fnic_fc_trace_set_data(
                    fnic->lport->host->host_no,
                    FNIC_FC_LE,
                    "Link Status:UP_DOWN_UP",
                    strlen("Link Status:UP_DOWN_UP")
                    );
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                         "link down\n");
                fcoe_ctlr_link_down(&fnic->ctlr);
                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                    /* start FCoE VLAN discovery */
                    fnic_fc_trace_set_data(
                        fnic->lport->host->host_no,
                        FNIC_FC_LE,
                        "Link Status: UP_DOWN_UP_VLAN",
                        strlen(
                        "Link Status: UP_DOWN_UP_VLAN")
                        );
                    fnic_fcoe_send_vlan_req(fnic);
                    return;
                }
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                         "link up\n");
                fcoe_ctlr_link_up(&fnic->ctlr);
            } else {
                /* UP -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                fnic_fc_trace_set_data(
                    fnic->lport->host->host_no, FNIC_FC_LE,
                    "Link Status: UP_UP",
                    strlen("Link Status: UP_UP"));
            }
        }
    } else if (fnic->link_status) {
        /* DOWN -> UP */
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        if (fnic->config.flags & VFCF_FIP_CAPABLE) {
            /* start FCoE VLAN discovery */
            fnic_fc_trace_set_data(
                fnic->lport->host->host_no,
                FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
                strlen("Link Status: DOWN_UP_VLAN"));
            fnic_fcoe_send_vlan_req(fnic);
            return;
        }
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
        fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
            "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
        fcoe_ctlr_link_up(&fnic->ctlr);
    } else {
        /* UP -> DOWN */
        fnic->lport->host_stats.link_failure_count++;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
        fnic_fc_trace_set_data(
            fnic->lport->host->host_no, FNIC_FC_LE,
            "Link Status: UP_DOWN",
            strlen("Link Status: UP_DOWN"));
        if (fnic->config.flags & VFCF_FIP_CAPABLE) {
            FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                "deleting fip-timer during link-down\n");
            del_timer_sync(&fnic->fip_timer);
        }
        fcoe_ctlr_link_down(&fnic->ctlr);
    }

}

/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
    struct fnic *fnic = container_of(work, struct fnic, frame_work);
    struct fc_lport *lp = fnic->lport;
    unsigned long flags;
    struct sk_buff *skb;
    struct fc_frame *fp;

    while ((skb = skb_dequeue(&fnic->frame_queue))) {

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            dev_kfree_skb(skb);
            return;
        }
        fp = (struct fc_frame *)skb;

        /*
         * If we're in a transitional state, just re-queue and return.
         * The queue will be serviced when we get to a stable state.
         */
        if (fnic->state != FNIC_IN_FC_MODE &&
            fnic->state != FNIC_IN_ETH_MODE) {
            skb_queue_head(&fnic->frame_queue, skb);
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        fc_exch_recv(lp, fp);
    }
}

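/**
 * fnic_fcoe_evlist_free() - free all events pending on the fnic event list.
 * @fnic: fnic instance.
 */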
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
    struct fnic_event *fevt = NULL;
    struct fnic_event *next = NULL;
    unsigned long flags;

    spin_lock_irqsave(&fnic->fnic_lock, flags);
    if (list_empty(&fnic->evlist)) {
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        return;
    }

    list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
        list_del(&fevt->list);
        kfree(fevt);
    }
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

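/**
 * fnic_handle_event() - process queued fnic events.
 * @work: the fnic event_work work_struct.
 *
 * Dequeues events from the event list and starts FCoE VLAN or FCF
 * discovery as requested. Events are discarded once rx/link events are
 * stopped, and processing is deferred while the fnic is in a
 * transitional state.
 */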
void fnic_handle_event(struct work_struct *work)
{
    struct fnic *fnic = container_of(work, struct fnic, event_work);
    struct fnic_event *fevt = NULL;
    struct fnic_event *next = NULL;
    unsigned long flags;

    spin_lock_irqsave(&fnic->fnic_lock, flags);
    if (list_empty(&fnic->evlist)) {
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        return;
    }

    list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
        if (fnic->stop_rx_link_events) {
            list_del(&fevt->list);
            kfree(fevt);
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return;
        }
        /*
         * If we're in a transitional state, just re-queue and return.
         * The queue will be serviced when we get to a stable state.
         */
        if (fnic->state != FNIC_IN_FC_MODE &&
            fnic->state != FNIC_IN_ETH_MODE) {
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return;
        }

        list_del(&fevt->list);
        switch (fevt->event) {
        case FNIC_EVT_START_VLAN_DISC:
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            fnic_fcoe_send_vlan_req(fnic);
            spin_lock_irqsave(&fnic->fnic_lock, flags);
            break;
        case FNIC_EVT_START_FCF_DISC:
            FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                  "Start FCF Discovery\n");
            fnic_fcoe_start_fcf_disc(fnic);
            break;
        default:
            FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                  "Unknown event 0x%x\n", fevt->event);
            break;
        }
        kfree(fevt);
    }
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if the received FIP FLOGI frame is rejected
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is rejected with an unsupported command and
 * insufficient resources ELS explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
                     struct sk_buff *skb)
{
    struct fc_lport *lport = fip->lp;
    struct fip_header *fiph;
    struct fc_frame_header *fh = NULL;
    struct fip_desc *desc;
    struct fip_encaps *els;
    u16 op;
    u8 els_op;
    u8 sub;

    size_t rlen;
    size_t dlen = 0;

    if (skb_linearize(skb))
        return 0;

    if (skb->len < sizeof(*fiph))
        return 0;

    fiph = (struct fip_header *)skb->data;
    op = ntohs(fiph->fip_op);
    sub = fiph->fip_subcode;

    if (op != FIP_OP_LS)
        return 0;

    if (sub != FIP_SC_REP)
        return 0;

    rlen = ntohs(fiph->fip_dl_len) * 4;
    if (rlen + sizeof(*fiph) > skb->len)
        return 0;

    desc = (struct fip_desc *)(fiph + 1);
    dlen = desc->fip_dlen * FIP_BPW;

    if (desc->fip_dtype == FIP_DT_FLOGI) {

        if (dlen < sizeof(*els) + sizeof(*fh) + 1)
            return 0;

        els = (struct fip_encaps *)desc;
        fh = (struct fc_frame_header *)(els + 1);

        if (!fh)
            return 0;

        /*
         * ELS command code, reason and explanation should be: Reject,
         * unsupported command and insufficient resource
         */
        els_op = *(u8 *)(fh + 1);
        if (els_op == ELS_LS_RJT) {
            shost_printk(KERN_INFO, lport->host,
                  "Flogi Request Rejected by Switch\n");
            return 1;
        }
        shost_printk(KERN_INFO, lport->host,
                "Flogi Request Accepted by Switch\n");
    }
    return 0;
}

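/**
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 * @fnic: fnic instance.
 *
 * Resets the known VLAN list, builds a FIP VLAN request addressed to
 * ALL-FCF-MACs and arms the FIP timer so the request can be retried if
 * no response arrives.
 */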
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
    struct fcoe_ctlr *fip = &fnic->ctlr;
    struct fnic_stats *fnic_stats = &fnic->fnic_stats;
    struct sk_buff *skb;
    char *eth_fr;
    struct fip_vlan *vlan;
    u64 vlan_tov;

    fnic_fcoe_reset_vlans(fnic);
    fnic->set_vlan(fnic, 0);

    if (printk_ratelimit())
        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
              "Sending VLAN request...\n");

    skb = dev_alloc_skb(sizeof(struct fip_vlan));
    if (!skb)
        return;

    eth_fr = (char *)skb->data;
    vlan = (struct fip_vlan *)eth_fr;

    memset(vlan, 0, sizeof(*vlan));
    memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
    memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
    vlan->eth.h_proto = htons(ETH_P_FIP);

    vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
    vlan->fip.fip_op = htons(FIP_OP_VLAN);
    vlan->fip.fip_subcode = FIP_SC_VL_REQ;
    vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

    vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
    vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
    memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

    vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
    vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
    put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
    atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

    skb_put(skb, sizeof(*vlan));
    skb->protocol = htons(ETH_P_FIP);
    skb_reset_mac_header(skb);
    skb_reset_network_header(skb);
    fip->send(fip, skb);

    /* set a timer so that we can retry if there is no response */
    vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
    mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

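/**
 * fnic_fcoe_process_vlan_resp() - process a FIP VLAN discovery response.
 * @fnic: fnic instance.
 * @skb:  received FIP VLAN response frame.
 *
 * Collects the VLAN descriptors from the response, selects the first
 * VLAN, brings the FCoE controller link up to start solicitation and
 * arms the FIP timer. If the response carries no VLAN descriptors,
 * discovery is retried from the FIP timer.
 */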
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
    struct fcoe_ctlr *fip = &fnic->ctlr;
    struct fip_header *fiph;
    struct fip_desc *desc;
    struct fnic_stats *fnic_stats = &fnic->fnic_stats;
    u16 vid;
    size_t rlen;
    size_t dlen;
    struct fcoe_vlan *vlan;
    u64 sol_time;
    unsigned long flags;

    FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
          "Received VLAN response...\n");

    fiph = (struct fip_header *) skb->data;

    FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
          "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
          ntohs(fiph->fip_op), fiph->fip_subcode);

    rlen = ntohs(fiph->fip_dl_len) * 4;
    fnic_fcoe_reset_vlans(fnic);
    spin_lock_irqsave(&fnic->vlans_lock, flags);
    desc = (struct fip_desc *)(fiph + 1);
    while (rlen > 0) {
        dlen = desc->fip_dlen * FIP_BPW;
        switch (desc->fip_dtype) {
        case FIP_DT_VLAN:
            vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
            shost_printk(KERN_INFO, fnic->lport->host,
                  "process_vlan_resp: FIP VLAN %d\n", vid);
            vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
            if (!vlan) {
                /* retry from timer */
                spin_unlock_irqrestore(&fnic->vlans_lock,
                            flags);
                goto out;
            }
            vlan->vid = vid & 0x0fff;
            vlan->state = FIP_VLAN_AVAIL;
            list_add_tail(&vlan->list, &fnic->vlans);
            break;
        }
        desc = (struct fip_desc *)((char *)desc + dlen);
        rlen -= dlen;
    }

    /* any VLAN descriptors present ? */
    if (list_empty(&fnic->vlans)) {
        /* retry from timer */
        atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
              "No VLAN descriptors in FIP VLAN response\n");
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        goto out;
    }

    vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
    fnic->set_vlan(fnic, vlan->vid);
    vlan->state = FIP_VLAN_SENT; /* sent now */
    vlan->sol_count++;
    spin_unlock_irqrestore(&fnic->vlans_lock, flags);

    /* start the solicitation */
    fcoe_ctlr_link_up(fip);

    sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
    mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
    return;
}

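/**
 * fnic_fcoe_start_fcf_disc() - start FCF discovery on the first known VLAN.
 * @fnic: fnic instance.
 */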
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
    unsigned long flags;
    struct fcoe_vlan *vlan;
    u64 sol_time;

    spin_lock_irqsave(&fnic->vlans_lock, flags);
    vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
    fnic->set_vlan(fnic, vlan->vid);
    vlan->state = FIP_VLAN_SENT; /* sent now */
    vlan->sol_count = 1;
    spin_unlock_irqrestore(&fnic->vlans_lock, flags);

    /* start the solicitation */
    fcoe_ctlr_link_up(&fnic->ctlr);

    sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
    mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

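/**
 * fnic_fcoe_vlan_check() - check whether a usable FIP VLAN is configured.
 * @fnic: fnic instance.
 * @flag: FIP flags from the received advertisement.
 *
 * Returns 0 if the first VLAN on the list is in use (marking a sent VLAN
 * as used), -EINVAL otherwise.
 */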
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
    unsigned long flags;
    struct fcoe_vlan *fvlan;

    spin_lock_irqsave(&fnic->vlans_lock, flags);
    if (list_empty(&fnic->vlans)) {
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        return -EINVAL;
    }

    fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
    if (fvlan->state == FIP_VLAN_USED) {
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        return 0;
    }

    if (fvlan->state == FIP_VLAN_SENT) {
        fvlan->state = FIP_VLAN_USED;
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        return 0;
    }
    spin_unlock_irqrestore(&fnic->vlans_lock, flags);
    return -EINVAL;
}

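/**
 * fnic_event_enq() - queue a fnic event and schedule the event worker.
 * @fnic: fnic instance.
 * @ev:   event type to queue.
 */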
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
    struct fnic_event *fevt;
    unsigned long flags;

    fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
    if (!fevt)
        return;

    fevt->fnic = fnic;
    fevt->event = ev;

    spin_lock_irqsave(&fnic->fnic_lock, flags);
    list_add_tail(&fevt->list, &fnic->evlist);
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);

    schedule_work(&fnic->event_work);
}

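/**
 * fnic_fcoe_handle_fip_frame() - validate and pre-process a FIP frame.
 * @fnic: fnic instance.
 * @skb:  received FIP frame.
 *
 * Handles VLAN notifications and clear-virtual-link requests locally.
 * Returns 0 when the frame was consumed here (VLAN response), a negative
 * value when @skb is invalid, and 1 otherwise so the caller passes the
 * frame on to libfcoe.
 */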
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
    struct fip_header *fiph;
    int ret = 1;
    u16 op;
    u8 sub;

    if (!skb || !(skb->data))
        return -1;

    if (skb_linearize(skb))
        goto drop;

    fiph = (struct fip_header *)skb->data;
    op = ntohs(fiph->fip_op);
    sub = fiph->fip_subcode;

    if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
        goto drop;

    if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
        goto drop;

    if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
        if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
            goto drop;
        /* pass it on to fcoe */
        ret = 1;
    } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
        /* set the vlan as used */
        fnic_fcoe_process_vlan_resp(fnic, skb);
        ret = 0;
    } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
        /* received CVL request, restart vlan disc */
        fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
        /* pass it on to fcoe */
        ret = 1;
    }
drop:
    return ret;
}

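/**
 * fnic_handle_fip_frame() - work handler for received FIP frames.
 * @work: the fnic fip_frame_work work_struct.
 *
 * Drains the FIP frame queue, lets fnic_fcoe_handle_fip_frame() filter
 * each frame, restarts VLAN discovery when the switch rejects FLOGI,
 * and hands the remaining frames to the FCoE controller.
 */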
void fnic_handle_fip_frame(struct work_struct *work)
{
    struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
    struct fnic_stats *fnic_stats = &fnic->fnic_stats;
    unsigned long flags;
    struct sk_buff *skb;
    struct ethhdr *eh;

    while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            dev_kfree_skb(skb);
            return;
        }
        /*
         * If we're in a transitional state, just re-queue and return.
         * The queue will be serviced when we get to a stable state.
         */
        if (fnic->state != FNIC_IN_FC_MODE &&
            fnic->state != FNIC_IN_ETH_MODE) {
            skb_queue_head(&fnic->fip_frame_queue, skb);
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        eh = (struct ethhdr *)skb->data;
        if (eh->h_proto == htons(ETH_P_FIP)) {
            skb_pull(skb, sizeof(*eh));
            if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
                dev_kfree_skb(skb);
                continue;
            }
            /*
             * If there are FLOGI rejects, clear all
             * FCFs and restart from scratch
             */
            if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
                atomic64_inc(
                    &fnic_stats->vlan_stats.flogi_rejects);
                shost_printk(KERN_INFO, fnic->lport->host,
                      "Trigger a Link down - VLAN Disc\n");
                fcoe_ctlr_link_down(&fnic->ctlr);
                /* start FCoE VLAN discovery */
                fnic_fcoe_send_vlan_req(fnic);
                dev_kfree_skb(skb);
                continue;
            }
            fcoe_ctlr_recv(&fnic->ctlr, skb);
            continue;
        }
    }
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic:   fnic instance.
 * @skb:    Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
    struct fc_frame *fp;
    struct ethhdr *eh;
    struct fcoe_hdr *fcoe_hdr;
    struct fcoe_crc_eof *ft;

    /*
     * Undo VLAN encapsulation if present.
     */
    eh = (struct ethhdr *)skb->data;
    if (eh->h_proto == htons(ETH_P_8021Q)) {
        memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
        eh = skb_pull(skb, VLAN_HLEN);
        skb_reset_mac_header(skb);
    }
    if (eh->h_proto == htons(ETH_P_FIP)) {
        if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
            printk(KERN_ERR "Dropped FIP frame, as firmware "
                    "uses non-FIP mode, Enable FIP "
                    "using UCSM\n");
            goto drop;
        }
        if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
            FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
            printk(KERN_ERR "fnic ctlr frame trace error!!!");
        }
        skb_queue_tail(&fnic->fip_frame_queue, skb);
        queue_work(fnic_fip_queue, &fnic->fip_frame_work);
        return 1;       /* let caller know packet was used */
    }
    if (eh->h_proto != htons(ETH_P_FCOE))
        goto drop;
    skb_set_network_header(skb, sizeof(*eh));
    skb_pull(skb, sizeof(*eh));

    fcoe_hdr = (struct fcoe_hdr *)skb->data;
    if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
        goto drop;

    fp = (struct fc_frame *)skb;
    fc_frame_init(fp);
    fr_sof(fp) = fcoe_hdr->fcoe_sof;
    skb_pull(skb, sizeof(struct fcoe_hdr));
    skb_reset_transport_header(skb);

    ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
    fr_eof(fp) = ft->fcoe_eof;
    skb_trim(skb, skb->len - sizeof(*ft));
    return 0;
drop:
    dev_kfree_skb_irq(skb);
    return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:   fnic instance.
 * @new:    newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
    u8 *ctl = fnic->ctlr.ctl_src_addr;
    u8 *data = fnic->data_src_addr;

    if (is_zero_ether_addr(new))
        new = ctl;
    if (ether_addr_equal(data, new))
        return;
    FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
    if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
        vnic_dev_del_addr(fnic->vdev, data);
    memcpy(data, new, ETH_ALEN);
    if (!ether_addr_equal(new, ctl))
        vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:  local port.
 * @new:    newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
    struct fnic *fnic = lport_priv(lport);

    spin_lock_irq(&fnic->fnic_lock);
    fnic_update_mac_locked(fnic, new);
    spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:  local port.
 * @port_id:    assigned FC_ID.
 * @fp:     received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
    struct fnic *fnic = lport_priv(lport);
    u8 *mac;
    int ret;

    FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
             port_id, fp);

    /*
     * If we're clearing the FC_ID, change to use the ctl_src_addr.
     * Set ethernet mode to send FLOGI.
     */
    if (!port_id) {
        fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
        fnic_set_eth_mode(fnic);
        return;
    }

    if (fp) {
        mac = fr_cb(fp)->granted_mac;
        if (is_zero_ether_addr(mac)) {
            /* non-FIP - FLOGI already accepted - ignore return */
            fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
        }
        fnic_update_mac(lport, mac);
    }

    /* Change state to reflect transition to FC mode */
    spin_lock_irq(&fnic->fnic_lock);
    if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
        fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
    else {
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                 "Unexpected fnic state %s while"
                 " processing flogi resp\n",
                 fnic_state_to_str(fnic->state));
        spin_unlock_irq(&fnic->fnic_lock);
        return;
    }
    spin_unlock_irq(&fnic->fnic_lock);

    /*
     * Send FLOGI registration to firmware to set up FC mode.
     * The new address will be set up when registration completes.
     */
    ret = fnic_flogi_reg_handler(fnic, port_id);

    if (ret < 0) {
        spin_lock_irq(&fnic->fnic_lock);
        if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
            fnic->state = FNIC_IN_ETH_MODE;
        spin_unlock_irq(&fnic->fnic_lock);
    }
}

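/*
 * Receive-queue completion handler for one descriptor: decode the FCP or
 * Ethernet completion, validate the frame, and either hand Ethernet
 * frames to fnic_import_rq_eth_pkt() or queue the FC frame for
 * fnic_handle_frame().
 */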
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
                    *cq_desc, struct vnic_rq_buf *buf,
                    int skipped __attribute__((unused)),
                    void *opaque)
{
    struct fnic *fnic = vnic_dev_priv(rq->vdev);
    struct sk_buff *skb;
    struct fc_frame *fp;
    struct fnic_stats *fnic_stats = &fnic->fnic_stats;
    u8 type, color, eop, sop, ingress_port, vlan_stripped;
    u8 fcoe = 0, fcoe_sof, fcoe_eof;
    u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
    u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
    u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
    u8 fcs_ok = 1, packet_error = 0;
    u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
    u32 rss_hash;
    u16 exchange_id, tmpl;
    u8 sof = 0;
    u8 eof = 0;
    u32 fcp_bytes_written = 0;
    unsigned long flags;

    dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
             DMA_FROM_DEVICE);
    skb = buf->os_buf;
    fp = (struct fc_frame *)skb;
    buf->os_buf = NULL;

    cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
    if (type == CQ_DESC_TYPE_RQ_FCP) {
        cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
                   &type, &color, &q_number, &completed_index,
                   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
                   &tmpl, &fcp_bytes_written, &sof, &eof,
                   &ingress_port, &packet_error,
                   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
                   &vlan);
        skb_trim(skb, fcp_bytes_written);
        fr_sof(fp) = sof;
        fr_eof(fp) = eof;

    } else if (type == CQ_DESC_TYPE_RQ_ENET) {
        cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                    &type, &color, &q_number, &completed_index,
                    &ingress_port, &fcoe, &eop, &sop,
                    &rss_type, &csum_not_calc, &rss_hash,
                    &bytes_written, &packet_error,
                    &vlan_stripped, &vlan, &checksum,
                    &fcoe_sof, &fcoe_fc_crc_ok,
                    &fcoe_enc_error, &fcoe_eof,
                    &tcp_udp_csum_ok, &udp, &tcp,
                    &ipv4_csum_ok, &ipv6, &ipv4,
                    &ipv4_fragment, &fcs_ok);
        skb_trim(skb, bytes_written);
        if (!fcs_ok) {
            atomic64_inc(&fnic_stats->misc_stats.frame_errors);
            FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                     "fcs error.  dropping packet.\n");
            goto drop;
        }
        if (fnic_import_rq_eth_pkt(fnic, skb))
            return;

    } else {
        /* wrong CQ type */
        shost_printk(KERN_ERR, fnic->lport->host,
                 "fnic rq_cmpl wrong cq type x%x\n", type);
        goto drop;
    }

    if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
        atomic64_inc(&fnic_stats->misc_stats.frame_errors);
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                 "fnic rq_cmpl fcoe x%x fcsok x%x"
                 " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
                 " x%x\n",
                 fcoe, fcs_ok, packet_error,
                 fcoe_fc_crc_ok, fcoe_enc_error);
        goto drop;
    }

    spin_lock_irqsave(&fnic->fnic_lock, flags);
    if (fnic->stop_rx_link_events) {
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        goto drop;
    }
    fr_dev(fp) = fnic->lport;
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);
    if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
                    (char *)skb->data, skb->len)) != 0) {
        printk(KERN_ERR "fnic ctlr frame trace error!!!");
    }

    skb_queue_tail(&fnic->frame_queue, skb);
    queue_work(fnic_event_queue, &fnic->frame_work);

    return;
drop:
    dev_kfree_skb_irq(skb);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
                     struct cq_desc *cq_desc, u8 type,
                     u16 q_number, u16 completed_index,
                     void *opaque)
{
    struct fnic *fnic = vnic_dev_priv(vdev);

    vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
            VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
            NULL);
    return 0;
}

int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
    unsigned int tot_rq_work_done = 0, cur_work_done;
    unsigned int i;
    int err;

    for (i = 0; i < fnic->rq_count; i++) {
        cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
                        fnic_rq_cmpl_handler_cont,
                        NULL);
        if (cur_work_done) {
            err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
            if (err)
                shost_printk(KERN_ERR, fnic->lport->host,
                         "fnic_alloc_rq_frame can't alloc"
                         " frame\n");
        }
        tot_rq_work_done += cur_work_done;
    }

    return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
    struct fnic *fnic = vnic_dev_priv(rq->vdev);
    struct sk_buff *skb;
    u16 len;
    dma_addr_t pa;
    int r;

    len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
    skb = dev_alloc_skb(len);
    if (!skb) {
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                 "Unable to allocate RQ sk_buff\n");
        return -ENOMEM;
    }
    skb_reset_mac_header(skb);
    skb_reset_transport_header(skb);
    skb_reset_network_header(skb);
    skb_put(skb, len);
    pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(&fnic->pdev->dev, pa)) {
        r = -ENOMEM;
        printk(KERN_ERR "PCI mapping failed with error %d\n", r);
        goto free_skb;
    }

    fnic_queue_rq_desc(rq, skb, pa, len);
    return 0;

free_skb:
    kfree_skb(skb);
    return r;
}

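/**
 * fnic_free_rq_buf() - free an RQ buffer and its DMA mapping.
 * @rq:  receive queue owning the buffer.
 * @buf: RQ buffer to clean up.
 */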
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
    struct fc_frame *fp = buf->os_buf;
    struct fnic *fnic = vnic_dev_priv(rq->vdev);

    dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
             DMA_FROM_DEVICE);

    dev_kfree_skb(fp_skb(fp));
    buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip:    fcoe_ctlr instance.
 * @skb:    Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
    struct fnic *fnic = fnic_from_ctlr(fip);
    struct vnic_wq *wq = &fnic->wq[0];
    dma_addr_t pa;
    struct ethhdr *eth_hdr;
    struct vlan_ethhdr *vlan_hdr;
    unsigned long flags;

    if (!fnic->vlan_hw_insert) {
        eth_hdr = (struct ethhdr *)skb_mac_header(skb);
        vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
        memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
        vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
        vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
        vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
        if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
            FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
            printk(KERN_ERR "fnic ctlr frame trace error!!!");
        }
    } else {
        if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
            FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
            printk(KERN_ERR "fnic ctlr frame trace error!!!");
        }
    }

    pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
            DMA_TO_DEVICE);
    if (dma_mapping_error(&fnic->pdev->dev, pa)) {
        printk(KERN_ERR "DMA mapping failed\n");
        goto free_skb;
    }

    spin_lock_irqsave(&fnic->wq_lock[0], flags);
    if (!vnic_wq_desc_avail(wq))
        goto irq_restore;

    fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
                   0 /* hw inserts cos value */,
                   fnic->vlan_id, 1);
    spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
    return;

irq_restore:
    spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
    dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
    kfree_skb(skb);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
    struct vnic_wq *wq = &fnic->wq[0];
    struct sk_buff *skb;
    dma_addr_t pa;
    struct ethhdr *eth_hdr;
    struct vlan_ethhdr *vlan_hdr;
    struct fcoe_hdr *fcoe_hdr;
    struct fc_frame_header *fh;
    u32 tot_len, eth_hdr_len;
    int ret = 0;
    unsigned long flags;

    fh = fc_frame_header_get(fp);
    skb = fp_skb(fp);

    if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
        fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
        return 0;

    if (!fnic->vlan_hw_insert) {
        eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
        vlan_hdr = skb_push(skb, eth_hdr_len);
        eth_hdr = (struct ethhdr *)vlan_hdr;
        vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
        vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
        vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
        fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
    } else {
        eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
        eth_hdr = skb_push(skb, eth_hdr_len);
        eth_hdr->h_proto = htons(ETH_P_FCOE);
        fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
    }

    if (fnic->ctlr.map_dest)
        fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
    else
        memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
    memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

    tot_len = skb->len;
    BUG_ON(tot_len % 4);

    memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
    fcoe_hdr->fcoe_sof = fr_sof(fp);
    if (FC_FCOE_VER)
        FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

    pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
    if (dma_mapping_error(&fnic->pdev->dev, pa)) {
        ret = -ENOMEM;
        printk(KERN_ERR "DMA map failed with error %d\n", ret);
        goto free_skb_on_err;
    }

    if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
                (char *)eth_hdr, tot_len)) != 0) {
        printk(KERN_ERR "fnic ctlr frame trace error!!!");
    }

    spin_lock_irqsave(&fnic->wq_lock[0], flags);

    if (!vnic_wq_desc_avail(wq)) {
        dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
        ret = -1;
        goto irq_restore;
    }

    fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
               0 /* hw inserts cos value */,
               fnic->vlan_id, 1, 1, 1);

irq_restore:
    spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
    if (ret)
        dev_kfree_skb_any(fp_skb(fp));

    return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
    struct fnic *fnic = lport_priv(lp);
    unsigned long flags;

    if (fnic->in_remove) {
        dev_kfree_skb(fp_skb(fp));
        return -1;
    }

    /*
     * Queue frame if in a transitional state.
     * This occurs while registering the Port_ID / MAC address after FLOGI.
     */
    spin_lock_irqsave(&fnic->fnic_lock, flags);
    if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
        skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        return 0;
    }
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);

    return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
    struct sk_buff *skb;
    struct fc_frame *fp;

    while ((skb = skb_dequeue(&fnic->tx_queue))) {
        fp = (struct fc_frame *)skb;
        fnic_send_frame(fnic, fp);
    }
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
    unsigned long flags;
    enum fnic_state old_state;
    int ret;

    spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
    old_state = fnic->state;
    switch (old_state) {
    case FNIC_IN_FC_MODE:
    case FNIC_IN_ETH_TRANS_FC_MODE:
    default:
        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        ret = fnic_fw_reset_handler(fnic);

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
            goto again;
        if (ret)
            fnic->state = old_state;
        break;

    case FNIC_IN_FC_TRANS_ETH_MODE:
    case FNIC_IN_ETH_MODE:
        break;
    }
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

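/*
 * Work-queue send completion: unmap the transmitted frame and free its skb.
 */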
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
                    struct cq_desc *cq_desc,
                    struct vnic_wq_buf *buf, void *opaque)
{
    struct sk_buff *skb = buf->os_buf;
    struct fc_frame *fp = (struct fc_frame *)skb;
    struct fnic *fnic = vnic_dev_priv(wq->vdev);

    dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
             DMA_TO_DEVICE);
    dev_kfree_skb_irq(fp_skb(fp));
    buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
                     struct cq_desc *cq_desc, u8 type,
                     u16 q_number, u16 completed_index,
                     void *opaque)
{
    struct fnic *fnic = vnic_dev_priv(vdev);
    unsigned long flags;

    spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
    vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
            fnic_wq_complete_frame_send, NULL);
    spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

    return 0;
}

int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
    unsigned int wq_work_done = 0;
    unsigned int i;

    for (i = 0; i < fnic->raw_wq_count; i++) {
        wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
                         work_to_do,
                         fnic_wq_cmpl_handler_cont,
                         NULL);
    }

    return wq_work_done;
}


void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
    struct fc_frame *fp = buf->os_buf;
    struct fnic *fnic = vnic_dev_priv(wq->vdev);

    dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
             DMA_TO_DEVICE);

    dev_kfree_skb(fp_skb(fp));
    buf->os_buf = NULL;
}

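/**
 * fnic_fcoe_reset_vlans() - discard all VLANs learned through FIP discovery.
 * @fnic: fnic instance.
 */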
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
    unsigned long flags;
    struct fcoe_vlan *vlan;
    struct fcoe_vlan *next;

    /*
     * Indicate a link down to fcoe so that all FCFs are freed. This
     * might not be required, since we did this before sending the VLAN
     * discovery request.
     */
    spin_lock_irqsave(&fnic->vlans_lock, flags);
    if (!list_empty(&fnic->vlans)) {
        list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
            list_del(&vlan->list);
            kfree(vlan);
        }
    }
    spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

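/**
 * fnic_handle_fip_timer() - FIP VLAN discovery timer handler.
 * @fnic: fnic instance.
 *
 * Restarts VLAN discovery when no usable VLANs are known, and re-arms
 * the solicitation timer (moving on to the next VLAN when the current
 * one has been solicited too many times without a response).
 */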
void fnic_handle_fip_timer(struct fnic *fnic)
{
    unsigned long flags;
    struct fcoe_vlan *vlan;
    struct fnic_stats *fnic_stats = &fnic->fnic_stats;
    u64 sol_time;

    spin_lock_irqsave(&fnic->fnic_lock, flags);
    if (fnic->stop_rx_link_events) {
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        return;
    }
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);

    if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
        return;

    spin_lock_irqsave(&fnic->vlans_lock, flags);
    if (list_empty(&fnic->vlans)) {
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        /* no vlans available, try again */
        if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
            if (printk_ratelimit())
                shost_printk(KERN_DEBUG, fnic->lport->host,
                        "Start VLAN Discovery\n");
        fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
        return;
    }

    vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
    FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
          "fip_timer: vlan %d state %d sol_count %d\n",
          vlan->vid, vlan->state, vlan->sol_count);
    switch (vlan->state) {
    case FIP_VLAN_USED:
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
              "FIP VLAN is selected for FC transaction\n");
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        break;
    case FIP_VLAN_FAILED:
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        /* if all vlans are in failed state, restart vlan disc */
        if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
            if (printk_ratelimit())
                shost_printk(KERN_DEBUG, fnic->lport->host,
                      "Start VLAN Discovery\n");
        fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
        break;
    case FIP_VLAN_SENT:
        if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
            /*
             * no response on this vlan, remove from the list.
             * Try the next vlan
             */
            FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                  "Dequeue this VLAN ID %d from list\n",
                  vlan->vid);
            list_del(&vlan->list);
            kfree(vlan);
            vlan = NULL;
            if (list_empty(&fnic->vlans)) {
                /* we exhausted all vlans, restart vlan disc */
                spin_unlock_irqrestore(&fnic->vlans_lock,
                            flags);
                FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                      "fip_timer: vlan list empty, "
                      "trigger vlan disc\n");
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                return;
            }
            /* check the next vlan */
            vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
                            list);
            fnic->set_vlan(fnic, vlan->vid);
            vlan->state = FIP_VLAN_SENT; /* sent now */
        }
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
        vlan->sol_count++;
        sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
        mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
        break;
    }
}