// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <generated/utsrelease.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns: none
 *
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
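    /* err starts nonzero so that a later ixgbe_fcoe_ddp_put() will
     * invalidate the hardware context unless the Rx completion path
     * has cleared it (see the FCPRSP case in ixgbe_fcoe_ddp()).
     */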
    ddp->len = 0;
    ddp->err = 1;
    ddp->udl = NULL;
    ddp->udp = 0UL;
    ddp->sgl = NULL;
    ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by the ULD, i.e., the FCP layer of
 * libfc, to release the corresponding ddp context when the I/O is done.
 *
 * Returns: the length in bytes of data already DDPed
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
    int len;
    struct ixgbe_fcoe *fcoe;
    struct ixgbe_adapter *adapter;
    struct ixgbe_fcoe_ddp *ddp;
    struct ixgbe_hw *hw;
    u32 fcbuff;

    if (!netdev)
        return 0;

    if (xid >= netdev->fcoe_ddp_xid)
        return 0;

    adapter = netdev_priv(netdev);
    fcoe = &adapter->fcoe;
    ddp = &fcoe->ddp[xid];
    if (!ddp->udl)
        return 0;

    hw = &adapter->hw;
    len = ddp->len;
    /* if no error then skip ddp context invalidation */
    if (!ddp->err)
        goto skip_ddpinv;

    if (hw->mac.type == ixgbe_mac_X550) {
        /* X550 does not require DDP FCoE lock */

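        /* Invalidation sequence: clear and write back the filter
         * context (FCDFC), then clear and write back the DDP context
         * (FCDDC), and finally read FCBUFF back to check whether the
         * context has actually been invalidated.
         */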
        IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
        IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
                (xid | IXGBE_FCFLTRW_WE));

        /* program FCBUFF */
        IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);

        /* program FCDMARW */
        IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
                (xid | IXGBE_FCDMARW_WE));

        /* read FCBUFF to check context invalidated */
        IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
                (xid | IXGBE_FCDMARW_RE));
        fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
    } else {
        /* other hardware requires DDP FCoE lock */
        spin_lock_bh(&fcoe->lock);
        IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
        IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
                (xid | IXGBE_FCFLTRW_WE));
        IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
        IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
                (xid | IXGBE_FCDMARW_WE));

        /* guaranteed to be invalidated after 100us */
        IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
                (xid | IXGBE_FCDMARW_RE));
        fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
        spin_unlock_bh(&fcoe->lock);
    }

    if (fcbuff & IXGBE_FCBUFF_VALID)
        usleep_range(100, 150);

skip_ddpinv:
    if (ddp->sgl)
        dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
                 DMA_FROM_DEVICE);
    if (ddp->pool) {
        dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
        ddp->pool = NULL;
    }

    ixgbe_fcoe_clear_ddp(ddp);

    return len;
}

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to setup target mode, 0 to setup initiator mode
 *
 * Returns: 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
                struct scatterlist *sgl, unsigned int sgc,
                int target_mode)
{
    struct ixgbe_adapter *adapter;
    struct ixgbe_hw *hw;
    struct ixgbe_fcoe *fcoe;
    struct ixgbe_fcoe_ddp *ddp;
    struct ixgbe_fcoe_ddp_pool *ddp_pool;
    struct scatterlist *sg;
    unsigned int i, j, dmacount;
    unsigned int len;
    static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
    unsigned int firstoff = 0;
    unsigned int lastsize;
    unsigned int thisoff = 0;
    unsigned int thislen = 0;
    u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
    dma_addr_t addr = 0;

    if (!netdev || !sgl)
        return 0;

    adapter = netdev_priv(netdev);
    if (xid >= netdev->fcoe_ddp_xid) {
        e_warn(drv, "xid=0x%x out-of-range\n", xid);
        return 0;
    }

    /* no DDP if we are already down or resetting */
    if (test_bit(__IXGBE_DOWN, &adapter->state) ||
        test_bit(__IXGBE_RESETTING, &adapter->state))
        return 0;

    fcoe = &adapter->fcoe;
    ddp = &fcoe->ddp[xid];
    if (ddp->sgl) {
        e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
              xid, ddp->sgl, ddp->sgc);
        return 0;
    }
    ixgbe_fcoe_clear_ddp(ddp);

    if (!fcoe->ddp_pool) {
        e_warn(drv, "No ddp_pool resources allocated\n");
        return 0;
    }

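    /* get_cpu() pins us to this CPU so the per-CPU pool and its
     * statistics stay consistent; every exit path below must call
     * put_cpu().
     */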
    ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
    if (!ddp_pool->pool) {
        e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
        goto out_noddp;
    }

    /* setup dma from scsi command sgl */
    dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
    if (dmacount == 0) {
        e_err(drv, "xid 0x%x DMA map error\n", xid);
        goto out_noddp;
    }

    /* alloc the udl from per cpu ddp pool */
    ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
    if (!ddp->udl) {
        e_err(drv, "failed to allocate ddp context\n");
        goto out_noddp_unmap;
    }
    ddp->pool = ddp_pool->pool;
    ddp->sgl = sgl;
    ddp->sgc = sgc;

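    /* Walk the mapped scatterlist and build the user descriptor list
     * (udl): each entry holds the bus address of one bufflen-sized
     * buffer. All but the first buffer must start on a bufflen
     * boundary, and all but the last must end on one. For
     * illustration (not from the source): with bufflen = 4096, a
     * single 10240-byte SG element starting 0x100 into a page splits
     * into chunks of 3840, 4096 and 2304 bytes, giving firstoff =
     * 0x100 and lastsize = 2304.
     */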
    j = 0;
    for_each_sg(sgl, sg, dmacount, i) {
        addr = sg_dma_address(sg);
        len = sg_dma_len(sg);
        while (len) {
            /* max number of buffers allowed in one DDP context */
            if (j >= IXGBE_BUFFCNT_MAX) {
                ddp_pool->noddp++;
                goto out_noddp_free;
            }

            /* get the offset and length of the current buffer */
            thisoff = addr & ((dma_addr_t)bufflen - 1);
            thislen = min((bufflen - thisoff), len);
            /*
             * all but the 1st buffer (j == 0)
             * must be aligned on bufflen
             */
            if ((j != 0) && (thisoff))
                goto out_noddp_free;
            /*
             * all but the last buffer
             * ((i == (dmacount - 1)) && (thislen == len))
             * must end at bufflen
             */
            if (((i != (dmacount - 1)) || (thislen != len))
                && ((thislen + thisoff) != bufflen))
                goto out_noddp_free;

            ddp->udl[j] = (u64)(addr - thisoff);
            /* only the first buffer may have a non-zero offset */
            if (j == 0)
                firstoff = thisoff;
            len -= thislen;
            addr += thislen;
            j++;
        }
    }
    /* only the last buffer may have non-full bufflen */
    lastsize = thisoff + thislen;

    /*
     * lastsize cannot be bufflen.
     * If it is, add another buffer with lastsize = 1.
     */
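    /* The extra entry added below points at a single scratch buffer
     * (extra_ddp_buffer) shared by all contexts as a hardware
     * workaround; lastsize = 1 makes it the (nearly empty) final
     * buffer, presumably so every real user buffer ends on a full
     * bufflen boundary as the context requires.
     */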
    if (lastsize == bufflen) {
        if (j >= IXGBE_BUFFCNT_MAX) {
            ddp_pool->noddp_ext_buff++;
            goto out_noddp_free;
        }

        ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
        j++;
        lastsize = 1;
    }
    put_cpu();

    fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
    fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
    fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
    /* Set WRCONTX bit to allow DDP for target */
    if (target_mode)
        fcbuff |= (IXGBE_FCBUFF_WRCONTX);
    fcbuff |= (IXGBE_FCBUFF_VALID);

    fcdmarw = xid;
    fcdmarw |= IXGBE_FCDMARW_WE;
    fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

    fcfltrw = xid;
    fcfltrw |= IXGBE_FCFLTRW_WE;

    /* program DMA context */
    hw = &adapter->hw;

    /* turn on last frame indication for target mode, as the target is
     * supposed to send FCP_RSP when it is done. */
    if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
        set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
        fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
        fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
        IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
    }

    if (hw->mac.type == ixgbe_mac_X550) {
        /* X550 does not require DDP lock */

        IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
                ddp->udp & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
        IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
        /* program filter context */
        IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
        IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
        IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
    } else {
        /* DDP lock for indirect DDP context access */
        spin_lock_bh(&fcoe->lock);

        IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
        IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
        /* program filter context */
        IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
        IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
        IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

        spin_unlock_bh(&fcoe->lock);
    }

    return 1;

out_noddp_free:
    dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
    ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
    dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
    put_cpu();
    return 0;
}

/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns: 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
               struct scatterlist *sgl, unsigned int sgc)
{
    return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns: 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
                struct scatterlist *sgl, unsigned int sgc)
{
    return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}

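/* For reference (wiring assumed from ixgbe_main.c, not shown in this
 * file): the ULD reaches the handlers above through net_device_ops,
 * roughly:
 *
 *    .ndo_fcoe_ddp_setup  = ixgbe_fcoe_ddp_get,
 *    .ndo_fcoe_ddp_done   = ixgbe_fcoe_ddp_put,
 *    .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
 */
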
/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns: < 0 indicates an error or not an FCoE ddp, 0 indicates
 * the skb is not passed to ULD, > 0 indicates the length of data
 * being DDPed.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
           union ixgbe_adv_rx_desc *rx_desc,
           struct sk_buff *skb)
{
    int rc = -EINVAL;
    struct ixgbe_fcoe *fcoe;
    struct ixgbe_fcoe_ddp *ddp;
    struct fc_frame_header *fh;
    struct fcoe_crc_eof *crc;
    __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
    __le32 ddp_err;
    int ddp_max;
    u32 fctl;
    u16 xid;

    if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
        skb->ip_summed = CHECKSUM_NONE;
    else
        skb->ip_summed = CHECKSUM_UNNECESSARY;

    if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
        fh = (struct fc_frame_header *)(skb->data +
            sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
    else
        fh = (struct fc_frame_header *)(skb->data +
            sizeof(struct fcoe_hdr));

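    /* Pick the exchange ID that keys our DDP context: when the sender
     * holds the exchange context (FC_FC_EX_CTX), this frame belongs to
     * an exchange we originated, so OX_ID is ours; otherwise we are
     * the responder and RX_ID is ours.
     */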
    fctl = ntoh24(fh->fh_f_ctl);
    if (fctl & FC_FC_EX_CTX)
        xid = be16_to_cpu(fh->fh_ox_id);
    else
        xid = be16_to_cpu(fh->fh_rx_id);

    ddp_max = IXGBE_FCOE_DDP_MAX;
    /* X550 has different DDP Max limit */
    if (adapter->hw.mac.type == ixgbe_mac_X550)
        ddp_max = IXGBE_FCOE_DDP_MAX_X550;
    if (xid >= ddp_max)
        return -EINVAL;

    fcoe = &adapter->fcoe;
    ddp = &fcoe->ddp[xid];
    if (!ddp->udl)
        return -EINVAL;

    ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
                          IXGBE_RXDADV_ERR_FCERR);
    if (ddp_err)
        return -EINVAL;

    switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
    /* return 0 to bypass going to ULD for DDPed data */
    case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
        /* update length of DDPed data */
        ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
        rc = 0;
        break;
    /* unmap the sg list when FCPRSP is received */
    case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
        dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
                 ddp->sgc, DMA_FROM_DEVICE);
        ddp->err = (__force u32)ddp_err;
        ddp->sgl = NULL;
        ddp->sgc = 0;
        fallthrough;
    /* if DDP length is present pass it through to ULD */
    case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
        /* update length of DDPed data */
        ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
        if (ddp->len)
            rc = ddp->len;
        break;
    /* no match will return as an error */
    case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
    default:
        break;
    }

    /* In target mode, check the last data frame of the sequence.
     * For DDP in target mode, data is already DDPed but the header
     * indication of the last data frame would allow us to tell if we
     * got all the data and the ULP can send FCP_RSP back. As this is
     * not a full fcoe frame, we fill the trailer here so it won't be
     * dropped by the ULP stack.
     */
    if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
        (fctl & FC_FC_END_SEQ)) {
        skb_linearize(skb);
        crc = skb_put(skb, sizeof(*crc));
        crc->fcoe_eof = FC_EOF_T;
    }

    return rc;
}

/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns: 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
          struct ixgbe_tx_buffer *first,
          u8 *hdr_len)
{
    struct sk_buff *skb = first->skb;
    struct fc_frame_header *fh;
    u32 vlan_macip_lens;
    u32 fcoe_sof_eof = 0;
    u32 mss_l4len_idx;
    u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE;
    u8 sof, eof;

    if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
        dev_err(tx_ring->dev, "Wrong gso type %d: expecting SKB_GSO_FCOE\n",
            skb_shinfo(skb)->gso_type);
        return -EINVAL;
    }

    /* reset the headers to point to fcoe/fc */
    skb_set_network_header(skb, skb->mac_len);
    skb_set_transport_header(skb, skb->mac_len +
                 sizeof(struct fcoe_hdr));

    /* sets up SOF and ORIS */
    sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
    switch (sof) {
    case FC_SOF_I2:
        fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
        break;
    case FC_SOF_I3:
        fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
                   IXGBE_ADVTXD_FCOEF_ORIS;
        break;
    case FC_SOF_N2:
        break;
    case FC_SOF_N3:
        fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
        break;
    default:
        dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
        return -EINVAL;
    }

    /* the first byte of the last dword is EOF */
    skb_copy_bits(skb, skb->len - 4, &eof, 1);
    /* sets up EOF and ORIE */
    switch (eof) {
    case FC_EOF_N:
        fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
        break;
    case FC_EOF_T:
        /* lso needs ORIE */
        if (skb_is_gso(skb))
            fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
                    IXGBE_ADVTXD_FCOEF_ORIE;
        else
            fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
        break;
    case FC_EOF_NI:
        fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
        break;
    case FC_EOF_A:
        fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
        break;
    default:
        dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
        return -EINVAL;
    }

    /* sets up PARINC indicating data offset */
    fh = (struct fc_frame_header *)skb_transport_header(skb);
    if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
        fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

    /* include trailer in headlen as it is replicated per frame */
    *hdr_len = sizeof(struct fcoe_crc_eof);

    /* hdr_len includes fc_hdr if FCoE LSO is enabled */
    if (skb_is_gso(skb)) {
        *hdr_len += skb_transport_offset(skb) +
                sizeof(struct fc_frame_header);
        /* update gso_segs and bytecount */
        first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
                           skb_shinfo(skb)->gso_size);
        first->bytecount += (first->gso_segs - 1) * *hdr_len;
        first->tx_flags |= IXGBE_TX_FLAGS_TSO;
        /* Hardware expects L4T to be RSV for FCoE TSO */
        type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV;
    }

    /* set flag indicating FCOE to ixgbe_tx_map call */
    first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;

    /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
    mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

    /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
    vlan_macip_lens = skb_transport_offset(skb) +
              sizeof(struct fc_frame_header);
    vlan_macip_lens |= (skb_transport_offset(skb) - 4)
               << IXGBE_ADVTXD_MACLEN_SHIFT;
    vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

    /* write context desc */
    ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
              type_tucmd, mss_l4len_idx);

    return 0;
}

static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
    struct ixgbe_fcoe_ddp_pool *ddp_pool;

    ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
    dma_pool_destroy(ddp_pool->pool);
    ddp_pool->pool = NULL;
}

static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
                     struct device *dev,
                     unsigned int cpu)
{
    struct ixgbe_fcoe_ddp_pool *ddp_pool;
    struct dma_pool *pool;
    char pool_name[32];

    snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);

    pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
                   IXGBE_FCPTR_ALIGN, PAGE_SIZE);
    if (!pool)
        return -ENOMEM;

    ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
    ddp_pool->pool = pool;
    ddp_pool->noddp = 0;
    ddp_pool->noddp_ext_buff = 0;

    return 0;
}

/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns: none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
    struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
    struct ixgbe_hw *hw = &adapter->hw;
    int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
    int fcreta_size;
    u32 etqf;

    /* Minimal functionality for FCoE requires at least CRC offloads */
    if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
        return;

    /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
    etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
    if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
        etqf |= IXGBE_ETQF_POOL_ENABLE;
        etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
    }
    IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
    IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);

    /* leave registers un-configured if FCoE is disabled */
    if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
        return;

    /* Use one or more Rx queues for FCoE by redirection table */
    fcreta_size = IXGBE_FCRETA_SIZE;
    if (adapter->hw.mac.type == ixgbe_mac_X550)
        fcreta_size = IXGBE_FCRETA_SIZE_X550;

    for (i = 0; i < fcreta_size; i++) {
        if (adapter->hw.mac.type == ixgbe_mac_X550) {
            int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
                            fcoe->indices);
            fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
            fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
                   IXGBE_FCRETA_ENTRY_HIGH_MASK;
        }

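        /* Each FCRETA entry selects an Rx queue by its absolute
         * register index; on X550 a second queue index is packed into
         * the high half of the same entry (fcoe_q_h above).
         */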
        fcoe_i = fcoe->offset + (i % fcoe->indices);
        fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
        fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
        fcoe_q |= fcoe_q_h;
        IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
    }
    IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);

    /* Enable L2 EtherType filter for FIP */
    etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
    if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
        etqf |= IXGBE_ETQF_POOL_ENABLE;
        etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
    }
    IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

    /* Send FIP frames to the first FCoE queue */
    fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
    IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
            IXGBE_ETQS_QUEUE_EN |
            (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

    /* Configure FCoE Rx control */
    IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
            IXGBE_FCRXCTRL_FCCRCBO |
            (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
}

/**
 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns: none
 */
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
    struct ixgbe_fcoe *fcoe = &adapter->fcoe;
    int cpu, i, ddp_max;

    /* do nothing if no DDP pools were allocated */
    if (!fcoe->ddp_pool)
        return;

    ddp_max = IXGBE_FCOE_DDP_MAX;
    /* X550 has different DDP Max limit */
    if (adapter->hw.mac.type == ixgbe_mac_X550)
        ddp_max = IXGBE_FCOE_DDP_MAX_X550;

    for (i = 0; i < ddp_max; i++)
        ixgbe_fcoe_ddp_put(adapter->netdev, i);

    for_each_possible_cpu(cpu)
        ixgbe_fcoe_dma_pool_free(fcoe, cpu);

    dma_unmap_single(&adapter->pdev->dev,
             fcoe->extra_ddp_buffer_dma,
             IXGBE_FCBUFF_MIN,
             DMA_FROM_DEVICE);
    kfree(fcoe->extra_ddp_buffer);

    fcoe->extra_ddp_buffer = NULL;
    fcoe->extra_ddp_buffer_dma = 0;
}

/**
 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Sets up ddp context resources
 *
 * Returns: 0 indicates success or -ENOMEM on failure
 */
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
    struct ixgbe_fcoe *fcoe = &adapter->fcoe;
    struct device *dev = &adapter->pdev->dev;
    void *buffer;
    dma_addr_t dma;
    unsigned int cpu;

    /* do nothing if no DDP pools were allocated */
    if (!fcoe->ddp_pool)
        return 0;

    /* Extra buffer to be shared by all DDPs for HW work around */
    buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
    if (!buffer)
        return -ENOMEM;

    dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, dma)) {
        e_err(drv, "failed to map extra DDP buffer\n");
        kfree(buffer);
        return -ENOMEM;
    }

    fcoe->extra_ddp_buffer = buffer;
    fcoe->extra_ddp_buffer_dma = dma;

    /* allocate pci pool for each cpu */
    for_each_possible_cpu(cpu) {
        int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
        if (!err)
            continue;

        e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
        ixgbe_free_fcoe_ddp_resources(adapter);
        return -ENOMEM;
    }

    return 0;
}

static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
{
    struct ixgbe_fcoe *fcoe = &adapter->fcoe;

    if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
        return -EINVAL;

    fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);

    if (!fcoe->ddp_pool) {
        e_err(drv, "failed to allocate percpu DDP resources\n");
        return -ENOMEM;
    }

    adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
    /* X550 has different DDP Max limit */
    if (adapter->hw.mac.type == ixgbe_mac_X550)
        adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;

    return 0;
}

static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
{
    struct ixgbe_fcoe *fcoe = &adapter->fcoe;

    adapter->netdev->fcoe_ddp_xid = 0;

    if (!fcoe->ddp_pool)
        return;

    free_percpu(fcoe->ddp_pool);
    fcoe->ddp_pool = NULL;
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns: 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    struct ixgbe_fcoe *fcoe = &adapter->fcoe;

    atomic_inc(&fcoe->refcnt);

    if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
        return -EINVAL;

    if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
        return -EINVAL;

    e_info(drv, "Enabling FCoE offload features.\n");

    if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
        e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");

    if (netif_running(netdev))
        netdev->netdev_ops->ndo_stop(netdev);

    /* Allocate per CPU memory to track DDP pools */
    ixgbe_fcoe_ddp_enable(adapter);

    /* enable FCoE and notify stack */
    adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
    netdev->features |= NETIF_F_FCOE_MTU;
    netdev_features_change(netdev);

    /* release existing queues and reallocate them */
    ixgbe_clear_interrupt_scheme(adapter);
    ixgbe_init_interrupt_scheme(adapter);

    if (netif_running(netdev))
        netdev->netdev_ops->ndo_open(netdev);

    return 0;
}

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns: 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);

    if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
        return -EINVAL;

    if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
        return -EINVAL;

    e_info(drv, "Disabling FCoE offload features.\n");
    if (netif_running(netdev))
        netdev->netdev_ops->ndo_stop(netdev);

    /* Free per CPU memory to track DDP pools */
    ixgbe_fcoe_ddp_disable(adapter);

    /* disable FCoE and notify stack */
    adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
    netdev->features &= ~NETIF_F_FCOE_MTU;

    netdev_features_change(netdev);

    /* release existing queues and reallocate them */
    ixgbe_clear_interrupt_scheme(adapter);
    ixgbe_init_interrupt_scheme(adapter);

    if (netif_running(netdev))
        netdev->netdev_ops->ndo_open(netdev);

    return 0;
}

/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding net_device
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name if both the prefix and the SAN
 * MAC address are valid. The wwn is formed based on NAA-2, the IEEE
 * Extended name identifier (ref. T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns: 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
    u16 prefix = 0xffff;
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    struct ixgbe_mac_info *mac = &adapter->hw.mac;

    switch (type) {
    case NETDEV_FCOE_WWNN:
        prefix = mac->wwnn_prefix;
        break;
    case NETDEV_FCOE_WWPN:
        prefix = mac->wwpn_prefix;
        break;
    default:
        break;
    }

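    /* NAA-2 (IEEE Extended) layout: the 16-bit prefix occupies the
     * top two bytes of the 64-bit WWN, followed by the 48-bit SAN
     * MAC address.
     */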
    if ((prefix != 0xffff) &&
        is_valid_ether_addr(mac->san_addr)) {
        *wwn = ((u64) prefix << 48) |
               ((u64) mac->san_addr[0] << 40) |
               ((u64) mac->san_addr[1] << 32) |
               ((u64) mac->san_addr[2] << 24) |
               ((u64) mac->san_addr[3] << 16) |
               ((u64) mac->san_addr[4] << 8)  |
               ((u64) mac->san_addr[5]);
        return 0;
    }
    return -EINVAL;
}

/**
 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
 * @netdev: the corresponding net_device
 * @info: HBA information to fill in
 *
 * Fills in the ixgbe HBA information
 *
 * Returns: 0 on success
 */
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
               struct netdev_fcoe_hbainfo *info)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    struct ixgbe_hw *hw = &adapter->hw;
    u64 dsn;

    if (!info)
        return -EINVAL;

    /* Don't return information on unsupported devices */
    if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
        return -EINVAL;

    /* Manufacturer */
    snprintf(info->manufacturer, sizeof(info->manufacturer),
         "Intel Corporation");

    /* Serial Number */

    /* Get the PCI-e Device Serial Number Capability */
    dsn = pci_get_dsn(adapter->pdev);
    if (dsn)
        snprintf(info->serial_number, sizeof(info->serial_number),
             "%016llX", dsn);
    else
        snprintf(info->serial_number, sizeof(info->serial_number),
             "Unknown");

    /* Hardware Version */
    snprintf(info->hardware_version,
         sizeof(info->hardware_version),
         "Rev %d", hw->revision_id);
    /* Driver Name/Version */
    snprintf(info->driver_version,
         sizeof(info->driver_version),
         "%s v%s",
         ixgbe_driver_name,
         UTS_RELEASE);
    /* Firmware Version */
    strlcpy(info->firmware_version, adapter->eeprom_id,
        sizeof(info->firmware_version));

    /* Model */
    if (hw->mac.type == ixgbe_mac_82599EB) {
        snprintf(info->model,
             sizeof(info->model),
             "Intel 82599");
    } else if (hw->mac.type == ixgbe_mac_X550) {
        snprintf(info->model,
             sizeof(info->model),
             "Intel X550");
    } else {
        snprintf(info->model,
             sizeof(info->model),
             "Intel X540");
    }

    /* Model Description */
    snprintf(info->model_description,
         sizeof(info->model_description),
         "%s",
         ixgbe_default_device_descr);

    return 0;
}

/**
 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter: pointer to the device adapter structure
 *
 * Return: the TC that FCoE is mapped to
 */
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
    return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
#else
    return 0;
#endif
}