// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2017 - 2020 Intel Corporation.
 */

/*
 * This file contains HFI1 support for VNIC functionality
 */

#include <linux/io.h>
#include <linux/if_vlan.h>

#include "vnic.h"
#include "netdev.h"

#define HFI_TX_TIMEOUT_MS 1000

#define HFI1_VNIC_RCV_Q_SIZE   1024

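/* bit number in vinfo->flags, used with set_bit()/test_bit()/clear_bit() */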
#define HFI1_VNIC_UP 0

static DEFINE_SPINLOCK(vport_cntr_lock);

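/*
 * Sum one group of per-queue counters into the aggregate stats. The
 * pointer walk relies on the u64 counters of struct opa_vnic_grp_stats,
 * from .unicast through .s_1519_max, being laid out consecutively.
 */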
#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do {            \
        u64 *src64, *dst64;                            \
        for (src64 = &qstats->x_grp.unicast,           \
            dst64 = &stats->x_grp.unicast;             \
            dst64 <= &stats->x_grp.s_1519_max;) {      \
            *dst64++ += *src64++;                      \
        }                                              \
    } while (0)

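/*
 * VNIC ports are keyed in the device's netdev table by VNIC_ID(vesw_id):
 * bit 24 places VNIC entries in their own id range and the low eight
 * bits carry the vESW id.
 */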
#define VNIC_MASK (0xFF)
#define VNIC_ID(val) ((1ull << 24) | ((val) & VNIC_MASK))

/* hfi1_vnic_update_stats - update statistics */
static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo,
                                   struct opa_vnic_stats *stats)
{
    struct net_device *netdev = vinfo->netdev;
    u8 i;

    /* add tx counters on different queues */
    for (i = 0; i < vinfo->num_tx_q; i++) {
        struct opa_vnic_stats *qstats = &vinfo->stats[i];
        struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;

        stats->netstats.tx_fifo_errors += qnstats->tx_fifo_errors;
        stats->netstats.tx_carrier_errors += qnstats->tx_carrier_errors;
        stats->tx_drop_state += qstats->tx_drop_state;
        stats->tx_dlid_zero += qstats->tx_dlid_zero;

        SUM_GRP_COUNTERS(stats, qstats, tx_grp);
        stats->netstats.tx_packets += qnstats->tx_packets;
        stats->netstats.tx_bytes += qnstats->tx_bytes;
    }

    /* add rx counters on different queues */
    for (i = 0; i < vinfo->num_rx_q; i++) {
        struct opa_vnic_stats *qstats = &vinfo->stats[i];
        struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;

        stats->netstats.rx_fifo_errors += qnstats->rx_fifo_errors;
        stats->netstats.rx_nohandler += qnstats->rx_nohandler;
        stats->rx_drop_state += qstats->rx_drop_state;
        stats->rx_oversize += qstats->rx_oversize;
        stats->rx_runt += qstats->rx_runt;

        SUM_GRP_COUNTERS(stats, qstats, rx_grp);
        stats->netstats.rx_packets += qnstats->rx_packets;
        stats->netstats.rx_bytes += qnstats->rx_bytes;
    }

    stats->netstats.tx_errors = stats->netstats.tx_fifo_errors +
                                stats->netstats.tx_carrier_errors +
                                stats->tx_drop_state + stats->tx_dlid_zero;
    stats->netstats.tx_dropped = stats->netstats.tx_errors;

    stats->netstats.rx_errors = stats->netstats.rx_fifo_errors +
                                stats->netstats.rx_nohandler +
                                stats->rx_drop_state + stats->rx_oversize +
                                stats->rx_runt;
    stats->netstats.rx_dropped = stats->netstats.rx_errors;

    netdev->stats.tx_packets = stats->netstats.tx_packets;
    netdev->stats.tx_bytes = stats->netstats.tx_bytes;
    netdev->stats.tx_fifo_errors = stats->netstats.tx_fifo_errors;
    netdev->stats.tx_carrier_errors = stats->netstats.tx_carrier_errors;
    netdev->stats.tx_errors = stats->netstats.tx_errors;
    netdev->stats.tx_dropped = stats->netstats.tx_dropped;

    netdev->stats.rx_packets = stats->netstats.rx_packets;
    netdev->stats.rx_bytes = stats->netstats.rx_bytes;
    netdev->stats.rx_fifo_errors = stats->netstats.rx_fifo_errors;
    netdev->stats.multicast = stats->rx_grp.mcastbcast;
    netdev->stats.rx_length_errors = stats->rx_oversize + stats->rx_runt;
    netdev->stats.rx_errors = stats->netstats.rx_errors;
    netdev->stats.rx_dropped = stats->netstats.rx_dropped;
}

/* update_len_counters - update pkt's len histogram counters */
static inline void update_len_counters(struct opa_vnic_grp_stats *grp,
                                       int len)
{
    /* account for 4 byte FCS */
    if (len >= 1515)
        grp->s_1519_max++;
    else if (len >= 1020)
        grp->s_1024_1518++;
    else if (len >= 508)
        grp->s_512_1023++;
    else if (len >= 252)
        grp->s_256_511++;
    else if (len >= 124)
        grp->s_128_255++;
    else if (len >= 61)
        grp->s_65_127++;
    else
        grp->s_64++;
}
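
/*
 * Note: the thresholds above are the usual RMON size bucket lower bounds
 * minus the 4-byte FCS (61 + 4 = 65, 124 + 4 = 128, ..., 1515 + 4 = 1519);
 * e.g. a 1514-byte frame is 1518 bytes on the wire and is counted in
 * s_1024_1518.
 */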

/* hfi1_vnic_update_tx_counters - update transmit counters */
static void hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo,
                                         u8 q_idx, struct sk_buff *skb, int err)
{
    struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
    struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
    struct opa_vnic_grp_stats *tx_grp = &stats->tx_grp;
    u16 vlan_tci;

    stats->netstats.tx_packets++;
    stats->netstats.tx_bytes += skb->len + ETH_FCS_LEN;

    update_len_counters(tx_grp, skb->len);

    /* rest of the counts are for good packets only */
    if (unlikely(err))
        return;

    if (is_multicast_ether_addr(mac_hdr->h_dest))
        tx_grp->mcastbcast++;
    else
        tx_grp->unicast++;

    if (!__vlan_get_tag(skb, &vlan_tci))
        tx_grp->vlan++;
    else
        tx_grp->untagged++;
}

/* hfi1_vnic_update_rx_counters - update receive counters */
static void hfi1_vnic_update_rx_counters(struct hfi1_vnic_vport_info *vinfo,
                                         u8 q_idx, struct sk_buff *skb, int err)
{
    struct ethhdr *mac_hdr = (struct ethhdr *)skb->data;
    struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
    struct opa_vnic_grp_stats *rx_grp = &stats->rx_grp;
    u16 vlan_tci;

    stats->netstats.rx_packets++;
    stats->netstats.rx_bytes += skb->len + ETH_FCS_LEN;

    update_len_counters(rx_grp, skb->len);

    /* rest of the counts are for good packets only */
    if (unlikely(err))
        return;

    if (is_multicast_ether_addr(mac_hdr->h_dest))
        rx_grp->mcastbcast++;
    else
        rx_grp->unicast++;

    if (!__vlan_get_tag(skb, &vlan_tci))
        rx_grp->vlan++;
    else
        rx_grp->untagged++;
}

/* This function is overloaded for opa_vnic specific implementation */
static void hfi1_vnic_get_stats64(struct net_device *netdev,
                                  struct rtnl_link_stats64 *stats)
{
    struct opa_vnic_stats *vstats = (struct opa_vnic_stats *)stats;
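    /*
     * The cast works only because the opa_vnic caller is expected to
     * pass the netstats member of a struct opa_vnic_stats, and netstats
     * is that struct's first member.
     */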
    struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

    hfi1_vnic_update_stats(vinfo, vstats);
}

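/*
 * create_bypass_pbc - build the Per Buffer Control word for a bypass
 * packet: no HCRC insertion, hardware-inserted ICRC, credit return
 * requested, bypass packet type, plus the VL and the length in dwords.
 */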
static u64 create_bypass_pbc(u32 vl, u32 dw_len)
{
    u64 pbc;

    pbc = ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
        | PBC_INSERT_BYPASS_ICRC | PBC_CREDIT_RETURN
        | PBC_PACKET_BYPASS
        | ((vl & PBC_VL_MASK) << PBC_VL_SHIFT)
        | (dw_len & PBC_LENGTH_DWS_MASK) << PBC_LENGTH_DWS_SHIFT;

    return pbc;
}

/* hfi1_vnic_maybe_stop_tx - stop tx queue if required */
static void hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo,
                                    u8 q_idx)
{
    netif_stop_subqueue(vinfo->netdev, q_idx);
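    /*
     * Stop the queue first, then re-check for SDMA descriptor space; if
     * space freed up after the stop, restart the queue so a wakeup that
     * raced with the stop is not lost.
     */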
    if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
        return;

    netif_start_subqueue(vinfo->netdev, q_idx);
}

static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
                                          struct net_device *netdev)
{
    struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
    u8 pad_len, q_idx = skb->queue_mapping;
    struct hfi1_devdata *dd = vinfo->dd;
    struct opa_vnic_skb_mdata *mdata;
    u32 pkt_len, total_len;
    int err = -EINVAL;
    u64 pbc;

    v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len);
    if (unlikely(!netif_oper_up(netdev))) {
        vinfo->stats[q_idx].tx_drop_state++;
        goto tx_finish;
    }

    /* take out meta data */
    mdata = (struct opa_vnic_skb_mdata *)skb->data;
    skb_pull(skb, sizeof(*mdata));
    if (unlikely(mdata->flags & OPA_VNIC_SKB_MDATA_ENCAP_ERR)) {
        vinfo->stats[q_idx].tx_dlid_zero++;
        goto tx_finish;
    }

    /* add tail padding (for 8 bytes size alignment) and icrc */
    pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
    pad_len += OPA_VNIC_ICRC_TAIL_LEN;
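    /*
     * e.g., assuming OPA_VNIC_ICRC_TAIL_LEN is 5 (4-byte ICRC plus a
     * 1-byte tail): for skb->len = 98, -(98 + 5) & 0x7 = 1 pad byte
     * rounds 103 up to 104, and pad_len becomes 1 + 5 = 6.
     */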

    /*
     * pkt_len is how much data we have to write, including header and
     * data. total_len is the length of the packet in dwords plus the
     * PBC, and should not include the CRC.
     */
    pkt_len = (skb->len + pad_len) >> 2;
    total_len = pkt_len + 2; /* PBC + packet */

    pbc = create_bypass_pbc(mdata->vl, total_len);

    skb_get(skb);
    v_dbg("pbc 0x%016llX len %d pad_len %d\n", pbc, skb->len, pad_len);
    err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len);
    if (unlikely(err)) {
        if (err == -ENOMEM)
            vinfo->stats[q_idx].netstats.tx_fifo_errors++;
        else if (err != -EBUSY)
            vinfo->stats[q_idx].netstats.tx_carrier_errors++;
    }
    /* remove the header before updating tx counters */
    skb_pull(skb, OPA_VNIC_HDR_LEN);

    if (unlikely(err == -EBUSY)) {
        hfi1_vnic_maybe_stop_tx(vinfo, q_idx);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_BUSY;
    }

tx_finish:
    /* update tx counters */
    hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err);
    dev_kfree_skb_any(skb);
    return NETDEV_TX_OK;
}

static u16 hfi1_vnic_select_queue(struct net_device *netdev,
                                  struct sk_buff *skb,
                                  struct net_device *sb_dev)
{
    struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
    struct opa_vnic_skb_mdata *mdata;
    struct sdma_engine *sde;

    mdata = (struct opa_vnic_skb_mdata *)skb->data;
    sde = sdma_select_engine_vl(vinfo->dd, mdata->entropy, mdata->vl);
    return sde->this_idx;
}

/* hfi1_vnic_decap_skb - strip OPA header from the skb (ethernet) packet */
static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
                                      struct sk_buff *skb)
{
    struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
    int max_len = vinfo->netdev->mtu + VLAN_ETH_HLEN;
    int rc = -EFAULT;

    skb_pull(skb, OPA_VNIC_HDR_LEN);

    /* Validate Packet length */
    if (unlikely(skb->len > max_len))
        vinfo->stats[rxq->idx].rx_oversize++;
    else if (unlikely(skb->len < ETH_ZLEN))
        vinfo->stats[rxq->idx].rx_runt++;
    else
        rc = 0;
    return rc;
}

static struct hfi1_vnic_vport_info *get_vnic_port(struct hfi1_devdata *dd,
                                                  int vesw_id)
{
    int vnic_id = VNIC_ID(vesw_id);

    return hfi1_netdev_get_data(dd, vnic_id);
}

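/*
 * Return the first VNIC port on this device, if any. The lookup starts
 * at the lowest VNIC id; an id beyond VNIC_ID(VNIC_MASK) means whatever
 * entry was found is not a VNIC port.
 */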
static struct hfi1_vnic_vport_info *get_first_vnic_port(struct hfi1_devdata *dd)
{
    struct hfi1_vnic_vport_info *vinfo;
    int next_id = VNIC_ID(0);

    vinfo = hfi1_netdev_get_first_data(dd, &next_id);

    if (next_id > VNIC_ID(VNIC_MASK))
        return NULL;

    return vinfo;
}

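/* hfi1_vnic_bypass_rcv - handle a 16B bypass packet on the receive path */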
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
    struct hfi1_devdata *dd = packet->rcd->dd;
    struct hfi1_vnic_vport_info *vinfo = NULL;
    struct hfi1_vnic_rx_queue *rxq;
    struct sk_buff *skb;
    int l4_type, vesw_id = -1, rc;
    u8 q_idx;
    unsigned char *pad_info;

    l4_type = hfi1_16B_get_l4(packet->ebuf);
    if (likely(l4_type == OPA_16B_L4_ETHR)) {
        vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
        vinfo = get_vnic_port(dd, vesw_id);

        /*
         * In case of invalid vesw id, count the error on
         * the first available vport.
         */
        if (unlikely(!vinfo)) {
            struct hfi1_vnic_vport_info *vinfo_tmp;

            vinfo_tmp = get_first_vnic_port(dd);
            if (vinfo_tmp) {
                spin_lock(&vport_cntr_lock);
                vinfo_tmp->stats[0].netstats.rx_nohandler++;
                spin_unlock(&vport_cntr_lock);
            }
        }
    }

    if (unlikely(!vinfo)) {
        dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
                    l4_type, vesw_id, packet->rcd->ctxt);
        return;
    }

    q_idx = packet->rcd->vnic_q_idx;
    rxq = &vinfo->rxq[q_idx];
    if (unlikely(!netif_oper_up(vinfo->netdev))) {
        vinfo->stats[q_idx].rx_drop_state++;
        return;
    }

    skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
    if (unlikely(!skb)) {
        vinfo->stats[q_idx].netstats.rx_fifo_errors++;
        return;
    }

    memcpy(skb->data, packet->ebuf, packet->tlen);
    skb_put(skb, packet->tlen);

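    /*
     * The low three bits of the packet's last byte (the tail) encode
     * the pad length; trim the pad, tail and ICRC so skb->len is the
     * ethernet frame length again.
     */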
    pad_info = skb->data + skb->len - 1;
    skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
                   ((*pad_info) & 0x7)));

    rc = hfi1_vnic_decap_skb(rxq, skb);

    /* update rx counters */
    hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
    if (unlikely(rc)) {
        dev_kfree_skb_any(skb);
        return;
    }

    skb_checksum_none_assert(skb);
    skb->protocol = eth_type_trans(skb, rxq->netdev);

    napi_gro_receive(&rxq->napi, skb);
}

static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
{
    struct hfi1_devdata *dd = vinfo->dd;
    struct net_device *netdev = vinfo->netdev;
    int rc;

    /* ensure virtual eth switch id is valid */
    if (!vinfo->vesw_id)
        return -EINVAL;

    rc = hfi1_netdev_add_data(dd, VNIC_ID(vinfo->vesw_id), vinfo);
    if (rc < 0)
        return rc;

    rc = hfi1_netdev_rx_init(dd);
    if (rc)
        goto err_remove;

    netif_carrier_on(netdev);
    netif_tx_start_all_queues(netdev);
    set_bit(HFI1_VNIC_UP, &vinfo->flags);

    return 0;

err_remove:
    hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
    return rc;
}

static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
{
    struct hfi1_devdata *dd = vinfo->dd;

    clear_bit(HFI1_VNIC_UP, &vinfo->flags);
    netif_carrier_off(vinfo->netdev);
    netif_tx_disable(vinfo->netdev);
    hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));

    hfi1_netdev_rx_destroy(dd);
}

static int hfi1_netdev_open(struct net_device *netdev)
{
    struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
    int rc;

    mutex_lock(&vinfo->lock);
    rc = hfi1_vnic_up(vinfo);
    mutex_unlock(&vinfo->lock);
    return rc;
}

static int hfi1_netdev_close(struct net_device *netdev)
{
    struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

    mutex_lock(&vinfo->lock);
    if (test_bit(HFI1_VNIC_UP, &vinfo->flags))
        hfi1_vnic_down(vinfo);
    mutex_unlock(&vinfo->lock);
    return 0;
}

static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
{
    struct hfi1_devdata *dd = vinfo->dd;
    int rc = 0;

    mutex_lock(&hfi1_mutex);
    if (!dd->vnic_num_vports) {
        rc = hfi1_vnic_txreq_init(dd);
        if (rc)
            goto txreq_fail;
    }

    rc = hfi1_netdev_rx_init(dd);
    if (rc) {
        dd_dev_err(dd, "Unable to initialize netdev contexts\n");
        goto alloc_fail;
    }

    hfi1_init_vnic_rsm(dd);

    dd->vnic_num_vports++;
    hfi1_vnic_sdma_init(vinfo);

alloc_fail:
    if (!dd->vnic_num_vports)
        hfi1_vnic_txreq_deinit(dd);
txreq_fail:
    mutex_unlock(&hfi1_mutex);
    return rc;
}

static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo)
{
    struct hfi1_devdata *dd = vinfo->dd;

    mutex_lock(&hfi1_mutex);
    if (--dd->vnic_num_vports == 0) {
        hfi1_deinit_vnic_rsm(dd);
        hfi1_vnic_txreq_deinit(dd);
    }
    mutex_unlock(&hfi1_mutex);
    hfi1_netdev_rx_destroy(dd);
}

static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id)
{
    struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
    bool reopen = false;

    /*
     * If vesw_id is being changed, and if the vnic port is up,
     * reset the vnic port to ensure new vesw_id gets picked up
     */
    if (id != vinfo->vesw_id) {
        mutex_lock(&vinfo->lock);
        if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) {
            hfi1_vnic_down(vinfo);
            reopen = true;
        }

        vinfo->vesw_id = id;
        if (reopen)
            hfi1_vnic_up(vinfo);

        mutex_unlock(&vinfo->lock);
    }
}

/* netdev ops */
static const struct net_device_ops hfi1_netdev_ops = {
    .ndo_open = hfi1_netdev_open,
    .ndo_stop = hfi1_netdev_close,
    .ndo_start_xmit = hfi1_netdev_start_xmit,
    .ndo_select_queue = hfi1_vnic_select_queue,
    .ndo_get_stats64 = hfi1_vnic_get_stats64,
};

static void hfi1_vnic_free_rn(struct net_device *netdev)
{
    struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

    hfi1_vnic_deinit(vinfo);
    mutex_destroy(&vinfo->lock);
    free_netdev(netdev);
}

struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
                                      u32 port_num,
                                      enum rdma_netdev_t type,
                                      const char *name,
                                      unsigned char name_assign_type,
                                      void (*setup)(struct net_device *))
{
    struct hfi1_devdata *dd = dd_from_ibdev(device);
    struct hfi1_vnic_vport_info *vinfo;
    struct net_device *netdev;
    struct rdma_netdev *rn;
    int i, size, rc;

    if (!dd->num_netdev_contexts)
        return ERR_PTR(-ENOMEM);

    if (!port_num || (port_num > dd->num_pports))
        return ERR_PTR(-EINVAL);

    if (type != RDMA_NETDEV_OPA_VNIC)
        return ERR_PTR(-EOPNOTSUPP);

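    /*
     * The netdev private area holds the opa_vnic rdma netdev followed
     * by our vport info; opa_vnic_dev_priv() returns the latter.
     */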
    size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
    netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
                              chip_sdma_engines(dd),
                              dd->num_netdev_contexts);
    if (!netdev)
        return ERR_PTR(-ENOMEM);

    rn = netdev_priv(netdev);
    vinfo = opa_vnic_dev_priv(netdev);
    vinfo->dd = dd;
    vinfo->num_tx_q = chip_sdma_engines(dd);
    vinfo->num_rx_q = dd->num_netdev_contexts;
    vinfo->netdev = netdev;
    rn->free_rdma_netdev = hfi1_vnic_free_rn;
    rn->set_id = hfi1_vnic_set_vesw_id;

    netdev->features = NETIF_F_HIGHDMA | NETIF_F_SG;
    netdev->hw_features = netdev->features;
    netdev->vlan_features = netdev->features;
    netdev->watchdog_timeo = msecs_to_jiffies(HFI_TX_TIMEOUT_MS);
    netdev->netdev_ops = &hfi1_netdev_ops;
    mutex_init(&vinfo->lock);

    for (i = 0; i < vinfo->num_rx_q; i++) {
        struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

        rxq->idx = i;
        rxq->vinfo = vinfo;
        rxq->netdev = netdev;
    }

    rc = hfi1_vnic_init(vinfo);
    if (rc)
        goto init_fail;

    return netdev;
init_fail:
    mutex_destroy(&vinfo->lock);
    free_netdev(netdev);
    return ERR_PTR(rc);
}
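
/*
 * Usage sketch (informational): the opa_vnic ULP is expected to obtain
 * this netdev through the ib_device alloc_rdma_netdev callback, roughly:
 *
 *    netdev = ibdev->ops.alloc_rdma_netdev(ibdev, port_num,
 *                                          RDMA_NETDEV_OPA_VNIC, name,
 *                                          name_assign_type, setup);
 *
 * and to tear it down via rn->free_rdma_netdev (hfi1_vnic_free_rn here).
 */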