0001 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
0002 /* Copyright (c) 2021, Microsoft Corporation. */
0003 
0004 #include <uapi/linux/bpf.h>
0005 
0006 #include <linux/inetdevice.h>
0007 #include <linux/etherdevice.h>
0008 #include <linux/ethtool.h>
0009 #include <linux/filter.h>
0010 #include <linux/mm.h>
0011 
0012 #include <net/checksum.h>
0013 #include <net/ip6_checksum.h>
0014 
0015 #include "mana.h"
0016 
0017 /* Microsoft Azure Network Adapter (MANA) functions */
0018 
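/* Bring the port up: allocate all TX/RX queues, publish port_is_up and
 * only then wake the TX queues. The smp_wmb() below orders the
 * port_is_up store before the queue wake-up; the TX completion path
 * reads the two back in the opposite order under smp_rmb().
 */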
0019 static int mana_open(struct net_device *ndev)
0020 {
0021     struct mana_port_context *apc = netdev_priv(ndev);
0022     int err;
0023 
0024     err = mana_alloc_queues(ndev);
0025     if (err)
0026         return err;
0027 
0028     apc->port_is_up = true;
0029 
0030     /* Ensure port state updated before txq state */
0031     smp_wmb();
0032 
0033     netif_carrier_on(ndev);
0034     netif_tx_wake_all_queues(ndev);
0035 
0036     return 0;
0037 }
0038 
0039 static int mana_close(struct net_device *ndev)
0040 {
0041     struct mana_port_context *apc = netdev_priv(ndev);
0042 
0043     if (!apc->port_is_up)
0044         return 0;
0045 
0046     return mana_detach(ndev, true);
0047 }
0048 
0049 static bool mana_can_tx(struct gdma_queue *wq)
0050 {
0051     return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
0052 }
0053 
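/* Return the L4 protocol (IPPROTO_TCP or IPPROTO_UDP) for which the HW
 * can offload this skb's checksum, or 0 if no offload is possible.
 */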
0054 static unsigned int mana_checksum_info(struct sk_buff *skb)
0055 {
0056     if (skb->protocol == htons(ETH_P_IP)) {
0057         struct iphdr *ip = ip_hdr(skb);
0058 
0059         if (ip->protocol == IPPROTO_TCP)
0060             return IPPROTO_TCP;
0061 
0062         if (ip->protocol == IPPROTO_UDP)
0063             return IPPROTO_UDP;
0064     } else if (skb->protocol == htons(ETH_P_IPV6)) {
0065         struct ipv6hdr *ip6 = ipv6_hdr(skb);
0066 
0067         if (ip6->nexthdr == IPPROTO_TCP)
0068             return IPPROTO_TCP;
0069 
0070         if (ip6->nexthdr == IPPROTO_UDP)
0071             return IPPROTO_UDP;
0072     }
0073 
0074     /* No csum offloading */
0075     return 0;
0076 }
0077 
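/* DMA-map the skb linear data and every fragment, filling the SGL of
 * the TX package. The DMA handles are stashed in the mana_skb_head at
 * skb->head so mana_unmap_skb() can undo the mapping at completion
 * time; on a fragment mapping failure everything mapped so far is
 * unwound.
 */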
0078 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
0079             struct mana_tx_package *tp)
0080 {
0081     struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
0082     struct gdma_dev *gd = apc->ac->gdma_dev;
0083     struct gdma_context *gc;
0084     struct device *dev;
0085     skb_frag_t *frag;
0086     dma_addr_t da;
0087     int i;
0088 
0089     gc = gd->gdma_context;
0090     dev = gc->dev;
0091     da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
0092 
0093     if (dma_mapping_error(dev, da))
0094         return -ENOMEM;
0095 
0096     ash->dma_handle[0] = da;
0097     ash->size[0] = skb_headlen(skb);
0098 
0099     tp->wqe_req.sgl[0].address = ash->dma_handle[0];
0100     tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
0101     tp->wqe_req.sgl[0].size = ash->size[0];
0102 
0103     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
0104         frag = &skb_shinfo(skb)->frags[i];
0105         da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
0106                       DMA_TO_DEVICE);
0107 
0108         if (dma_mapping_error(dev, da))
0109             goto frag_err;
0110 
0111         ash->dma_handle[i + 1] = da;
0112         ash->size[i + 1] = skb_frag_size(frag);
0113 
0114         tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
0115         tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
0116         tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
0117     }
0118 
0119     return 0;
0120 
0121 frag_err:
0122     for (i = i - 1; i >= 0; i--)
0123         dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
0124                    DMA_TO_DEVICE);
0125 
0126     dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
0127 
0128     return -ENOMEM;
0129 }
0130 
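/* ndo_start_xmit: build the per-packet out-of-band area (short or long
 * format depending on the vPort offset), set up the GSO/checksum
 * offload bits, DMA-map the skb, post the work request to the SQ and
 * ring the doorbell. The skb is queued on txq->pending_skbs before
 * posting and is reclaimed by mana_poll_tx_cq() once the completion
 * arrives.
 */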
0131 int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
0132 {
0133     enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
0134     struct mana_port_context *apc = netdev_priv(ndev);
0135     u16 txq_idx = skb_get_queue_mapping(skb);
0136     struct gdma_dev *gd = apc->ac->gdma_dev;
0137     bool ipv4 = false, ipv6 = false;
0138     struct mana_tx_package pkg = {};
0139     struct netdev_queue *net_txq;
0140     struct mana_stats_tx *tx_stats;
0141     struct gdma_queue *gdma_sq;
0142     unsigned int csum_type;
0143     struct mana_txq *txq;
0144     struct mana_cq *cq;
0145     int err, len;
0146 
0147     if (unlikely(!apc->port_is_up))
0148         goto tx_drop;
0149 
0150     if (skb_cow_head(skb, MANA_HEADROOM))
0151         goto tx_drop_count;
0152 
0153     txq = &apc->tx_qp[txq_idx].txq;
0154     gdma_sq = txq->gdma_sq;
0155     cq = &apc->tx_qp[txq_idx].tx_cq;
0156 
0157     pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
0158     pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
0159 
0160     if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
0161         pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
0162         pkt_fmt = MANA_LONG_PKT_FMT;
0163     } else {
0164         pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
0165     }
0166 
0167     pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
0168 
0169     if (pkt_fmt == MANA_SHORT_PKT_FMT)
0170         pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
0171     else
0172         pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
0173 
0174     pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
0175     pkg.wqe_req.flags = 0;
0176     pkg.wqe_req.client_data_unit = 0;
0177 
0178     pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
0179     WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
0180 
0181     if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
0182         pkg.wqe_req.sgl = pkg.sgl_array;
0183     } else {
0184         pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
0185                         sizeof(struct gdma_sge),
0186                         GFP_ATOMIC);
0187         if (!pkg.sgl_ptr)
0188             goto tx_drop_count;
0189 
0190         pkg.wqe_req.sgl = pkg.sgl_ptr;
0191     }
0192 
0193     if (skb->protocol == htons(ETH_P_IP))
0194         ipv4 = true;
0195     else if (skb->protocol == htons(ETH_P_IPV6))
0196         ipv6 = true;
0197 
0198     if (skb_is_gso(skb)) {
0199         pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
0200         pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
0201 
0202         pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
0203         pkg.tx_oob.s_oob.comp_tcp_csum = 1;
0204         pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
0205 
0206         pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
0207         pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
0208         if (ipv4) {
0209             ip_hdr(skb)->tot_len = 0;
0210             ip_hdr(skb)->check = 0;
0211             tcp_hdr(skb)->check =
0212                 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
0213                            ip_hdr(skb)->daddr, 0,
0214                            IPPROTO_TCP, 0);
0215         } else {
0216             ipv6_hdr(skb)->payload_len = 0;
0217             tcp_hdr(skb)->check =
0218                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
0219                          &ipv6_hdr(skb)->daddr, 0,
0220                          IPPROTO_TCP, 0);
0221         }
0222     } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
0223         csum_type = mana_checksum_info(skb);
0224 
0225         if (csum_type == IPPROTO_TCP) {
0226             pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
0227             pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
0228 
0229             pkg.tx_oob.s_oob.comp_tcp_csum = 1;
0230             pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
0231 
0232         } else if (csum_type == IPPROTO_UDP) {
0233             pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
0234             pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
0235 
0236             pkg.tx_oob.s_oob.comp_udp_csum = 1;
0237         } else {
0238             /* Can't do offload of this type of checksum */
0239             if (skb_checksum_help(skb))
0240                 goto free_sgl_ptr;
0241         }
0242     }
0243 
0244     if (mana_map_skb(skb, apc, &pkg))
0245         goto free_sgl_ptr;
0246 
0247     skb_queue_tail(&txq->pending_skbs, skb);
0248 
0249     len = skb->len;
0250     net_txq = netdev_get_tx_queue(ndev, txq_idx);
0251 
0252     err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
0253                     (struct gdma_posted_wqe_info *)skb->cb);
0254     if (!mana_can_tx(gdma_sq)) {
0255         netif_tx_stop_queue(net_txq);
0256         apc->eth_stats.stop_queue++;
0257     }
0258 
0259     if (err) {
0260         (void)skb_dequeue_tail(&txq->pending_skbs);
0261         netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
0262         err = NETDEV_TX_BUSY;
0263         goto tx_busy;
0264     }
0265 
0266     err = NETDEV_TX_OK;
0267     atomic_inc(&txq->pending_sends);
0268 
0269     mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
0270 
0271     /* skb may be freed after mana_gd_post_work_request. Do not use it. */
0272     skb = NULL;
0273 
0274     tx_stats = &txq->stats;
0275     u64_stats_update_begin(&tx_stats->syncp);
0276     tx_stats->packets++;
0277     tx_stats->bytes += len;
0278     u64_stats_update_end(&tx_stats->syncp);
0279 
0280 tx_busy:
0281     if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
0282         netif_tx_wake_queue(net_txq);
0283         apc->eth_stats.wake_queue++;
0284     }
0285 
0286     kfree(pkg.sgl_ptr);
0287     return err;
0288 
0289 free_sgl_ptr:
0290     kfree(pkg.sgl_ptr);
0291 tx_drop_count:
0292     ndev->stats.tx_dropped++;
0293 tx_drop:
0294     dev_kfree_skb_any(skb);
0295     return NETDEV_TX_OK;
0296 }
0297 
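/* ndo_get_stats64: fold the per-queue RX and TX counters into the
 * rtnl_link_stats64 snapshot using the u64_stats seqcount retry loops.
 */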
0298 static void mana_get_stats64(struct net_device *ndev,
0299                  struct rtnl_link_stats64 *st)
0300 {
0301     struct mana_port_context *apc = netdev_priv(ndev);
0302     unsigned int num_queues = apc->num_queues;
0303     struct mana_stats_rx *rx_stats;
0304     struct mana_stats_tx *tx_stats;
0305     unsigned int start;
0306     u64 packets, bytes;
0307     int q;
0308 
0309     if (!apc->port_is_up)
0310         return;
0311 
0312     netdev_stats_to_stats64(st, &ndev->stats);
0313 
0314     for (q = 0; q < num_queues; q++) {
0315         rx_stats = &apc->rxqs[q]->stats;
0316 
0317         do {
0318             start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
0319             packets = rx_stats->packets;
0320             bytes = rx_stats->bytes;
0321         } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
0322 
0323         st->rx_packets += packets;
0324         st->rx_bytes += bytes;
0325     }
0326 
0327     for (q = 0; q < num_queues; q++) {
0328         tx_stats = &apc->tx_qp[q].txq.stats;
0329 
0330         do {
0331             start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
0332             packets = tx_stats->packets;
0333             bytes = tx_stats->bytes;
0334         } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
0335 
0336         st->tx_packets += packets;
0337         st->tx_bytes += bytes;
0338     }
0339 }
0340 
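/* Pick a TX queue from the RSS indirection table based on the skb hash
 * and cache the choice in the socket when it is safe to do so.
 */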
0341 static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
0342                  int old_q)
0343 {
0344     struct mana_port_context *apc = netdev_priv(ndev);
0345     u32 hash = skb_get_hash(skb);
0346     struct sock *sk = skb->sk;
0347     int txq;
0348 
0349     txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
0350 
0351     if (txq != old_q && sk && sk_fullsock(sk) &&
0352         rcu_access_pointer(sk->sk_dst_cache))
0353         sk_tx_queue_set(sk, txq);
0354 
0355     return txq;
0356 }
0357 
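/* ndo_select_queue: prefer the queue cached in the socket, then the
 * recorded RX queue, and finally the hash-based lookup above.
 */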
0358 static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
0359                  struct net_device *sb_dev)
0360 {
0361     int txq;
0362 
0363     if (ndev->real_num_tx_queues == 1)
0364         return 0;
0365 
0366     txq = sk_tx_queue_get(skb->sk);
0367 
0368     if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
0369         if (skb_rx_queue_recorded(skb))
0370             txq = skb_get_rx_queue(skb);
0371         else
0372             txq = mana_get_tx_queue(ndev, skb, txq);
0373     }
0374 
0375     return txq;
0376 }
0377 
0378 static const struct net_device_ops mana_devops = {
0379     .ndo_open       = mana_open,
0380     .ndo_stop       = mana_close,
0381     .ndo_select_queue   = mana_select_queue,
0382     .ndo_start_xmit     = mana_start_xmit,
0383     .ndo_validate_addr  = eth_validate_addr,
0384     .ndo_get_stats64    = mana_get_stats64,
0385     .ndo_bpf        = mana_bpf,
0386     .ndo_xdp_xmit       = mana_xdp_xmit,
0387 };
0388 
0389 static void mana_cleanup_port_context(struct mana_port_context *apc)
0390 {
0391     kfree(apc->rxqs);
0392     apc->rxqs = NULL;
0393 }
0394 
0395 static int mana_init_port_context(struct mana_port_context *apc)
0396 {
0397     apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
0398                 GFP_KERNEL);
0399 
0400     return !apc->rxqs ? -ENOMEM : 0;
0401 }
0402 
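/* Send a MANA management request over the GDMA channel and verify that
 * the response carries the same dev_id and activity_id as the request.
 * Protocol-level failures are reported as -EPROTO.
 */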
0403 static int mana_send_request(struct mana_context *ac, void *in_buf,
0404                  u32 in_len, void *out_buf, u32 out_len)
0405 {
0406     struct gdma_context *gc = ac->gdma_dev->gdma_context;
0407     struct gdma_resp_hdr *resp = out_buf;
0408     struct gdma_req_hdr *req = in_buf;
0409     struct device *dev = gc->dev;
0410     static atomic_t activity_id;
0411     int err;
0412 
0413     req->dev_id = gc->mana.dev_id;
0414     req->activity_id = atomic_inc_return(&activity_id);
0415 
0416     err = mana_gd_send_request(gc, in_len, in_buf, out_len,
0417                    out_buf);
0418     if (err || resp->status) {
0419         dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
0420             err, resp->status);
0421         return err ? err : -EPROTO;
0422     }
0423 
0424     if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
0425         req->activity_id != resp->activity_id) {
0426         dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
0427             req->dev_id.as_uint32, resp->dev_id.as_uint32,
0428             req->activity_id, resp->activity_id);
0429         return -EPROTO;
0430     }
0431 
0432     return 0;
0433 }
0434 
0435 static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
0436                 const enum mana_command_code expected_code,
0437                 const u32 min_size)
0438 {
0439     if (resp_hdr->response.msg_type != expected_code)
0440         return -EPROTO;
0441 
0442     if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
0443         return -EPROTO;
0444 
0445     if (resp_hdr->response.msg_size < min_size)
0446         return -EPROTO;
0447 
0448     return 0;
0449 }
0450 
0451 static int mana_pf_register_hw_vport(struct mana_port_context *apc)
0452 {
0453     struct mana_register_hw_vport_resp resp = {};
0454     struct mana_register_hw_vport_req req = {};
0455     int err;
0456 
0457     mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
0458                  sizeof(req), sizeof(resp));
0459     req.attached_gfid = 1;
0460     req.is_pf_default_vport = 1;
0461     req.allow_all_ether_types = 1;
0462 
0463     err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
0464                 sizeof(resp));
0465     if (err) {
0466         netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
0467         return err;
0468     }
0469 
0470     err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
0471                    sizeof(resp));
0472     if (err || resp.hdr.status) {
0473         netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
0474                err, resp.hdr.status);
0475         return err ? err : -EPROTO;
0476     }
0477 
0478     apc->port_handle = resp.hw_vport_handle;
0479     return 0;
0480 }
0481 
0482 static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
0483 {
0484     struct mana_deregister_hw_vport_resp resp = {};
0485     struct mana_deregister_hw_vport_req req = {};
0486     int err;
0487 
0488     mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
0489                  sizeof(req), sizeof(resp));
0490     req.hw_vport_handle = apc->port_handle;
0491 
0492     err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
0493                 sizeof(resp));
0494     if (err) {
0495         netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
0496                err);
0497         return;
0498     }
0499 
0500     err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
0501                    sizeof(resp));
0502     if (err || resp.hdr.status)
0503         netdev_err(apc->ndev,
0504                "Failed to deregister hw vPort: %d, 0x%x\n",
0505                err, resp.hdr.status);
0506 }
0507 
0508 static int mana_pf_register_filter(struct mana_port_context *apc)
0509 {
0510     struct mana_register_filter_resp resp = {};
0511     struct mana_register_filter_req req = {};
0512     int err;
0513 
0514     mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
0515                  sizeof(req), sizeof(resp));
0516     req.vport = apc->port_handle;
0517     memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
0518 
0519     err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
0520                 sizeof(resp));
0521     if (err) {
0522         netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
0523         return err;
0524     }
0525 
0526     err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
0527                    sizeof(resp));
0528     if (err || resp.hdr.status) {
0529         netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
0530                err, resp.hdr.status);
0531         return err ? err : -EPROTO;
0532     }
0533 
0534     apc->pf_filter_handle = resp.filter_handle;
0535     return 0;
0536 }
0537 
0538 static void mana_pf_deregister_filter(struct mana_port_context *apc)
0539 {
0540     struct mana_deregister_filter_resp resp = {};
0541     struct mana_deregister_filter_req req = {};
0542     int err;
0543 
0544     mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
0545                  sizeof(req), sizeof(resp));
0546     req.filter_handle = apc->pf_filter_handle;
0547 
0548     err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
0549                 sizeof(resp));
0550     if (err) {
0551         netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
0552                err);
0553         return;
0554     }
0555 
0556     err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
0557                    sizeof(resp));
0558     if (err || resp.hdr.status)
0559         netdev_err(apc->ndev,
0560                "Failed to deregister filter: %d, 0x%x\n",
0561                err, resp.hdr.status);
0562 }
0563 
0564 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
0565                  u32 proto_minor_ver, u32 proto_micro_ver,
0566                  u16 *max_num_vports)
0567 {
0568     struct gdma_context *gc = ac->gdma_dev->gdma_context;
0569     struct mana_query_device_cfg_resp resp = {};
0570     struct mana_query_device_cfg_req req = {};
0571     struct device *dev = gc->dev;
0572     int err = 0;
0573 
0574     mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
0575                  sizeof(req), sizeof(resp));
0576     req.proto_major_ver = proto_major_ver;
0577     req.proto_minor_ver = proto_minor_ver;
0578     req.proto_micro_ver = proto_micro_ver;
0579 
0580     err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
0581     if (err) {
0582         dev_err(dev, "Failed to query config: %d\n", err);
0583         return err;
0584     }
0585 
0586     err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
0587                    sizeof(resp));
0588     if (err || resp.hdr.status) {
0589         dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
0590             resp.hdr.status);
0591         if (!err)
0592             err = -EPROTO;
0593         return err;
0594     }
0595 
0596     *max_num_vports = resp.max_num_vports;
0597 
0598     return 0;
0599 }
0600 
0601 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
0602                 u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
0603 {
0604     struct mana_query_vport_cfg_resp resp = {};
0605     struct mana_query_vport_cfg_req req = {};
0606     int err;
0607 
0608     mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
0609                  sizeof(req), sizeof(resp));
0610 
0611     req.vport_index = vport_index;
0612 
0613     err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
0614                 sizeof(resp));
0615     if (err)
0616         return err;
0617 
0618     err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
0619                    sizeof(resp));
0620     if (err)
0621         return err;
0622 
0623     if (resp.hdr.status)
0624         return -EPROTO;
0625 
0626     *max_sq = resp.max_num_sq;
0627     *max_rq = resp.max_num_rq;
0628     *num_indir_entry = resp.num_indirection_ent;
0629 
0630     apc->port_handle = resp.vport;
0631     ether_addr_copy(apc->mac_addr, resp.mac_addr);
0632 
0633     return 0;
0634 }
0635 
0636 static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
0637               u32 doorbell_pg_id)
0638 {
0639     struct mana_config_vport_resp resp = {};
0640     struct mana_config_vport_req req = {};
0641     int err;
0642 
0643     mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
0644                  sizeof(req), sizeof(resp));
0645     req.vport = apc->port_handle;
0646     req.pdid = protection_dom_id;
0647     req.doorbell_pageid = doorbell_pg_id;
0648 
0649     err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
0650                 sizeof(resp));
0651     if (err) {
0652         netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
0653         goto out;
0654     }
0655 
0656     err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
0657                    sizeof(resp));
0658     if (err || resp.hdr.status) {
0659         netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
0660                err, resp.hdr.status);
0661         if (!err)
0662             err = -EPROTO;
0663 
0664         goto out;
0665     }
0666 
0667     apc->tx_shortform_allowed = resp.short_form_allowed;
0668     apc->tx_vp_offset = resp.tx_vport_offset;
0669 out:
0670     return err;
0671 }
0672 
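/* Configure RX steering for the vPort: RSS state, hash key and the
 * indirection table of RX object handles. The request is allocated
 * dynamically because the indirection table is appended immediately
 * after the fixed-size header.
 */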
0673 static int mana_cfg_vport_steering(struct mana_port_context *apc,
0674                    enum TRI_STATE rx,
0675                    bool update_default_rxobj, bool update_key,
0676                    bool update_tab)
0677 {
0678     u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
0679     struct mana_cfg_rx_steer_req *req = NULL;
0680     struct mana_cfg_rx_steer_resp resp = {};
0681     struct net_device *ndev = apc->ndev;
0682     mana_handle_t *req_indir_tab;
0683     u32 req_buf_size;
0684     int err;
0685 
0686     req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
0687     req = kzalloc(req_buf_size, GFP_KERNEL);
0688     if (!req)
0689         return -ENOMEM;
0690 
0691     mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
0692                  sizeof(resp));
0693 
0694     req->vport = apc->port_handle;
0695     req->num_indir_entries = num_entries;
0696     req->indir_tab_offset = sizeof(*req);
0697     req->rx_enable = rx;
0698     req->rss_enable = apc->rss_state;
0699     req->update_default_rxobj = update_default_rxobj;
0700     req->update_hashkey = update_key;
0701     req->update_indir_tab = update_tab;
0702     req->default_rxobj = apc->default_rxobj;
0703 
0704     if (update_key)
0705         memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
0706 
0707     if (update_tab) {
0708         req_indir_tab = (mana_handle_t *)(req + 1);
0709         memcpy(req_indir_tab, apc->rxobj_table,
0710                req->num_indir_entries * sizeof(mana_handle_t));
0711     }
0712 
0713     err = mana_send_request(apc->ac, req, req_buf_size, &resp,
0714                 sizeof(resp));
0715     if (err) {
0716         netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
0717         goto out;
0718     }
0719 
0720     err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
0721                    sizeof(resp));
0722     if (err) {
0723         netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
0724         goto out;
0725     }
0726 
0727     if (resp.hdr.status) {
0728         netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
0729                resp.hdr.status);
0730         err = -EPROTO;
0731     }
0732 out:
0733     kfree(req);
0734     return err;
0735 }
0736 
0737 static int mana_create_wq_obj(struct mana_port_context *apc,
0738                   mana_handle_t vport,
0739                   u32 wq_type, struct mana_obj_spec *wq_spec,
0740                   struct mana_obj_spec *cq_spec,
0741                   mana_handle_t *wq_obj)
0742 {
0743     struct mana_create_wqobj_resp resp = {};
0744     struct mana_create_wqobj_req req = {};
0745     struct net_device *ndev = apc->ndev;
0746     int err;
0747 
0748     mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
0749                  sizeof(req), sizeof(resp));
0750     req.vport = vport;
0751     req.wq_type = wq_type;
0752     req.wq_gdma_region = wq_spec->gdma_region;
0753     req.cq_gdma_region = cq_spec->gdma_region;
0754     req.wq_size = wq_spec->queue_size;
0755     req.cq_size = cq_spec->queue_size;
0756     req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
0757     req.cq_parent_qid = cq_spec->attached_eq;
0758 
0759     err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
0760                 sizeof(resp));
0761     if (err) {
0762         netdev_err(ndev, "Failed to create WQ object: %d\n", err);
0763         goto out;
0764     }
0765 
0766     err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
0767                    sizeof(resp));
0768     if (err || resp.hdr.status) {
0769         netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
0770                resp.hdr.status);
0771         if (!err)
0772             err = -EPROTO;
0773         goto out;
0774     }
0775 
0776     if (resp.wq_obj == INVALID_MANA_HANDLE) {
0777         netdev_err(ndev, "Got an invalid WQ object handle\n");
0778         err = -EPROTO;
0779         goto out;
0780     }
0781 
0782     *wq_obj = resp.wq_obj;
0783     wq_spec->queue_index = resp.wq_id;
0784     cq_spec->queue_index = resp.cq_id;
0785 
0786     return 0;
0787 out:
0788     return err;
0789 }
0790 
0791 static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
0792                 mana_handle_t wq_obj)
0793 {
0794     struct mana_destroy_wqobj_resp resp = {};
0795     struct mana_destroy_wqobj_req req = {};
0796     struct net_device *ndev = apc->ndev;
0797     int err;
0798 
0799     mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
0800                  sizeof(req), sizeof(resp));
0801     req.wq_type = wq_type;
0802     req.wq_obj_handle = wq_obj;
0803 
0804     err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
0805                 sizeof(resp));
0806     if (err) {
0807         netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
0808         return;
0809     }
0810 
0811     err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
0812                    sizeof(resp));
0813     if (err || resp.hdr.status)
0814         netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
0815                resp.hdr.status);
0816 }
0817 
0818 static void mana_destroy_eq(struct mana_context *ac)
0819 {
0820     struct gdma_context *gc = ac->gdma_dev->gdma_context;
0821     struct gdma_queue *eq;
0822     int i;
0823 
0824     if (!ac->eqs)
0825         return;
0826 
0827     for (i = 0; i < gc->max_num_queues; i++) {
0828         eq = ac->eqs[i].eq;
0829         if (!eq)
0830             continue;
0831 
0832         mana_gd_destroy_queue(gc, eq);
0833     }
0834 
0835     kfree(ac->eqs);
0836     ac->eqs = NULL;
0837 }
0838 
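/* Allocate one EQ per HW queue supported by the GDMA context. The EQs
 * belong to the adapter-wide mana_context and are freed again by
 * mana_destroy_eq() if any creation step fails.
 */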
0839 static int mana_create_eq(struct mana_context *ac)
0840 {
0841     struct gdma_dev *gd = ac->gdma_dev;
0842     struct gdma_context *gc = gd->gdma_context;
0843     struct gdma_queue_spec spec = {};
0844     int err;
0845     int i;
0846 
0847     ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
0848               GFP_KERNEL);
0849     if (!ac->eqs)
0850         return -ENOMEM;
0851 
0852     spec.type = GDMA_EQ;
0853     spec.monitor_avl_buf = false;
0854     spec.queue_size = EQ_SIZE;
0855     spec.eq.callback = NULL;
0856     spec.eq.context = ac->eqs;
0857     spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
0858 
0859     for (i = 0; i < gc->max_num_queues; i++) {
0860         err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
0861         if (err)
0862             goto out;
0863     }
0864 
0865     return 0;
0866 out:
0867     mana_destroy_eq(ac);
0868     return err;
0869 }
0870 
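/* Fence an RQ: request a CQE_RX_OBJECT_FENCE completion from the HW and
 * wait (up to 10 s) for mana_process_rx_cqe() to signal fence_event, so
 * that no older RX work remains in flight for that queue.
 */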
0871 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
0872 {
0873     struct mana_fence_rq_resp resp = {};
0874     struct mana_fence_rq_req req = {};
0875     int err;
0876 
0877     init_completion(&rxq->fence_event);
0878 
0879     mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
0880                  sizeof(req), sizeof(resp));
0881     req.wq_obj_handle = rxq->rxobj;
0882 
0883     err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
0884                 sizeof(resp));
0885     if (err) {
0886         netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
0887                rxq->rxq_idx, err);
0888         return err;
0889     }
0890 
0891     err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
0892     if (err || resp.hdr.status) {
0893         netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
0894                rxq->rxq_idx, err, resp.hdr.status);
0895         if (!err)
0896             err = -EPROTO;
0897 
0898         return err;
0899     }
0900 
0901     if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
0902         netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
0903                rxq->rxq_idx);
0904         return -ETIMEDOUT;
0905     }
0906 
0907     return 0;
0908 }
0909 
0910 static void mana_fence_rqs(struct mana_port_context *apc)
0911 {
0912     unsigned int rxq_idx;
0913     struct mana_rxq *rxq;
0914     int err;
0915 
0916     for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
0917         rxq = apc->rxqs[rxq_idx];
0918         err = mana_fence_rq(apc, rxq);
0919 
0920         /* If fencing failed, fall back to a 100 ms sleep instead. */
0921         if (err)
0922             msleep(100);
0923     }
0924 }
0925 
0926 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
0927 {
0928     u32 used_space_old;
0929     u32 used_space_new;
0930 
0931     used_space_old = wq->head - wq->tail;
0932     used_space_new = wq->head - (wq->tail + num_units);
0933 
0934     if (WARN_ON_ONCE(used_space_new > used_space_old))
0935         return -ERANGE;
0936 
0937     wq->tail += num_units;
0938     return 0;
0939 }
0940 
0941 static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
0942 {
0943     struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
0944     struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
0945     struct device *dev = gc->dev;
0946     int i;
0947 
0948     dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
0949 
0950     for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
0951         dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
0952                    DMA_TO_DEVICE);
0953 }
0954 
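/* TX completion processing: drain up to CQE_POLLING_BUFFER completions,
 * unmap and free the matching skbs from txq->pending_skbs, advance the
 * SQ tail by the consumed WQE units, and wake the netdev queue if it
 * was stopped and enough space is available again.
 */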
0955 static void mana_poll_tx_cq(struct mana_cq *cq)
0956 {
0957     struct gdma_comp *completions = cq->gdma_comp_buf;
0958     struct gdma_posted_wqe_info *wqe_info;
0959     unsigned int pkt_transmitted = 0;
0960     unsigned int wqe_unit_cnt = 0;
0961     struct mana_txq *txq = cq->txq;
0962     struct mana_port_context *apc;
0963     struct netdev_queue *net_txq;
0964     struct gdma_queue *gdma_wq;
0965     unsigned int avail_space;
0966     struct net_device *ndev;
0967     struct sk_buff *skb;
0968     bool txq_stopped;
0969     int comp_read;
0970     int i;
0971 
0972     ndev = txq->ndev;
0973     apc = netdev_priv(ndev);
0974 
0975     comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
0976                     CQE_POLLING_BUFFER);
0977 
0978     if (comp_read < 1)
0979         return;
0980 
0981     for (i = 0; i < comp_read; i++) {
0982         struct mana_tx_comp_oob *cqe_oob;
0983 
0984         if (WARN_ON_ONCE(!completions[i].is_sq))
0985             return;
0986 
0987         cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
0988         if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
0989                  MANA_CQE_COMPLETION))
0990             return;
0991 
0992         switch (cqe_oob->cqe_hdr.cqe_type) {
0993         case CQE_TX_OKAY:
0994             break;
0995 
0996         case CQE_TX_SA_DROP:
0997         case CQE_TX_MTU_DROP:
0998         case CQE_TX_INVALID_OOB:
0999         case CQE_TX_INVALID_ETH_TYPE:
1000         case CQE_TX_HDR_PROCESSING_ERROR:
1001         case CQE_TX_VF_DISABLED:
1002         case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1003         case CQE_TX_VPORT_DISABLED:
1004         case CQE_TX_VLAN_TAGGING_VIOLATION:
1005             WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
1006                   cqe_oob->cqe_hdr.cqe_type);
1007             break;
1008 
1009         default:
1010             /* If the CQE type is unexpected, log an error, assert,
1011              * and go through the error path.
1012              */
1013             WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
1014                   cqe_oob->cqe_hdr.cqe_type);
1015             return;
1016         }
1017 
1018         if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1019             return;
1020 
1021         skb = skb_dequeue(&txq->pending_skbs);
1022         if (WARN_ON_ONCE(!skb))
1023             return;
1024 
1025         wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1026         wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1027 
1028         mana_unmap_skb(skb, apc);
1029 
1030         napi_consume_skb(skb, cq->budget);
1031 
1032         pkt_transmitted++;
1033     }
1034 
1035     if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1036         return;
1037 
1038     mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1039 
1040     gdma_wq = txq->gdma_sq;
1041     avail_space = mana_gd_wq_avail_space(gdma_wq);
1042 
1043     /* Ensure tail updated before checking q stop */
1044     smp_mb();
1045 
1046     net_txq = txq->net_txq;
1047     txq_stopped = netif_tx_queue_stopped(net_txq);
1048 
1049     /* Ensure checking txq_stopped before apc->port_is_up. */
1050     smp_rmb();
1051 
1052     if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1053         netif_tx_wake_queue(net_txq);
1054         apc->eth_stats.wake_queue++;
1055     }
1056 
1057     if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1058         WARN_ON_ONCE(1);
1059 
1060     cq->work_done = pkt_transmitted;
1061 }
1062 
1063 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1064 {
1065     struct mana_recv_buf_oob *recv_buf_oob;
1066     u32 curr_index;
1067     int err;
1068 
1069     curr_index = rxq->buf_index++;
1070     if (rxq->buf_index == rxq->num_rx_buf)
1071         rxq->buf_index = 0;
1072 
1073     recv_buf_oob = &rxq->rx_oobs[curr_index];
1074 
1075     err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1076                     &recv_buf_oob->wqe_inf);
1077     if (WARN_ON_ONCE(err))
1078         return;
1079 
1080     WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1081 }
1082 
1083 static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
1084                       struct xdp_buff *xdp)
1085 {
1086     struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
1087 
1088     if (!skb)
1089         return NULL;
1090 
1091     if (xdp->data_hard_start) {
1092         skb_reserve(skb, xdp->data - xdp->data_hard_start);
1093         skb_put(skb, xdp->data_end - xdp->data);
1094     } else {
1095         skb_reserve(skb, XDP_PACKET_HEADROOM);
1096         skb_put(skb, pkt_len);
1097     }
1098 
1099     return skb;
1100 }
1101 
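/* Deliver one received buffer: run the XDP program first, then build an
 * skb around the page, fill in checksum and RSS-hash metadata from the
 * CQE, and hand the skb to GRO (or to mana_xdp_tx() for XDP_TX).
 */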
1102 static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
1103             struct mana_rxq *rxq)
1104 {
1105     struct mana_stats_rx *rx_stats = &rxq->stats;
1106     struct net_device *ndev = rxq->ndev;
1107     uint pkt_len = cqe->ppi[0].pkt_len;
1108     u16 rxq_idx = rxq->rxq_idx;
1109     struct napi_struct *napi;
1110     struct xdp_buff xdp = {};
1111     struct sk_buff *skb;
1112     u32 hash_value;
1113     u32 act;
1114 
1115     rxq->rx_cq.work_done++;
1116     napi = &rxq->rx_cq.napi;
1117 
1118     if (!buf_va) {
1119         ++ndev->stats.rx_dropped;
1120         return;
1121     }
1122 
1123     act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1124 
1125     if (act == XDP_REDIRECT && !rxq->xdp_rc)
1126         return;
1127 
1128     if (act != XDP_PASS && act != XDP_TX)
1129         goto drop_xdp;
1130 
1131     skb = mana_build_skb(buf_va, pkt_len, &xdp);
1132 
1133     if (!skb)
1134         goto drop;
1135 
1136     skb->dev = napi->dev;
1137 
1138     skb->protocol = eth_type_trans(skb, ndev);
1139     skb_checksum_none_assert(skb);
1140     skb_record_rx_queue(skb, rxq_idx);
1141 
1142     if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1143         if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1144             skb->ip_summed = CHECKSUM_UNNECESSARY;
1145     }
1146 
1147     if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1148         hash_value = cqe->ppi[0].pkt_hash;
1149 
1150         if (cqe->rx_hashtype & MANA_HASH_L4)
1151             skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1152         else
1153             skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1154     }
1155 
1156     u64_stats_update_begin(&rx_stats->syncp);
1157     rx_stats->packets++;
1158     rx_stats->bytes += pkt_len;
1159 
1160     if (act == XDP_TX)
1161         rx_stats->xdp_tx++;
1162     u64_stats_update_end(&rx_stats->syncp);
1163 
1164     if (act == XDP_TX) {
1165         skb_set_queue_mapping(skb, rxq_idx);
1166         mana_xdp_tx(skb, ndev);
1167         return;
1168     }
1169 
1170     napi_gro_receive(napi, skb);
1171 
1172     return;
1173 
1174 drop_xdp:
1175     u64_stats_update_begin(&rx_stats->syncp);
1176     rx_stats->xdp_drop++;
1177     u64_stats_update_end(&rx_stats->syncp);
1178 
1179 drop:
1180     WARN_ON_ONCE(rxq->xdp_save_page);
1181     rxq->xdp_save_page = virt_to_page(buf_va);
1182 
1183     ++ndev->stats.rx_dropped;
1184 
1185     return;
1186 }
1187 
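/* Handle one RX CQE: on CQE_RX_OKAY swap a freshly mapped page into the
 * WQE (reusing an XDP-dropped page when one is saved), pass the old
 * buffer up via mana_rx_skb(), and repost the WQE. Truncated packets
 * are dropped but their buffer is still recycled.
 */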
1188 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1189                 struct gdma_comp *cqe)
1190 {
1191     struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1192     struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1193     struct net_device *ndev = rxq->ndev;
1194     struct mana_recv_buf_oob *rxbuf_oob;
1195     struct device *dev = gc->dev;
1196     void *new_buf, *old_buf;
1197     struct page *new_page;
1198     u32 curr, pktlen;
1199     dma_addr_t da;
1200 
1201     switch (oob->cqe_hdr.cqe_type) {
1202     case CQE_RX_OKAY:
1203         break;
1204 
1205     case CQE_RX_TRUNCATED:
1206         ++ndev->stats.rx_dropped;
1207         rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1208         netdev_warn_once(ndev, "Dropped a truncated packet\n");
1209         goto drop;
1210 
1211     case CQE_RX_COALESCED_4:
1212         netdev_err(ndev, "RX coalescing is unsupported\n");
1213         return;
1214 
1215     case CQE_RX_OBJECT_FENCE:
1216         complete(&rxq->fence_event);
1217         return;
1218 
1219     default:
1220         netdev_err(ndev, "Unknown RX CQE type = %d\n",
1221                oob->cqe_hdr.cqe_type);
1222         return;
1223     }
1224 
1225     pktlen = oob->ppi[0].pkt_len;
1226 
1227     if (pktlen == 0) {
1228         /* Data packets should never have a packet length of zero. */
1229         netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1230                rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1231         return;
1232     }
1233 
1234     curr = rxq->buf_index;
1235     rxbuf_oob = &rxq->rx_oobs[curr];
1236     WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1237 
1238     /* Reuse XDP dropped page if available */
1239     if (rxq->xdp_save_page) {
1240         new_page = rxq->xdp_save_page;
1241         rxq->xdp_save_page = NULL;
1242     } else {
1243         new_page = alloc_page(GFP_ATOMIC);
1244     }
1245 
1246     if (new_page) {
1247         da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
1248                   DMA_FROM_DEVICE);
1249 
1250         if (dma_mapping_error(dev, da)) {
1251             __free_page(new_page);
1252             new_page = NULL;
1253         }
1254     }
1255 
1256     new_buf = new_page ? page_to_virt(new_page) : NULL;
1257 
1258     if (new_buf) {
1259         dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
1260                    DMA_FROM_DEVICE);
1261 
1262         old_buf = rxbuf_oob->buf_va;
1263 
1264         /* refresh the rxbuf_oob with the new page */
1265         rxbuf_oob->buf_va = new_buf;
1266         rxbuf_oob->buf_dma_addr = da;
1267         rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
1268     } else {
1269         old_buf = NULL; /* drop the packet if no memory */
1270     }
1271 
1272     mana_rx_skb(old_buf, oob, rxq);
1273 
1274 drop:
1275     mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1276 
1277     mana_post_pkt_rxq(rxq);
1278 }
1279 
1280 static void mana_poll_rx_cq(struct mana_cq *cq)
1281 {
1282     struct gdma_comp *comp = cq->gdma_comp_buf;
1283     struct mana_rxq *rxq = cq->rxq;
1284     int comp_read, i;
1285 
1286     comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1287     WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1288 
1289     rxq->xdp_flush = false;
1290 
1291     for (i = 0; i < comp_read; i++) {
1292         if (WARN_ON_ONCE(comp[i].is_sq))
1293             return;
1294 
1295         /* verify recv cqe references the right rxq */
1296         if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1297             return;
1298 
1299         mana_process_rx_cqe(rxq, cq, &comp[i]);
1300     }
1301 
1302     if (rxq->xdp_flush)
1303         xdp_do_flush();
1304 }
1305 
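/* Common CQ handler used by the NAPI poll routine: dispatch to the RX
 * or TX poller, then ring the CQ doorbell, setting the arm bit only
 * when the budget was not exhausted and NAPI completed.
 */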
1306 static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1307 {
1308     struct mana_cq *cq = context;
1309     u8 arm_bit;
1310 
1311     WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1312 
1313     if (cq->type == MANA_CQ_TYPE_RX)
1314         mana_poll_rx_cq(cq);
1315     else
1316         mana_poll_tx_cq(cq);
1317 
1318     if (cq->work_done < cq->budget &&
1319         napi_complete_done(&cq->napi, cq->work_done)) {
1320         arm_bit = SET_ARM_BIT;
1321     } else {
1322         arm_bit = 0;
1323     }
1324 
1325     mana_gd_ring_cq(gdma_queue, arm_bit);
1326 }
1327 
1328 static int mana_poll(struct napi_struct *napi, int budget)
1329 {
1330     struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1331 
1332     cq->work_done = 0;
1333     cq->budget = budget;
1334 
1335     mana_cq_handler(cq, cq->gdma_cq);
1336 
1337     return min(cq->work_done, budget);
1338 }
1339 
1340 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1341 {
1342     struct mana_cq *cq = context;
1343 
1344     napi_schedule_irqoff(&cq->napi);
1345 }
1346 
1347 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1348 {
1349     struct gdma_dev *gd = apc->ac->gdma_dev;
1350 
1351     if (!cq->gdma_cq)
1352         return;
1353 
1354     mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1355 }
1356 
1357 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1358 {
1359     struct gdma_dev *gd = apc->ac->gdma_dev;
1360 
1361     if (!txq->gdma_sq)
1362         return;
1363 
1364     mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1365 }
1366 
1367 static void mana_destroy_txq(struct mana_port_context *apc)
1368 {
1369     struct napi_struct *napi;
1370     int i;
1371 
1372     if (!apc->tx_qp)
1373         return;
1374 
1375     for (i = 0; i < apc->num_queues; i++) {
1376         napi = &apc->tx_qp[i].tx_cq.napi;
1377         napi_synchronize(napi);
1378         napi_disable(napi);
1379         netif_napi_del(napi);
1380 
1381         mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1382 
1383         mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1384 
1385         mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1386     }
1387 
1388     kfree(apc->tx_qp);
1389     apc->tx_qp = NULL;
1390 }
1391 
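/* Create one SQ plus completion CQ per configured queue: allocate the
 * GDMA queues, create the HW WQ object that binds them to the vPort,
 * register the CQ in gc->cq_table and enable a TX NAPI instance for it.
 */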
1392 static int mana_create_txq(struct mana_port_context *apc,
1393                struct net_device *net)
1394 {
1395     struct mana_context *ac = apc->ac;
1396     struct gdma_dev *gd = ac->gdma_dev;
1397     struct mana_obj_spec wq_spec;
1398     struct mana_obj_spec cq_spec;
1399     struct gdma_queue_spec spec;
1400     struct gdma_context *gc;
1401     struct mana_txq *txq;
1402     struct mana_cq *cq;
1403     u32 txq_size;
1404     u32 cq_size;
1405     int err;
1406     int i;
1407 
1408     apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1409                  GFP_KERNEL);
1410     if (!apc->tx_qp)
1411         return -ENOMEM;
1412 
1413     /*  The minimum size of the WQE is 32 bytes, hence
1414      *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1415      *  the SQ can store. This value is then used to size other queues
1416      *  to prevent overflow.
1417      */
1418     txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1419     BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1420 
1421     cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1422     cq_size = PAGE_ALIGN(cq_size);
1423 
1424     gc = gd->gdma_context;
1425 
1426     for (i = 0; i < apc->num_queues; i++) {
1427         apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1428 
1429         /* Create SQ */
1430         txq = &apc->tx_qp[i].txq;
1431 
1432         u64_stats_init(&txq->stats.syncp);
1433         txq->ndev = net;
1434         txq->net_txq = netdev_get_tx_queue(net, i);
1435         txq->vp_offset = apc->tx_vp_offset;
1436         skb_queue_head_init(&txq->pending_skbs);
1437 
1438         memset(&spec, 0, sizeof(spec));
1439         spec.type = GDMA_SQ;
1440         spec.monitor_avl_buf = true;
1441         spec.queue_size = txq_size;
1442         err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1443         if (err)
1444             goto out;
1445 
1446         /* Create SQ's CQ */
1447         cq = &apc->tx_qp[i].tx_cq;
1448         cq->type = MANA_CQ_TYPE_TX;
1449 
1450         cq->txq = txq;
1451 
1452         memset(&spec, 0, sizeof(spec));
1453         spec.type = GDMA_CQ;
1454         spec.monitor_avl_buf = false;
1455         spec.queue_size = cq_size;
1456         spec.cq.callback = mana_schedule_napi;
1457         spec.cq.parent_eq = ac->eqs[i].eq;
1458         spec.cq.context = cq;
1459         err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1460         if (err)
1461             goto out;
1462 
1463         memset(&wq_spec, 0, sizeof(wq_spec));
1464         memset(&cq_spec, 0, sizeof(cq_spec));
1465 
1466         wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1467         wq_spec.queue_size = txq->gdma_sq->queue_size;
1468 
1469         cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1470         cq_spec.queue_size = cq->gdma_cq->queue_size;
1471         cq_spec.modr_ctx_id = 0;
1472         cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1473 
1474         err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1475                      &wq_spec, &cq_spec,
1476                      &apc->tx_qp[i].tx_object);
1477 
1478         if (err)
1479             goto out;
1480 
1481         txq->gdma_sq->id = wq_spec.queue_index;
1482         cq->gdma_cq->id = cq_spec.queue_index;
1483 
1484         txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1485         cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1486 
1487         txq->gdma_txq_id = txq->gdma_sq->id;
1488 
1489         cq->gdma_id = cq->gdma_cq->id;
1490 
1491         if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1492             err = -EINVAL;
1493             goto out;
1494         }
1495 
1496         gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1497 
1498         netif_napi_add_tx(net, &cq->napi, mana_poll);
1499         napi_enable(&cq->napi);
1500 
1501         mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1502     }
1503 
1504     return 0;
1505 out:
1506     mana_destroy_txq(apc);
1507     return err;
1508 }
1509 
1510 static void mana_destroy_rxq(struct mana_port_context *apc,
1511                  struct mana_rxq *rxq, bool validate_state)
1512 
1513 {
1514     struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1515     struct mana_recv_buf_oob *rx_oob;
1516     struct device *dev = gc->dev;
1517     struct napi_struct *napi;
1518     int i;
1519 
1520     if (!rxq)
1521         return;
1522 
1523     napi = &rxq->rx_cq.napi;
1524 
1525     if (validate_state)
1526         napi_synchronize(napi);
1527 
1528     napi_disable(napi);
1529 
1530     xdp_rxq_info_unreg(&rxq->xdp_rxq);
1531 
1532     netif_napi_del(napi);
1533 
1534     mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1535 
1536     mana_deinit_cq(apc, &rxq->rx_cq);
1537 
1538     if (rxq->xdp_save_page)
1539         __free_page(rxq->xdp_save_page);
1540 
1541     for (i = 0; i < rxq->num_rx_buf; i++) {
1542         rx_oob = &rxq->rx_oobs[i];
1543 
1544         if (!rx_oob->buf_va)
1545             continue;
1546 
1547         dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
1548                    DMA_FROM_DEVICE);
1549 
1550         free_page((unsigned long)rx_oob->buf_va);
1551         rx_oob->buf_va = NULL;
1552     }
1553 
1554     if (rxq->gdma_rq)
1555         mana_gd_destroy_queue(gc, rxq->gdma_rq);
1556 
1557     kfree(rxq);
1558 }
1559 
1560 #define MANA_WQE_HEADER_SIZE 16
1561 #define MANA_WQE_SGE_SIZE 16
1562 
1563 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
1564                  struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
1565 {
1566     struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1567     struct mana_recv_buf_oob *rx_oob;
1568     struct device *dev = gc->dev;
1569     struct page *page;
1570     dma_addr_t da;
1571     u32 buf_idx;
1572 
1573     WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
1574 
1575     *rxq_size = 0;
1576     *cq_size = 0;
1577 
1578     for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1579         rx_oob = &rxq->rx_oobs[buf_idx];
1580         memset(rx_oob, 0, sizeof(*rx_oob));
1581 
1582         page = alloc_page(GFP_KERNEL);
1583         if (!page)
1584             return -ENOMEM;
1585 
1586         da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
1587                   DMA_FROM_DEVICE);
1588 
1589         if (dma_mapping_error(dev, da)) {
1590             __free_page(page);
1591             return -ENOMEM;
1592         }
1593 
1594         rx_oob->buf_va = page_to_virt(page);
1595         rx_oob->buf_dma_addr = da;
1596 
1597         rx_oob->num_sge = 1;
1598         rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
1599         rx_oob->sgl[0].size = rxq->datasize;
1600         rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
1601 
1602         rx_oob->wqe_req.sgl = rx_oob->sgl;
1603         rx_oob->wqe_req.num_sge = rx_oob->num_sge;
1604         rx_oob->wqe_req.inline_oob_size = 0;
1605         rx_oob->wqe_req.inline_oob_data = NULL;
1606         rx_oob->wqe_req.flags = 0;
1607         rx_oob->wqe_req.client_data_unit = 0;
1608 
1609         *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
1610                    MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
1611         *cq_size += COMP_ENTRY_SIZE;
1612     }
1613 
1614     return 0;
1615 }
1616 
1617 static int mana_push_wqe(struct mana_rxq *rxq)
1618 {
1619     struct mana_recv_buf_oob *rx_oob;
1620     u32 buf_idx;
1621     int err;
1622 
1623     for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1624         rx_oob = &rxq->rx_oobs[buf_idx];
1625 
1626         err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
1627                         &rx_oob->wqe_inf);
1628         if (err)
1629             return -ENOSPC;
1630     }
1631 
1632     return 0;
1633 }
1634 
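/* Create one RQ plus completion CQ: pre-allocate and DMA-map one page
 * per receive WQE, create the GDMA queues and the HW WQ object, post
 * the initial WQEs, register the rxq with the XDP infrastructure and
 * enable its NAPI instance. Returns NULL on any failure.
 */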
1635 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
1636                     u32 rxq_idx, struct mana_eq *eq,
1637                     struct net_device *ndev)
1638 {
1639     struct gdma_dev *gd = apc->ac->gdma_dev;
1640     struct mana_obj_spec wq_spec;
1641     struct mana_obj_spec cq_spec;
1642     struct gdma_queue_spec spec;
1643     struct mana_cq *cq = NULL;
1644     struct gdma_context *gc;
1645     u32 cq_size, rq_size;
1646     struct mana_rxq *rxq;
1647     int err;
1648 
1649     gc = gd->gdma_context;
1650 
1651     rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
1652               GFP_KERNEL);
1653     if (!rxq)
1654         return NULL;
1655 
1656     rxq->ndev = ndev;
1657     rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
1658     rxq->rxq_idx = rxq_idx;
1659     rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
1660     rxq->rxobj = INVALID_MANA_HANDLE;
1661 
1662     err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
1663     if (err)
1664         goto out;
1665 
1666     rq_size = PAGE_ALIGN(rq_size);
1667     cq_size = PAGE_ALIGN(cq_size);
1668 
1669     /* Create RQ */
1670     memset(&spec, 0, sizeof(spec));
1671     spec.type = GDMA_RQ;
1672     spec.monitor_avl_buf = true;
1673     spec.queue_size = rq_size;
1674     err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
1675     if (err)
1676         goto out;
1677 
1678     /* Create RQ's CQ */
1679     cq = &rxq->rx_cq;
1680     cq->type = MANA_CQ_TYPE_RX;
1681     cq->rxq = rxq;
1682 
1683     memset(&spec, 0, sizeof(spec));
1684     spec.type = GDMA_CQ;
1685     spec.monitor_avl_buf = false;
1686     spec.queue_size = cq_size;
1687     spec.cq.callback = mana_schedule_napi;
1688     spec.cq.parent_eq = eq->eq;
1689     spec.cq.context = cq;
1690     err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1691     if (err)
1692         goto out;
1693 
1694     memset(&wq_spec, 0, sizeof(wq_spec));
1695     memset(&cq_spec, 0, sizeof(cq_spec));
1696     wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
1697     wq_spec.queue_size = rxq->gdma_rq->queue_size;
1698 
1699     cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1700     cq_spec.queue_size = cq->gdma_cq->queue_size;
1701     cq_spec.modr_ctx_id = 0;
1702     cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1703 
1704     err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
1705                  &wq_spec, &cq_spec, &rxq->rxobj);
1706     if (err)
1707         goto out;
1708 
1709     rxq->gdma_rq->id = wq_spec.queue_index;
1710     cq->gdma_cq->id = cq_spec.queue_index;
1711 
1712     rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1713     cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1714 
1715     rxq->gdma_id = rxq->gdma_rq->id;
1716     cq->gdma_id = cq->gdma_cq->id;
1717 
1718     err = mana_push_wqe(rxq);
1719     if (err)
1720         goto out;
1721 
1722     if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1723         err = -EINVAL;
1724         goto out;
1725     }
1726 
1727     gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1728 
1729     netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
1730 
1731     WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
1732                  cq->napi.napi_id));
1733     WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
1734                        MEM_TYPE_PAGE_SHARED, NULL));
1735 
1736     napi_enable(&cq->napi);
1737 
1738     mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1739 out:
1740     if (!err)
1741         return rxq;
1742 
1743     netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
1744 
1745     mana_destroy_rxq(apc, rxq, false);
1746 
1747     if (cq)
1748         mana_deinit_cq(apc, cq);
1749 
1750     return NULL;
1751 }
1752 
1753 static int mana_add_rx_queues(struct mana_port_context *apc,
1754                   struct net_device *ndev)
1755 {
1756     struct mana_context *ac = apc->ac;
1757     struct mana_rxq *rxq;
1758     int err = 0;
1759     int i;
1760 
1761     for (i = 0; i < apc->num_queues; i++) {
1762         rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
1763         if (!rxq) {
1764             err = -ENOMEM;
1765             goto out;
1766         }
1767 
1768         u64_stats_init(&rxq->stats.syncp);
1769 
1770         apc->rxqs[i] = rxq;
1771     }
1772 
1773     apc->default_rxobj = apc->rxqs[0]->rxobj;
1774 out:
1775     return err;
1776 }
1777 
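/* Tear down what mana_create_vport() and mana_add_rx_queues() set up:
 * destroy every RXQ, then the TXQs, and on PF devices deregister the
 * hardware vPort.
 */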
1778 static void mana_destroy_vport(struct mana_port_context *apc)
1779 {
1780     struct gdma_dev *gd = apc->ac->gdma_dev;
1781     struct mana_rxq *rxq;
1782     u32 rxq_idx;
1783 
1784     for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1785         rxq = apc->rxqs[rxq_idx];
1786         if (!rxq)
1787             continue;
1788 
1789         mana_destroy_rxq(apc, rxq, true);
1790         apc->rxqs[rxq_idx] = NULL;
1791     }
1792 
1793     mana_destroy_txq(apc);
1794 
1795     if (gd->gdma_context->is_pf)
1796         mana_pf_deregister_hw_vport(apc);
1797 }
1798 
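/* Bring up the transmit side of a port: on PF devices register the hardware
 * vPort first, then configure the vPort with the GDMA device's protection
 * domain and doorbell, and finally create the TX queues.
 */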
1799 static int mana_create_vport(struct mana_port_context *apc,
1800                  struct net_device *net)
1801 {
1802     struct gdma_dev *gd = apc->ac->gdma_dev;
1803     int err;
1804 
1805     apc->default_rxobj = INVALID_MANA_HANDLE;
1806 
1807     if (gd->gdma_context->is_pf) {
1808         err = mana_pf_register_hw_vport(apc);
1809         if (err)
1810             return err;
1811     }
1812 
1813     err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
1814     if (err)
1815         return err;
1816 
1817     return mana_create_txq(apc, net);
1818 }
1819 
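/* Fill the RSS indirection table with the ethtool default spread, i.e. a
 * round-robin mapping of table entries to queues: with num_queues == 4 the
 * table reads 0, 1, 2, 3, 0, 1, 2, 3, ...
 */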
1820 static void mana_rss_table_init(struct mana_port_context *apc)
1821 {
1822     int i;
1823 
1824     for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
1825         apc->indir_table[i] =
1826             ethtool_rxfh_indir_default(i, apc->num_queues);
1827 }
1828 
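/* Push the current RSS state to the hardware: optionally refresh the hash
 * key and/or the RX-object indirection table, update the vPort steering
 * configuration, then fence the RQs so the new steering has taken effect
 * before returning.
 */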
1829 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
1830             bool update_hash, bool update_tab)
1831 {
1832     u32 queue_idx;
1833     int err;
1834     int i;
1835 
1836     if (update_tab) {
1837         for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
1838             queue_idx = apc->indir_table[i];
1839             apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
1840         }
1841     }
1842 
1843     err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
1844     if (err)
1845         return err;
1846 
1847     mana_fence_rqs(apc);
1848 
1849     return 0;
1850 }
1851 
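/* Query the vPort configuration, clamp the requested queue counts to what
 * the hardware supports, and program the port's MAC address into the netdev.
 * On failure the rxqs array allocated by mana_init_port_context() is freed.
 */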
1852 static int mana_init_port(struct net_device *ndev)
1853 {
1854     struct mana_port_context *apc = netdev_priv(ndev);
1855     u32 max_txq, max_rxq, max_queues;
1856     int port_idx = apc->port_idx;
1857     u32 num_indirect_entries;
1858     int err;
1859 
1860     err = mana_init_port_context(apc);
1861     if (err)
1862         return err;
1863 
1864     err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
1865                    &num_indirect_entries);
1866     if (err) {
1867         netdev_err(ndev, "Failed to query info for vPort %d\n",
1868                port_idx);
1869         goto reset_apc;
1870     }
1871 
1872     max_queues = min_t(u32, max_txq, max_rxq);
1873     if (apc->max_queues > max_queues)
1874         apc->max_queues = max_queues;
1875 
1876     if (apc->num_queues > apc->max_queues)
1877         apc->num_queues = apc->max_queues;
1878 
1879     eth_hw_addr_set(ndev, apc->mac_addr);
1880 
1881     return 0;
1882 
1883 reset_apc:
1884     kfree(apc->rxqs);
1885     apc->rxqs = NULL;
1886     return err;
1887 }
1888 
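/* Bring up the data path for an open port: create the vPort and TX queues,
 * set the real TX/RX queue counts, create the RX queues, (re)program RSS,
 * register the PF filter when running as a PF, and finally restore any
 * attached XDP program on the channels.
 */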
1889 int mana_alloc_queues(struct net_device *ndev)
1890 {
1891     struct mana_port_context *apc = netdev_priv(ndev);
1892     struct gdma_dev *gd = apc->ac->gdma_dev;
1893     int err;
1894 
1895     err = mana_create_vport(apc, ndev);
1896     if (err)
1897         return err;
1898 
1899     err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
1900     if (err)
1901         goto destroy_vport;
1902 
1903     err = mana_add_rx_queues(apc, ndev);
1904     if (err)
1905         goto destroy_vport;
1906 
1907     apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
1908 
1909     err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
1910     if (err)
1911         goto destroy_vport;
1912 
1913     mana_rss_table_init(apc);
1914 
1915     err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
1916     if (err)
1917         goto destroy_vport;
1918 
1919     if (gd->gdma_context->is_pf) {
1920         err = mana_pf_register_filter(apc);
1921         if (err)
1922             goto destroy_vport;
1923     }
1924 
1925     mana_chn_setxdp(apc, mana_xdp_get(apc));
1926 
1927     return 0;
1928 
1929 destroy_vport:
1930     mana_destroy_vport(apc);
1931     return err;
1932 }
1933 
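/* Re-create the port after a detach (e.g. on resume). Queues are only
 * reallocated if the port was up when it was detached, and the carrier is
 * restored accordingly before the netdev is re-attached.
 */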
1934 int mana_attach(struct net_device *ndev)
1935 {
1936     struct mana_port_context *apc = netdev_priv(ndev);
1937     int err;
1938 
1939     ASSERT_RTNL();
1940 
1941     err = mana_init_port(ndev);
1942     if (err)
1943         return err;
1944 
1945     if (apc->port_st_save) {
1946         err = mana_alloc_queues(ndev);
1947         if (err) {
1948             mana_cleanup_port_context(apc);
1949             return err;
1950         }
1951     }
1952 
1953     apc->port_is_up = apc->port_st_save;
1954 
1955     /* Ensure port state updated before txq state */
1956     smp_wmb();
1957 
1958     if (apc->port_is_up)
1959         netif_carrier_on(ndev);
1960 
1961     netif_device_attach(ndev);
1962 
1963     return 0;
1964 }
1965 
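/* Quiesce and free the data path of a port that has already been marked
 * down: detach XDP, deregister the PF filter, drain in-flight TX, disable
 * RSS steering and destroy the vPort. Returns -EINVAL if the port is still
 * up.
 */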
1966 static int mana_dealloc_queues(struct net_device *ndev)
1967 {
1968     struct mana_port_context *apc = netdev_priv(ndev);
1969     struct gdma_dev *gd = apc->ac->gdma_dev;
1970     struct mana_txq *txq;
1971     int i, err;
1972 
1973     if (apc->port_is_up)
1974         return -EINVAL;
1975 
1976     mana_chn_setxdp(apc, NULL);
1977 
1978     if (gd->gdma_context->is_pf)
1979         mana_pf_deregister_filter(apc);
1980 
1981     /* No packet can be transmitted now since apc->port_is_up is false.
1982      * There is still a tiny chance that mana_poll_tx_cq() can re-enable
1983      * a txq because it may not see apc->port_is_up being cleared to
1984      * false in time, but that is harmless: mana_start_xmit() drops any
1985      * new packets while apc->port_is_up is false.
1986      *
1987      * Drain all the in-flight TX packets
1988      */
1989     for (i = 0; i < apc->num_queues; i++) {
1990         txq = &apc->tx_qp[i].txq;
1991 
1992         while (atomic_read(&txq->pending_sends) > 0)
1993             usleep_range(1000, 2000);
1994     }
1995 
1996     /* At this point the queues can no longer be woken up, because
1997      * mana_poll_tx_cq() is guaranteed not to be running any more.
1998      */
1999 
2000     apc->rss_state = TRI_STATE_FALSE;
2001     err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2002     if (err) {
2003         netdev_err(ndev, "Failed to disable vPort: %d\n", err);
2004         return err;
2005     }
2006 
2007     mana_destroy_vport(apc);
2008 
2009     return 0;
2010 }
2011 
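/* Counterpart of mana_attach(): remember whether the port was up, stop TX
 * and the carrier, and release the queues if needed. from_close is true when
 * called from ndo_stop (mana_close()), in which case the netdev stays
 * attached and the port context is kept for the next open.
 */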
2012 int mana_detach(struct net_device *ndev, bool from_close)
2013 {
2014     struct mana_port_context *apc = netdev_priv(ndev);
2015     int err;
2016 
2017     ASSERT_RTNL();
2018 
2019     apc->port_st_save = apc->port_is_up;
2020     apc->port_is_up = false;
2021 
2022     /* Ensure port state updated before txq state */
2023     smp_wmb();
2024 
2025     netif_tx_disable(ndev);
2026     netif_carrier_off(ndev);
2027 
2028     if (apc->port_st_save) {
2029         err = mana_dealloc_queues(ndev);
2030         if (err)
2031             return err;
2032     }
2033 
2034     if (!from_close) {
2035         netif_device_detach(ndev);
2036         mana_cleanup_port_context(apc);
2037     }
2038 
2039     return 0;
2040 }
2041 
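/* Allocate and register the net_device for one vPort: a multi-queue etherdev
 * sized to the maximum queue count, with MANA's netdev/ethtool ops, a fixed
 * 1500-byte MTU, checksum/TSO/RXHASH offloads and a freshly generated RSS
 * hash key.
 */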
2042 static int mana_probe_port(struct mana_context *ac, int port_idx,
2043                struct net_device **ndev_storage)
2044 {
2045     struct gdma_context *gc = ac->gdma_dev->gdma_context;
2046     struct mana_port_context *apc;
2047     struct net_device *ndev;
2048     int err;
2049 
2050     ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
2051                  gc->max_num_queues);
2052     if (!ndev)
2053         return -ENOMEM;
2054 
2055     *ndev_storage = ndev;
2056 
2057     apc = netdev_priv(ndev);
2058     apc->ac = ac;
2059     apc->ndev = ndev;
2060     apc->max_queues = gc->max_num_queues;
2061     apc->num_queues = gc->max_num_queues;
2062     apc->port_handle = INVALID_MANA_HANDLE;
2063     apc->pf_filter_handle = INVALID_MANA_HANDLE;
2064     apc->port_idx = port_idx;
2065 
2066     ndev->netdev_ops = &mana_devops;
2067     ndev->ethtool_ops = &mana_ethtool_ops;
2068     ndev->mtu = ETH_DATA_LEN;
2069     ndev->max_mtu = ndev->mtu;
2070     ndev->min_mtu = ndev->mtu;
2071     ndev->needed_headroom = MANA_HEADROOM;
2072     SET_NETDEV_DEV(ndev, gc->dev);
2073 
2074     netif_carrier_off(ndev);
2075 
2076     netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2077 
2078     err = mana_init_port(ndev);
2079     if (err)
2080         goto free_net;
2081 
2082     netdev_lockdep_set_classes(ndev);
2083 
2084     ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2085     ndev->hw_features |= NETIF_F_RXCSUM;
2086     ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2087     ndev->hw_features |= NETIF_F_RXHASH;
2088     ndev->features = ndev->hw_features;
2089     ndev->vlan_features = 0;
2090 
2091     err = register_netdev(ndev);
2092     if (err) {
2093         netdev_err(ndev, "Unable to register netdev.\n");
2094         goto reset_apc;
2095     }
2096 
2097     return 0;
2098 
2099 reset_apc:
2100     kfree(apc->rxqs);
2101     apc->rxqs = NULL;
2102 free_net:
2103     *ndev_storage = NULL;
2104     netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2105     free_netdev(ndev);
2106     return err;
2107 }
2108 
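/* Top-level device probe, also used on resume. Register with the GDMA layer,
 * allocate the per-device context on a fresh probe, create the EQs and
 * negotiate the protocol version and port count, then either probe a
 * net_device for every vPort (initial probe) or re-attach the existing ones
 * under rtnl (resume). Any failure unwinds through mana_remove().
 */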
2109 int mana_probe(struct gdma_dev *gd, bool resuming)
2110 {
2111     struct gdma_context *gc = gd->gdma_context;
2112     struct mana_context *ac = gd->driver_data;
2113     struct device *dev = gc->dev;
2114     u16 num_ports = 0;
2115     int err;
2116     int i;
2117 
2118     dev_info(dev,
2119          "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
2120          MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2121 
2122     err = mana_gd_register_device(gd);
2123     if (err)
2124         return err;
2125 
2126     if (!resuming) {
2127         ac = kzalloc(sizeof(*ac), GFP_KERNEL);
2128         if (!ac)
2129             return -ENOMEM;
2130 
2131         ac->gdma_dev = gd;
2132         gd->driver_data = ac;
2133     }
2134 
2135     err = mana_create_eq(ac);
2136     if (err)
2137         goto out;
2138 
2139     err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2140                     MANA_MICRO_VERSION, &num_ports);
2141     if (err)
2142         goto out;
2143 
2144     if (!resuming) {
2145         ac->num_ports = num_ports;
2146     } else {
2147         if (ac->num_ports != num_ports) {
2148             dev_err(dev, "The number of vPorts changed: %d->%d\n",
2149                 ac->num_ports, num_ports);
2150             err = -EPROTO;
2151             goto out;
2152         }
2153     }
2154 
2155     if (ac->num_ports == 0)
2156         dev_err(dev, "Failed to detect any vPort\n");
2157 
2158     if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2159         ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2160 
2161     if (!resuming) {
2162         for (i = 0; i < ac->num_ports; i++) {
2163             err = mana_probe_port(ac, i, &ac->ports[i]);
2164             if (err)
2165                 break;
2166         }
2167     } else {
2168         for (i = 0; i < ac->num_ports; i++) {
2169             rtnl_lock();
2170             err = mana_attach(ac->ports[i]);
2171             rtnl_unlock();
2172             if (err)
2173                 break;
2174         }
2175     }
2176 out:
2177     if (err)
2178         mana_remove(gd, false);
2179 
2180     return err;
2181 }
2182 
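/* Tear the device down, for removal or for suspend. Each port is detached
 * under rtnl; on removal the net_devices are also unregistered and freed and
 * the per-device context is released, while on suspend they are kept so that
 * mana_probe(gd, true) can re-attach them later. Finally the EQs are
 * destroyed and the device is deregistered from GDMA.
 */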
2183 void mana_remove(struct gdma_dev *gd, bool suspending)
2184 {
2185     struct gdma_context *gc = gd->gdma_context;
2186     struct mana_context *ac = gd->driver_data;
2187     struct device *dev = gc->dev;
2188     struct net_device *ndev;
2189     int err;
2190     int i;
2191 
2192     for (i = 0; i < ac->num_ports; i++) {
2193         ndev = ac->ports[i];
2194         if (!ndev) {
2195             if (i == 0)
2196                 dev_err(dev, "No net device to remove\n");
2197             goto out;
2198         }
2199 
2200         /* All cleanup actions should stay after rtnl_lock(), otherwise
2201          * other functions may access partially cleaned up data.
2202          */
2203         rtnl_lock();
2204 
2205         err = mana_detach(ndev, false);
2206         if (err)
2207             netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2208                    i, err);
2209 
2210         if (suspending) {
2211             /* No need to unregister the ndev. */
2212             rtnl_unlock();
2213             continue;
2214         }
2215 
2216         unregister_netdevice(ndev);
2217 
2218         rtnl_unlock();
2219 
2220         free_netdev(ndev);
2221     }
2222 
2223     mana_destroy_eq(ac);
2224 
2225 out:
2226     mana_gd_deregister_device(gd);
2227 
2228     if (suspending)
2229         return;
2230 
2231     gd->driver_data = NULL;
2232     gd->gdma_context = NULL;
2233     kfree(ac);
2234 }