0001 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
0002 /* Copyright 2017-2019 NXP */
0003 
0004 #include "enetc.h"
0005 #include <linux/bpf_trace.h>
0006 #include <linux/tcp.h>
0007 #include <linux/udp.h>
0008 #include <linux/vmalloc.h>
0009 #include <linux/ptp_classify.h>
0010 #include <net/ip6_checksum.h>
0011 #include <net/pkt_sched.h>
0012 #include <net/tso.h>
0013 
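/* Number of TX rings available to the network stack. When an XDP program is
 * attached to any RX ring, one TX ring per possible CPU is reserved for
 * XDP_TX/XDP_REDIRECT traffic, so the stack gets num_tx_rings minus
 * num_possible_cpus(); otherwise it gets all of them.
 */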
0014 static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
0015 {
0016     int num_tx_rings = priv->num_tx_rings;
0017     int i;
0018 
0019     for (i = 0; i < priv->num_rx_rings; i++)
0020         if (priv->rx_ring[i]->xdp.prog)
0021             return num_tx_rings - num_possible_cpus();
0022 
0023     return num_tx_rings;
0024 }
0025 
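/* The XDP TX rings form a contiguous slice of priv->tx_ring[] starting at
 * priv->xdp_tx_ring and are paired 1:1 with the RX rings. The offset of
 * tx_ring within that slice selects the RX ring whose pages it recycles.
 */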
0026 static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
0027                             struct enetc_bdr *tx_ring)
0028 {
0029     int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
0030 
0031     return priv->rx_ring[index];
0032 }
0033 
0034 static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
0035 {
0036     if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
0037         return NULL;
0038 
0039     return tx_swbd->skb;
0040 }
0041 
0042 static struct xdp_frame *
0043 enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
0044 {
0045     if (tx_swbd->is_xdp_redirect)
0046         return tx_swbd->xdp_frame;
0047 
0048     return NULL;
0049 }
0050 
0051 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
0052                 struct enetc_tx_swbd *tx_swbd)
0053 {
0054     /* For XDP_TX, pages come from the RX ring, whereas in the other contexts
0055      * where is_dma_page is set they come from skb_frag_dma_map(). The DMA
0056      * mapping length differs between the two, so differentiate them here.
0057      */
0058     if (tx_swbd->is_dma_page)
0059         dma_unmap_page(tx_ring->dev, tx_swbd->dma,
0060                    tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
0061                    tx_swbd->dir);
0062     else
0063         dma_unmap_single(tx_ring->dev, tx_swbd->dma,
0064                  tx_swbd->len, tx_swbd->dir);
0065     tx_swbd->dma = 0;
0066 }
0067 
0068 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
0069                 struct enetc_tx_swbd *tx_swbd)
0070 {
0071     struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
0072     struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
0073 
0074     if (tx_swbd->dma)
0075         enetc_unmap_tx_buff(tx_ring, tx_swbd);
0076 
0077     if (xdp_frame) {
0078         xdp_return_frame(tx_swbd->xdp_frame);
0079         tx_swbd->xdp_frame = NULL;
0080     } else if (skb) {
0081         dev_kfree_skb_any(skb);
0082         tx_swbd->skb = NULL;
0083     }
0084 }
0085 
0086 /* Let H/W know BD ring has been updated */
0087 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
0088 {
0089     /* includes wmb() */
0090     enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
0091 }
0092 
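/* Classify a PTP packet and extract the fields needed for TX timestamping:
 * whether it is UDP-encapsulated (*udp), the message type, the two-step flag
 * from the PTP header, and the offsets of the correctionField and of the
 * message body relative to the MAC header. Returns -EINVAL for non-PTP skbs.
 */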
0093 static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
0094                u8 *msgtype, u8 *twostep,
0095                u16 *correction_offset, u16 *body_offset)
0096 {
0097     unsigned int ptp_class;
0098     struct ptp_header *hdr;
0099     unsigned int type;
0100     u8 *base;
0101 
0102     ptp_class = ptp_classify_raw(skb);
0103     if (ptp_class == PTP_CLASS_NONE)
0104         return -EINVAL;
0105 
0106     hdr = ptp_parse_header(skb, ptp_class);
0107     if (!hdr)
0108         return -EINVAL;
0109 
0110     type = ptp_class & PTP_CLASS_PMASK;
0111     if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
0112         *udp = 1;
0113     else
0114         *udp = 0;
0115 
0116     *msgtype = ptp_get_msgtype(hdr, ptp_class);
0117     *twostep = hdr->flag_field[0] & 0x2;
0118 
0119     base = skb_mac_header(skb);
0120     *correction_offset = (u8 *)&hdr->correction - base;
0121     *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
0122 
0123     return 0;
0124 }
0125 
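/* Map an skb (head plus page fragments) to TX buffer descriptors. The first
 * BD carries the frame length and offload flags; when VLAN insertion or PTP
 * timestamping is requested, an extension BD is added right after it. For
 * one-step Sync packets, the originTimestamp field inside the packet and the
 * port single-step registers are also updated. Returns the number of BDs
 * consumed, or 0 after unwinding on a DMA mapping error.
 */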
0126 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
0127 {
0128     bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
0129     struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
0130     struct enetc_hw *hw = &priv->si->hw;
0131     struct enetc_tx_swbd *tx_swbd;
0132     int len = skb_headlen(skb);
0133     union enetc_tx_bd temp_bd;
0134     u8 msgtype, twostep, udp;
0135     union enetc_tx_bd *txbd;
0136     u16 offset1, offset2;
0137     int i, count = 0;
0138     skb_frag_t *frag;
0139     unsigned int f;
0140     dma_addr_t dma;
0141     u8 flags = 0;
0142 
0143     i = tx_ring->next_to_use;
0144     txbd = ENETC_TXBD(*tx_ring, i);
0145     prefetchw(txbd);
0146 
0147     dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
0148     if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
0149         goto dma_err;
0150 
0151     temp_bd.addr = cpu_to_le64(dma);
0152     temp_bd.buf_len = cpu_to_le16(len);
0153     temp_bd.lstatus = 0;
0154 
0155     tx_swbd = &tx_ring->tx_swbd[i];
0156     tx_swbd->dma = dma;
0157     tx_swbd->len = len;
0158     tx_swbd->is_dma_page = 0;
0159     tx_swbd->dir = DMA_TO_DEVICE;
0160     count++;
0161 
0162     do_vlan = skb_vlan_tag_present(skb);
0163     if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
0164         if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
0165                     &offset2) ||
0166             msgtype != PTP_MSGTYPE_SYNC || twostep)
0167             WARN_ONCE(1, "Bad packet for one-step timestamping\n");
0168         else
0169             do_onestep_tstamp = true;
0170     } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
0171         do_twostep_tstamp = true;
0172     }
0173 
0174     tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
0175     tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
0176     tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
0177 
0178     if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
0179         flags |= ENETC_TXBD_FLAGS_EX;
0180 
0181     if (tx_ring->tsd_enable)
0182         flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
0183 
0184     /* first BD needs frm_len and offload flags set */
0185     temp_bd.frm_len = cpu_to_le16(skb->len);
0186     temp_bd.flags = flags;
0187 
0188     if (flags & ENETC_TXBD_FLAGS_TSE)
0189         temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
0190                               flags);
0191 
0192     if (flags & ENETC_TXBD_FLAGS_EX) {
0193         u8 e_flags = 0;
0194         *txbd = temp_bd;
0195         enetc_clear_tx_bd(&temp_bd);
0196 
0197         /* add extension BD for VLAN and/or timestamping */
0198         flags = 0;
0199         tx_swbd++;
0200         txbd++;
0201         i++;
0202         if (unlikely(i == tx_ring->bd_count)) {
0203             i = 0;
0204             tx_swbd = tx_ring->tx_swbd;
0205             txbd = ENETC_TXBD(*tx_ring, 0);
0206         }
0207         prefetchw(txbd);
0208 
0209         if (do_vlan) {
0210             temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
0211             temp_bd.ext.tpid = 0; /* < C-TAG */
0212             e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
0213         }
0214 
0215         if (do_onestep_tstamp) {
0216             u32 lo, hi, val;
0217             u64 sec, nsec;
0218             u8 *data;
0219 
0220             lo = enetc_rd_hot(hw, ENETC_SICTR0);
0221             hi = enetc_rd_hot(hw, ENETC_SICTR1);
0222             sec = (u64)hi << 32 | lo;
0223             nsec = do_div(sec, 1000000000);
0224 
0225             /* Configure extension BD */
0226             temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
0227             e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
0228 
0229             /* Update originTimestamp field of Sync packet
0230              * - 48-bit seconds field
0231              * - 32-bit nanoseconds field
0232              */
0233             data = skb_mac_header(skb);
0234             *(__be16 *)(data + offset2) =
0235                 htons((sec >> 32) & 0xffff);
0236             *(__be32 *)(data + offset2 + 2) =
0237                 htonl(sec & 0xffffffff);
0238             *(__be32 *)(data + offset2 + 6) = htonl(nsec);
0239 
0240             /* Configure single-step register */
0241             val = ENETC_PM0_SINGLE_STEP_EN;
0242             val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
0243             if (udp)
0244                 val |= ENETC_PM0_SINGLE_STEP_CH;
0245 
0246             enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
0247             enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
0248         } else if (do_twostep_tstamp) {
0249             skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
0250             e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
0251         }
0252 
0253         temp_bd.ext.e_flags = e_flags;
0254         count++;
0255     }
0256 
0257     frag = &skb_shinfo(skb)->frags[0];
0258     for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
0259         len = skb_frag_size(frag);
0260         dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
0261                        DMA_TO_DEVICE);
0262         if (dma_mapping_error(tx_ring->dev, dma))
0263             goto dma_err;
0264 
0265         *txbd = temp_bd;
0266         enetc_clear_tx_bd(&temp_bd);
0267 
0268         flags = 0;
0269         tx_swbd++;
0270         txbd++;
0271         i++;
0272         if (unlikely(i == tx_ring->bd_count)) {
0273             i = 0;
0274             tx_swbd = tx_ring->tx_swbd;
0275             txbd = ENETC_TXBD(*tx_ring, 0);
0276         }
0277         prefetchw(txbd);
0278 
0279         temp_bd.addr = cpu_to_le64(dma);
0280         temp_bd.buf_len = cpu_to_le16(len);
0281 
0282         tx_swbd->dma = dma;
0283         tx_swbd->len = len;
0284         tx_swbd->is_dma_page = 1;
0285         tx_swbd->dir = DMA_TO_DEVICE;
0286         count++;
0287     }
0288 
0289     /* last BD needs 'F' bit set */
0290     flags |= ENETC_TXBD_FLAGS_F;
0291     temp_bd.flags = flags;
0292     *txbd = temp_bd;
0293 
0294     tx_ring->tx_swbd[i].is_eof = true;
0295     tx_ring->tx_swbd[i].skb = skb;
0296 
0297     enetc_bdr_idx_inc(tx_ring, &i);
0298     tx_ring->next_to_use = i;
0299 
0300     skb_tx_timestamp(skb);
0301 
0302     enetc_update_tx_ring_tail(tx_ring);
0303 
0304     return count;
0305 
0306 dma_err:
0307     dev_err(tx_ring->dev, "DMA map error");
0308 
0309     do {
0310         tx_swbd = &tx_ring->tx_swbd[i];
0311         enetc_free_tx_frame(tx_ring, tx_swbd);
0312         if (i == 0)
0313             i = tx_ring->bd_count;
0314         i--;
0315     } while (count--);
0316 
0317     return 0;
0318 }
0319 
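/* Write the BD for a TSO segment header. The header itself lives in the
 * ring's preallocated tso_headers DMA area (slot *i), so the software BD is
 * not given a DMA address to unmap, only a length for byte accounting. If a
 * VLAN tag is present, a second (extension) BD is emitted and *i is advanced
 * to it.
 */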
0320 static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
0321                  struct enetc_tx_swbd *tx_swbd,
0322                  union enetc_tx_bd *txbd, int *i, int hdr_len,
0323                  int data_len)
0324 {
0325     union enetc_tx_bd txbd_tmp;
0326     u8 flags = 0, e_flags = 0;
0327     dma_addr_t addr;
0328 
0329     enetc_clear_tx_bd(&txbd_tmp);
0330     addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
0331 
0332     if (skb_vlan_tag_present(skb))
0333         flags |= ENETC_TXBD_FLAGS_EX;
0334 
0335     txbd_tmp.addr = cpu_to_le64(addr);
0336     txbd_tmp.buf_len = cpu_to_le16(hdr_len);
0337 
0338     /* first BD needs frm_len and offload flags set */
0339     txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
0340     txbd_tmp.flags = flags;
0341 
0342     /* For the TSO header we do not set the dma address since we do not
0343      * want it unmapped when we do cleanup. We still set len so that we
0344      * count the bytes sent.
0345      */
0346     tx_swbd->len = hdr_len;
0347     tx_swbd->do_twostep_tstamp = false;
0348     tx_swbd->check_wb = false;
0349 
0350     /* Actually write the header in the BD */
0351     *txbd = txbd_tmp;
0352 
0353     /* Add extension BD for VLAN */
0354     if (flags & ENETC_TXBD_FLAGS_EX) {
0355         /* Get the next BD */
0356         enetc_bdr_idx_inc(tx_ring, i);
0357         txbd = ENETC_TXBD(*tx_ring, *i);
0358         tx_swbd = &tx_ring->tx_swbd[*i];
0359         prefetchw(txbd);
0360 
0361         /* Setup the VLAN fields */
0362         enetc_clear_tx_bd(&txbd_tmp);
0363         txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
0364         txbd_tmp.ext.tpid = 0; /* < C-TAG */
0365         e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
0366 
0367         /* Write the BD */
0368         txbd_tmp.ext.e_flags = e_flags;
0369         *txbd = txbd_tmp;
0370     }
0371 }
0372 
0373 static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
0374                  struct enetc_tx_swbd *tx_swbd,
0375                  union enetc_tx_bd *txbd, char *data,
0376                  int size, bool last_bd)
0377 {
0378     union enetc_tx_bd txbd_tmp;
0379     dma_addr_t addr;
0380     u8 flags = 0;
0381 
0382     enetc_clear_tx_bd(&txbd_tmp);
0383 
0384     addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
0385     if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
0386         netdev_err(tx_ring->ndev, "DMA map error\n");
0387         return -ENOMEM;
0388     }
0389 
0390     if (last_bd) {
0391         flags |= ENETC_TXBD_FLAGS_F;
0392         tx_swbd->is_eof = 1;
0393     }
0394 
0395     txbd_tmp.addr = cpu_to_le64(addr);
0396     txbd_tmp.buf_len = cpu_to_le16(size);
0397     txbd_tmp.flags = flags;
0398 
0399     tx_swbd->dma = addr;
0400     tx_swbd->len = size;
0401     tx_swbd->dir = DMA_TO_DEVICE;
0402 
0403     *txbd = txbd_tmp;
0404 
0405     return 0;
0406 }
0407 
0408 static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
0409                  char *hdr, int hdr_len, int *l4_hdr_len)
0410 {
0411     char *l4_hdr = hdr + skb_transport_offset(skb);
0412     int mac_hdr_len = skb_network_offset(skb);
0413 
0414     if (tso->tlen != sizeof(struct udphdr)) {
0415         struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
0416 
0417         tcph->check = 0;
0418     } else {
0419         struct udphdr *udph = (struct udphdr *)(l4_hdr);
0420 
0421         udph->check = 0;
0422     }
0423 
0424     /* Compute the IP checksum. This is necessary since tso_build_hdr()
0425      * already incremented the IP ID field.
0426      */
0427     if (!tso->ipv6) {
0428         struct iphdr *iph = (void *)(hdr + mac_hdr_len);
0429 
0430         iph->check = 0;
0431         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
0432     }
0433 
0434     /* Compute the checksum over the L4 header. */
0435     *l4_hdr_len = hdr_len - skb_transport_offset(skb);
0436     return csum_partial(l4_hdr, *l4_hdr_len, 0);
0437 }
0438 
0439 static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
0440                     struct sk_buff *skb, char *hdr, int len,
0441                     __wsum sum)
0442 {
0443     char *l4_hdr = hdr + skb_transport_offset(skb);
0444     __sum16 csum_final;
0445 
0446     /* Complete the L4 checksum by appending the pseudo-header to the
0447      * already computed checksum.
0448      */
0449     if (!tso->ipv6)
0450         csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
0451                            ip_hdr(skb)->daddr,
0452                            len, ip_hdr(skb)->protocol, sum);
0453     else
0454         csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
0455                          &ipv6_hdr(skb)->daddr,
0456                          len, ipv6_hdr(skb)->nexthdr, sum);
0457 
0458     if (tso->tlen != sizeof(struct udphdr)) {
0459         struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
0460 
0461         tcph->check = csum_final;
0462     } else {
0463         struct udphdr *udph = (struct udphdr *)(l4_hdr);
0464 
0465         udph->check = csum_final;
0466     }
0467 }
0468 
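/* Software TSO path: for each segment, build the MAC/IP/L4 headers into the
 * ring's header buffer, map the payload in chunks of at most tso.size bytes,
 * compute the L4 checksum in software over the header and all data BDs, and
 * patch the completed checksum back into the header. Returns the number of
 * BDs used, or 0 after unwinding on error.
 */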
0469 static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
0470 {
0471     int hdr_len, total_len, data_len;
0472     struct enetc_tx_swbd *tx_swbd;
0473     union enetc_tx_bd *txbd;
0474     struct tso_t tso;
0475     __wsum csum, csum2;
0476     int count = 0, pos;
0477     int err, i, bd_data_num;
0478 
0479     /* Initialize the TSO handler, and prepare the first payload */
0480     hdr_len = tso_start(skb, &tso);
0481     total_len = skb->len - hdr_len;
0482     i = tx_ring->next_to_use;
0483 
0484     while (total_len > 0) {
0485         char *hdr;
0486 
0487         /* Get the BD */
0488         txbd = ENETC_TXBD(*tx_ring, i);
0489         tx_swbd = &tx_ring->tx_swbd[i];
0490         prefetchw(txbd);
0491 
0492         /* Determine the length of this packet */
0493         data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
0494         total_len -= data_len;
0495 
0496         /* prepare packet headers: MAC + IP + TCP */
0497         hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
0498         tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
0499 
0500         /* compute the csum over the L4 header */
0501         csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
0502         enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
0503         bd_data_num = 0;
0504         count++;
0505 
0506         while (data_len > 0) {
0507             int size;
0508 
0509             size = min_t(int, tso.size, data_len);
0510 
0511             /* Advance the index in the BDR */
0512             enetc_bdr_idx_inc(tx_ring, &i);
0513             txbd = ENETC_TXBD(*tx_ring, i);
0514             tx_swbd = &tx_ring->tx_swbd[i];
0515             prefetchw(txbd);
0516 
0517             /* Compute the checksum over this segment of data and
0518              * add it to the csum already computed (over the L4
0519              * header and possible other data segments).
0520              */
0521             csum2 = csum_partial(tso.data, size, 0);
0522             csum = csum_block_add(csum, csum2, pos);
0523             pos += size;
0524 
0525             err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
0526                             tso.data, size,
0527                             size == data_len);
0528             if (err)
0529                 goto err_map_data;
0530 
0531             data_len -= size;
0532             count++;
0533             bd_data_num++;
0534             tso_build_data(skb, &tso, size);
0535 
0536             if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
0537                 goto err_chained_bd;
0538         }
0539 
0540         enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
0541 
0542         if (total_len == 0)
0543             tx_swbd->skb = skb;
0544 
0545         /* Go to the next BD */
0546         enetc_bdr_idx_inc(tx_ring, &i);
0547     }
0548 
0549     tx_ring->next_to_use = i;
0550     enetc_update_tx_ring_tail(tx_ring);
0551 
0552     return count;
0553 
0554 err_map_data:
0555     dev_err(tx_ring->dev, "DMA map error");
0556 
0557 err_chained_bd:
0558     do {
0559         tx_swbd = &tx_ring->tx_swbd[i];
0560         enetc_free_tx_frame(tx_ring, tx_swbd);
0561         if (i == 0)
0562             i = tx_ring->bd_count;
0563         i--;
0564     } while (count--);
0565 
0566     return 0;
0567 }
0568 
0569 static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
0570                     struct net_device *ndev)
0571 {
0572     struct enetc_ndev_priv *priv = netdev_priv(ndev);
0573     struct enetc_bdr *tx_ring;
0574     int count, err;
0575 
0576     /* Queue one-step Sync packet if already locked */
0577     if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
0578         if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
0579                       &priv->flags)) {
0580             skb_queue_tail(&priv->tx_skbs, skb);
0581             return NETDEV_TX_OK;
0582         }
0583     }
0584 
0585     tx_ring = priv->tx_ring[skb->queue_mapping];
0586 
0587     if (skb_is_gso(skb)) {
0588         if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
0589             netif_stop_subqueue(ndev, tx_ring->index);
0590             return NETDEV_TX_BUSY;
0591         }
0592 
0593         enetc_lock_mdio();
0594         count = enetc_map_tx_tso_buffs(tx_ring, skb);
0595         enetc_unlock_mdio();
0596     } else {
0597         if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
0598             if (unlikely(skb_linearize(skb)))
0599                 goto drop_packet_err;
0600 
0601         count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
0602         if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
0603             netif_stop_subqueue(ndev, tx_ring->index);
0604             return NETDEV_TX_BUSY;
0605         }
0606 
0607         if (skb->ip_summed == CHECKSUM_PARTIAL) {
0608             err = skb_checksum_help(skb);
0609             if (err)
0610                 goto drop_packet_err;
0611         }
0612         enetc_lock_mdio();
0613         count = enetc_map_tx_buffs(tx_ring, skb);
0614         enetc_unlock_mdio();
0615     }
0616 
0617     if (unlikely(!count))
0618         goto drop_packet_err;
0619 
0620     if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
0621         netif_stop_subqueue(ndev, tx_ring->index);
0622 
0623     return NETDEV_TX_OK;
0624 
0625 drop_packet_err:
0626     dev_kfree_skb_any(skb);
0627     return NETDEV_TX_OK;
0628 }
0629 
0630 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
0631 {
0632     struct enetc_ndev_priv *priv = netdev_priv(ndev);
0633     u8 udp, msgtype, twostep;
0634     u16 offset1, offset2;
0635 
0636     /* Mark the TX timestamp type in skb->cb[0] if required */
0637     if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
0638         (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
0639         skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
0640     } else {
0641         skb->cb[0] = 0;
0642     }
0643 
0644     /* Fall back to two-step timestamp if not one-step Sync packet */
0645     if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
0646         if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
0647                     &offset1, &offset2) ||
0648             msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
0649             skb->cb[0] = ENETC_F_TX_TSTAMP;
0650     }
0651 
0652     return enetc_start_xmit(skb, ndev);
0653 }
0654 
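/* MSI-X handler: mask this interrupt vector's RX and TX interrupts, program
 * the RX interrupt coalescing timer, and defer the actual work to NAPI.
 */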
0655 static irqreturn_t enetc_msix(int irq, void *data)
0656 {
0657     struct enetc_int_vector *v = data;
0658     int i;
0659 
0660     enetc_lock_mdio();
0661 
0662     /* disable interrupts */
0663     enetc_wr_reg_hot(v->rbier, 0);
0664     enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
0665 
0666     for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
0667         enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
0668 
0669     enetc_unlock_mdio();
0670 
0671     napi_schedule(&v->napi);
0672 
0673     return IRQ_HANDLED;
0674 }
0675 
0676 static void enetc_rx_dim_work(struct work_struct *w)
0677 {
0678     struct dim *dim = container_of(w, struct dim, work);
0679     struct dim_cq_moder moder =
0680         net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
0681     struct enetc_int_vector *v =
0682         container_of(dim, struct enetc_int_vector, rx_dim);
0683 
0684     v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
0685     dim->state = DIM_START_MEASURE;
0686 }
0687 
0688 static void enetc_rx_net_dim(struct enetc_int_vector *v)
0689 {
0690     struct dim_sample dim_sample = {};
0691 
0692     v->comp_cnt++;
0693 
0694     if (!v->rx_napi_work)
0695         return;
0696 
0697     dim_update_sample(v->comp_cnt,
0698               v->rx_ring.stats.packets,
0699               v->rx_ring.stats.bytes,
0700               &dim_sample);
0701     net_dim(&v->rx_dim, dim_sample);
0702 }
0703 
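/* Number of TX BDs the hardware has completed, i.e. the distance from the
 * software clean index ci to the index reported by the hardware, accounting
 * for ring wraparound. For example, with bd_count = 256, ci = 250 and a
 * hardware index of 4, the result is 256 - 250 + 4 = 10.
 */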
0704 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
0705 {
0706     int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
0707 
0708     return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
0709 }
0710 
0711 static bool enetc_page_reusable(struct page *page)
0712 {
0713     return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
0714 }
0715 
0716 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
0717                  struct enetc_rx_swbd *old)
0718 {
0719     struct enetc_rx_swbd *new;
0720 
0721     new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
0722 
0723     /* next buf that may reuse a page */
0724     enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
0725 
0726     /* copy page reference */
0727     *new = *old;
0728 }
0729 
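/* Reconstruct a 64-bit TX timestamp from the 32-bit value written back in
 * the BD: pair it with the current upper 32 bits of the SI free-running
 * clock, and subtract one from the upper half if the lower half has already
 * wrapped past the captured value.
 */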
0730 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
0731                 u64 *tstamp)
0732 {
0733     u32 lo, hi, tstamp_lo;
0734 
0735     lo = enetc_rd_hot(hw, ENETC_SICTR0);
0736     hi = enetc_rd_hot(hw, ENETC_SICTR1);
0737     tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
0738     if (lo <= tstamp_lo)
0739         hi -= 1;
0740     *tstamp = (u64)hi << 32 | tstamp_lo;
0741 }
0742 
0743 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
0744 {
0745     struct skb_shared_hwtstamps shhwtstamps;
0746 
0747     if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
0748         memset(&shhwtstamps, 0, sizeof(shhwtstamps));
0749         shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
0750         skb_txtime_consumed(skb);
0751         skb_tstamp_tx(skb, &shhwtstamps);
0752     }
0753 }
0754 
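/* On completion of an XDP_TX buffer, hand its page back to the RX ring it
 * was taken from: if that ring has a free software BD, the page is reused
 * and re-synced for device access, otherwise it is unmapped and freed.
 * Either way, the ring's xdp_tx_in_flight counter is decremented.
 */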
0755 static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
0756                       struct enetc_tx_swbd *tx_swbd)
0757 {
0758     struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
0759     struct enetc_rx_swbd rx_swbd = {
0760         .dma = tx_swbd->dma,
0761         .page = tx_swbd->page,
0762         .page_offset = tx_swbd->page_offset,
0763         .dir = tx_swbd->dir,
0764         .len = tx_swbd->len,
0765     };
0766     struct enetc_bdr *rx_ring;
0767 
0768     rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
0769 
0770     if (likely(enetc_swbd_unused(rx_ring))) {
0771         enetc_reuse_page(rx_ring, &rx_swbd);
0772 
0773         /* sync for use by the device */
0774         dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
0775                          rx_swbd.page_offset,
0776                          ENETC_RXB_DMA_SIZE_XDP,
0777                          rx_swbd.dir);
0778 
0779         rx_ring->stats.recycles++;
0780     } else {
0781         /* RX ring is already full, we need to unmap and free the
0782          * page, since there's nothing useful we can do with it.
0783          */
0784         rx_ring->stats.recycle_failures++;
0785 
0786         dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
0787                    rx_swbd.dir);
0788         __free_page(rx_swbd.page);
0789     }
0790 
0791     rx_ring->xdp.xdp_tx_in_flight--;
0792 }
0793 
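/* TX completion, run from NAPI: walk the BDs the hardware has finished with,
 * up to ENETC_DEFAULT_TX_WORK frames. Collect two-step timestamps and Qbv
 * window-drop status from write-back BDs, recycle XDP_TX pages or unmap
 * buffers, free skbs and xdp_frames, update stats, and wake the subqueue if
 * it was stopped and enough BDs are free again. Returns true when the work
 * limit was not exhausted, allowing NAPI to complete.
 */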
0794 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
0795 {
0796     int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
0797     struct net_device *ndev = tx_ring->ndev;
0798     struct enetc_ndev_priv *priv = netdev_priv(ndev);
0799     struct enetc_tx_swbd *tx_swbd;
0800     int i, bds_to_clean;
0801     bool do_twostep_tstamp;
0802     u64 tstamp = 0;
0803 
0804     i = tx_ring->next_to_clean;
0805     tx_swbd = &tx_ring->tx_swbd[i];
0806 
0807     bds_to_clean = enetc_bd_ready_count(tx_ring, i);
0808 
0809     do_twostep_tstamp = false;
0810 
0811     while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
0812         struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
0813         struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
0814         bool is_eof = tx_swbd->is_eof;
0815 
0816         if (unlikely(tx_swbd->check_wb)) {
0817             union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
0818 
0819             if (txbd->flags & ENETC_TXBD_FLAGS_W &&
0820                 tx_swbd->do_twostep_tstamp) {
0821                 enetc_get_tx_tstamp(&priv->si->hw, txbd,
0822                             &tstamp);
0823                 do_twostep_tstamp = true;
0824             }
0825 
0826             if (tx_swbd->qbv_en &&
0827                 txbd->wb.status & ENETC_TXBD_STATS_WIN)
0828                 tx_win_drop++;
0829         }
0830 
0831         if (tx_swbd->is_xdp_tx)
0832             enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
0833         else if (likely(tx_swbd->dma))
0834             enetc_unmap_tx_buff(tx_ring, tx_swbd);
0835 
0836         if (xdp_frame) {
0837             xdp_return_frame(xdp_frame);
0838         } else if (skb) {
0839             if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
0840                 /* Start work to release the lock for the next
0841                  * one-step timestamping packet, and to send one
0842                  * skb from the tx_skbs queue if any is pending.
0843                  */
0844                 schedule_work(&priv->tx_onestep_tstamp);
0845             } else if (unlikely(do_twostep_tstamp)) {
0846                 enetc_tstamp_tx(skb, tstamp);
0847                 do_twostep_tstamp = false;
0848             }
0849             napi_consume_skb(skb, napi_budget);
0850         }
0851 
0852         tx_byte_cnt += tx_swbd->len;
0853         /* Scrub the swbd here so we don't have to do that
0854          * when we reuse it during xmit
0855          */
0856         memset(tx_swbd, 0, sizeof(*tx_swbd));
0857 
0858         bds_to_clean--;
0859         tx_swbd++;
0860         i++;
0861         if (unlikely(i == tx_ring->bd_count)) {
0862             i = 0;
0863             tx_swbd = tx_ring->tx_swbd;
0864         }
0865 
0866         /* BD iteration loop end */
0867         if (is_eof) {
0868             tx_frm_cnt++;
0869             /* re-arm interrupt source */
0870             enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
0871                      BIT(16 + tx_ring->index));
0872         }
0873 
0874         if (unlikely(!bds_to_clean))
0875             bds_to_clean = enetc_bd_ready_count(tx_ring, i);
0876     }
0877 
0878     tx_ring->next_to_clean = i;
0879     tx_ring->stats.packets += tx_frm_cnt;
0880     tx_ring->stats.bytes += tx_byte_cnt;
0881     tx_ring->stats.win_drop += tx_win_drop;
0882 
0883     if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
0884              __netif_subqueue_stopped(ndev, tx_ring->index) &&
0885              (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
0886         netif_wake_subqueue(ndev, tx_ring->index);
0887     }
0888 
0889     return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
0890 }
0891 
0892 static bool enetc_new_page(struct enetc_bdr *rx_ring,
0893                struct enetc_rx_swbd *rx_swbd)
0894 {
0895     bool xdp = !!(rx_ring->xdp.prog);
0896     struct page *page;
0897     dma_addr_t addr;
0898 
0899     page = dev_alloc_page();
0900     if (unlikely(!page))
0901         return false;
0902 
0903     /* For XDP_TX, we forgo dma_unmap -> dma_map */
0904     rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
0905 
0906     addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
0907     if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
0908         __free_page(page);
0909 
0910         return false;
0911     }
0912 
0913     rx_swbd->dma = addr;
0914     rx_swbd->page = page;
0915     rx_swbd->page_offset = rx_ring->buffer_offset;
0916 
0917     return true;
0918 }
0919 
0920 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
0921 {
0922     struct enetc_rx_swbd *rx_swbd;
0923     union enetc_rx_bd *rxbd;
0924     int i, j;
0925 
0926     i = rx_ring->next_to_use;
0927     rx_swbd = &rx_ring->rx_swbd[i];
0928     rxbd = enetc_rxbd(rx_ring, i);
0929 
0930     for (j = 0; j < buff_cnt; j++) {
0931         /* try to reuse the page */
0932         if (unlikely(!rx_swbd->page)) {
0933             if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
0934                 rx_ring->stats.rx_alloc_errs++;
0935                 break;
0936             }
0937         }
0938 
0939         /* update RxBD */
0940         rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
0941                        rx_swbd->page_offset);
0942         /* clear 'R" as well */
0943         rxbd->r.lstatus = 0;
0944 
0945         enetc_rxbd_next(rx_ring, &rxbd, &i);
0946         rx_swbd = &rx_ring->rx_swbd[i];
0947     }
0948 
0949     if (likely(j)) {
0950         rx_ring->next_to_alloc = i; /* keep track for page reuse */
0951         rx_ring->next_to_use = i;
0952 
0953         /* update ENETC's consumer index */
0954         enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
0955     }
0956 
0957     return j;
0958 }
0959 
0960 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
0961 static void enetc_get_rx_tstamp(struct net_device *ndev,
0962                 union enetc_rx_bd *rxbd,
0963                 struct sk_buff *skb)
0964 {
0965     struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
0966     struct enetc_ndev_priv *priv = netdev_priv(ndev);
0967     struct enetc_hw *hw = &priv->si->hw;
0968     u32 lo, hi, tstamp_lo;
0969     u64 tstamp;
0970 
0971     if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
0972         lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
0973         hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
0974         rxbd = enetc_rxbd_ext(rxbd);
0975         tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
0976         if (lo <= tstamp_lo)
0977             hi -= 1;
0978 
0979         tstamp = (u64)hi << 32 | tstamp_lo;
0980         memset(shhwtstamps, 0, sizeof(*shhwtstamps));
0981         shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
0982     }
0983 }
0984 #endif
0985 
0986 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
0987                    union enetc_rx_bd *rxbd, struct sk_buff *skb)
0988 {
0989     struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
0990 
0991     /* TODO: hashing */
0992     if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
0993         u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
0994 
0995         skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
0996         skb->ip_summed = CHECKSUM_COMPLETE;
0997     }
0998 
0999     if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
1000         __be16 tpid = 0;
1001 
1002         switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
1003         case 0:
1004             tpid = htons(ETH_P_8021Q);
1005             break;
1006         case 1:
1007             tpid = htons(ETH_P_8021AD);
1008             break;
1009         case 2:
1010             tpid = htons(enetc_port_rd(&priv->si->hw,
1011                            ENETC_PCVLANR1));
1012             break;
1013         case 3:
1014             tpid = htons(enetc_port_rd(&priv->si->hw,
1015                            ENETC_PCVLANR2));
1016             break;
1017         default:
1018             break;
1019         }
1020 
1021         __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
1022     }
1023 
1024 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1025     if (priv->active_offloads & ENETC_F_RX_TSTAMP)
1026         enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
1027 #endif
1028 }
1029 
1030 /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
1031  * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
1032  * mapped buffers.
1033  */
1034 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
1035                            int i, u16 size)
1036 {
1037     struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
1038 
1039     dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
1040                       rx_swbd->page_offset,
1041                       size, rx_swbd->dir);
1042     return rx_swbd;
1043 }
1044 
1045 /* Reuse the current page without performing half-page buffer flipping */
1046 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
1047                   struct enetc_rx_swbd *rx_swbd)
1048 {
1049     size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
1050 
1051     enetc_reuse_page(rx_ring, rx_swbd);
1052 
1053     dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
1054                      rx_swbd->page_offset,
1055                      buffer_size, rx_swbd->dir);
1056 
1057     rx_swbd->page = NULL;
1058 }
1059 
1060 /* Reuse the current page by performing half-page buffer flipping */
1061 static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
1062                    struct enetc_rx_swbd *rx_swbd)
1063 {
1064     if (likely(enetc_page_reusable(rx_swbd->page))) {
1065         rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
1066         page_ref_inc(rx_swbd->page);
1067 
1068         enetc_put_rx_buff(rx_ring, rx_swbd);
1069     } else {
1070         dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
1071                    rx_swbd->dir);
1072         rx_swbd->page = NULL;
1073     }
1074 }
1075 
1076 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
1077                         int i, u16 size)
1078 {
1079     struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1080     struct sk_buff *skb;
1081     void *ba;
1082 
1083     ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
1084     skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
1085     if (unlikely(!skb)) {
1086         rx_ring->stats.rx_alloc_errs++;
1087         return NULL;
1088     }
1089 
1090     skb_reserve(skb, rx_ring->buffer_offset);
1091     __skb_put(skb, size);
1092 
1093     enetc_flip_rx_buff(rx_ring, rx_swbd);
1094 
1095     return skb;
1096 }
1097 
1098 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
1099                      u16 size, struct sk_buff *skb)
1100 {
1101     struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1102 
1103     skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
1104             rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
1105 
1106     enetc_flip_rx_buff(rx_ring, rx_swbd);
1107 }
1108 
1109 static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
1110                           u32 bd_status,
1111                           union enetc_rx_bd **rxbd, int *i)
1112 {
1113     if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
1114         return false;
1115 
1116     enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
1117     enetc_rxbd_next(rx_ring, rxbd, i);
1118 
1119     while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1120         dma_rmb();
1121         bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1122 
1123         enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
1124         enetc_rxbd_next(rx_ring, rxbd, i);
1125     }
1126 
1127     rx_ring->ndev->stats.rx_dropped++;
1128     rx_ring->ndev->stats.rx_errors++;
1129 
1130     return true;
1131 }
1132 
1133 static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
1134                        u32 bd_status, union enetc_rx_bd **rxbd,
1135                        int *i, int *cleaned_cnt, int buffer_size)
1136 {
1137     struct sk_buff *skb;
1138     u16 size;
1139 
1140     size = le16_to_cpu((*rxbd)->r.buf_len);
1141     skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
1142     if (!skb)
1143         return NULL;
1144 
1145     enetc_get_offloads(rx_ring, *rxbd, skb);
1146 
1147     (*cleaned_cnt)++;
1148 
1149     enetc_rxbd_next(rx_ring, rxbd, i);
1150 
1151     /* not last BD in frame? */
1152     while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1153         bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1154         size = buffer_size;
1155 
1156         if (bd_status & ENETC_RXBD_LSTATUS_F) {
1157             dma_rmb();
1158             size = le16_to_cpu((*rxbd)->r.buf_len);
1159         }
1160 
1161         enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
1162 
1163         (*cleaned_cnt)++;
1164 
1165         enetc_rxbd_next(rx_ring, rxbd, i);
1166     }
1167 
1168     skb_record_rx_queue(skb, rx_ring->index);
1169     skb->protocol = eth_type_trans(skb, rx_ring->ndev);
1170 
1171     return skb;
1172 }
1173 
1174 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
1175 
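/* Non-XDP RX poll: refill the ring in bundles of ENETC_RXBD_BUNDLE buffers,
 * then for each completed frame (possibly spanning several BDs) build an
 * skb, attach the offload results (checksum, VLAN, RX timestamp) and pass it
 * to GRO. Stops after work_limit frames or when no ready BD is left.
 */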
1176 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
1177                    struct napi_struct *napi, int work_limit)
1178 {
1179     int rx_frm_cnt = 0, rx_byte_cnt = 0;
1180     int cleaned_cnt, i;
1181 
1182     cleaned_cnt = enetc_bd_unused(rx_ring);
1183     /* next descriptor to process */
1184     i = rx_ring->next_to_clean;
1185 
1186     while (likely(rx_frm_cnt < work_limit)) {
1187         union enetc_rx_bd *rxbd;
1188         struct sk_buff *skb;
1189         u32 bd_status;
1190 
1191         if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
1192             cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
1193                                 cleaned_cnt);
1194 
1195         rxbd = enetc_rxbd(rx_ring, i);
1196         bd_status = le32_to_cpu(rxbd->r.lstatus);
1197         if (!bd_status)
1198             break;
1199 
1200         enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
1201         dma_rmb(); /* for reading other rxbd fields */
1202 
1203         if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
1204                               &rxbd, &i))
1205             break;
1206 
1207         skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
1208                       &cleaned_cnt, ENETC_RXB_DMA_SIZE);
1209         if (!skb)
1210             break;
1211 
1212         rx_byte_cnt += skb->len;
1213         rx_frm_cnt++;
1214 
1215         napi_gro_receive(napi, skb);
1216     }
1217 
1218     rx_ring->next_to_clean = i;
1219 
1220     rx_ring->stats.packets += rx_frm_cnt;
1221     rx_ring->stats.bytes += rx_byte_cnt;
1222 
1223     return rx_frm_cnt;
1224 }
1225 
1226 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
1227                   struct enetc_tx_swbd *tx_swbd,
1228                   int frm_len)
1229 {
1230     union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1231 
1232     prefetchw(txbd);
1233 
1234     enetc_clear_tx_bd(txbd);
1235     txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
1236     txbd->buf_len = cpu_to_le16(tx_swbd->len);
1237     txbd->frm_len = cpu_to_le16(frm_len);
1238 
1239     memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
1240 }
1241 
1242 /* Put one XDP frame, mapped as an array of TX software buffer descriptors,
1243  * into the TX ring.
1244  */
1245 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
1246              struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
1247 {
1248     struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
1249     int i, k, frm_len = tmp_tx_swbd->len;
1250 
1251     if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
1252         return false;
1253 
1254     while (unlikely(!tmp_tx_swbd->is_eof)) {
1255         tmp_tx_swbd++;
1256         frm_len += tmp_tx_swbd->len;
1257     }
1258 
1259     i = tx_ring->next_to_use;
1260 
1261     for (k = 0; k < num_tx_swbd; k++) {
1262         struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
1263 
1264         enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
1265 
1266         /* last BD needs 'F' bit set */
1267         if (xdp_tx_swbd->is_eof) {
1268             union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1269 
1270             txbd->flags = ENETC_TXBD_FLAGS_F;
1271         }
1272 
1273         enetc_bdr_idx_inc(tx_ring, &i);
1274     }
1275 
1276     tx_ring->next_to_use = i;
1277 
1278     return true;
1279 }
1280 
1281 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
1282                       struct enetc_tx_swbd *xdp_tx_arr,
1283                       struct xdp_frame *xdp_frame)
1284 {
1285     struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
1286     struct skb_shared_info *shinfo;
1287     void *data = xdp_frame->data;
1288     int len = xdp_frame->len;
1289     skb_frag_t *frag;
1290     dma_addr_t dma;
1291     unsigned int f;
1292     int n = 0;
1293 
1294     dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1295     if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1296         netdev_err(tx_ring->ndev, "DMA map error\n");
1297         return -1;
1298     }
1299 
1300     xdp_tx_swbd->dma = dma;
1301     xdp_tx_swbd->dir = DMA_TO_DEVICE;
1302     xdp_tx_swbd->len = len;
1303     xdp_tx_swbd->is_xdp_redirect = true;
1304     xdp_tx_swbd->is_eof = false;
1305     xdp_tx_swbd->xdp_frame = NULL;
1306 
1307     n++;
1308     xdp_tx_swbd = &xdp_tx_arr[n];
1309 
1310     shinfo = xdp_get_shared_info_from_frame(xdp_frame);
1311 
1312     for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
1313          f++, frag++) {
1314         data = skb_frag_address(frag);
1315         len = skb_frag_size(frag);
1316 
1317         dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1318         if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1319             /* Undo the DMA mapping for all fragments */
1320             while (--n >= 0)
1321                 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
1322 
1323             netdev_err(tx_ring->ndev, "DMA map error\n");
1324             return -1;
1325         }
1326 
1327         xdp_tx_swbd->dma = dma;
1328         xdp_tx_swbd->dir = DMA_TO_DEVICE;
1329         xdp_tx_swbd->len = len;
1330         xdp_tx_swbd->is_xdp_redirect = true;
1331         xdp_tx_swbd->is_eof = false;
1332         xdp_tx_swbd->xdp_frame = NULL;
1333 
1334         n++;
1335         xdp_tx_swbd = &xdp_tx_arr[n];
1336     }
1337 
1338     xdp_tx_arr[n - 1].is_eof = true;
1339     xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
1340 
1341     return n;
1342 }
1343 
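/* .ndo_xdp_xmit implementation: transmit redirected XDP frames on the XDP TX
 * ring of the current CPU. Each frame (head plus fragments) is converted to
 * software BDs and enqueued; processing stops at the first frame that cannot
 * be mapped or enqueued. The tail pointer is kicked when XDP_XMIT_FLUSH is
 * set or when not every frame made it in. Returns the number of frames
 * accepted.
 */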
1344 int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
1345            struct xdp_frame **frames, u32 flags)
1346 {
1347     struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
1348     struct enetc_ndev_priv *priv = netdev_priv(ndev);
1349     struct enetc_bdr *tx_ring;
1350     int xdp_tx_bd_cnt, i, k;
1351     int xdp_tx_frm_cnt = 0;
1352 
1353     enetc_lock_mdio();
1354 
1355     tx_ring = priv->xdp_tx_ring[smp_processor_id()];
1356 
1357     prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
1358 
1359     for (k = 0; k < num_frames; k++) {
1360         xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
1361                                    xdp_redirect_arr,
1362                                    frames[k]);
1363         if (unlikely(xdp_tx_bd_cnt < 0))
1364             break;
1365 
1366         if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
1367                        xdp_tx_bd_cnt))) {
1368             for (i = 0; i < xdp_tx_bd_cnt; i++)
1369                 enetc_unmap_tx_buff(tx_ring,
1370                             &xdp_redirect_arr[i]);
1371             tx_ring->stats.xdp_tx_drops++;
1372             break;
1373         }
1374 
1375         xdp_tx_frm_cnt++;
1376     }
1377 
1378     if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
1379         enetc_update_tx_ring_tail(tx_ring);
1380 
1381     tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
1382 
1383     enetc_unlock_mdio();
1384 
1385     return xdp_tx_frm_cnt;
1386 }
1387 
1388 static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1389                      struct xdp_buff *xdp_buff, u16 size)
1390 {
1391     struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1392     void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
1393     struct skb_shared_info *shinfo;
1394 
1395     /* To be used for XDP_TX */
1396     rx_swbd->len = size;
1397 
1398     xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
1399              rx_ring->buffer_offset, size, false);
1400 
1401     shinfo = xdp_get_shared_info_from_buff(xdp_buff);
1402     shinfo->nr_frags = 0;
1403 }
1404 
1405 static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1406                      u16 size, struct xdp_buff *xdp_buff)
1407 {
1408     struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
1409     struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1410     skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
1411 
1412     /* To be used for XDP_TX */
1413     rx_swbd->len = size;
1414 
1415     skb_frag_off_set(frag, rx_swbd->page_offset);
1416     skb_frag_size_set(frag, size);
1417     __skb_frag_set_page(frag, rx_swbd->page);
1418 
1419     shinfo->nr_frags++;
1420 }
1421 
1422 static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
1423                  union enetc_rx_bd **rxbd, int *i,
1424                  int *cleaned_cnt, struct xdp_buff *xdp_buff)
1425 {
1426     u16 size = le16_to_cpu((*rxbd)->r.buf_len);
1427 
1428     xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
1429 
1430     enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
1431     (*cleaned_cnt)++;
1432     enetc_rxbd_next(rx_ring, rxbd, i);
1433 
1434     /* not last BD in frame? */
1435     while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1436         bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1437         size = ENETC_RXB_DMA_SIZE_XDP;
1438 
1439         if (bd_status & ENETC_RXBD_LSTATUS_F) {
1440             dma_rmb();
1441             size = le16_to_cpu((*rxbd)->r.buf_len);
1442         }
1443 
1444         enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
1445         (*cleaned_cnt)++;
1446         enetc_rxbd_next(rx_ring, rxbd, i);
1447     }
1448 }
1449 
1450 /* Convert RX buffer descriptors to TX buffer descriptors. These will be
1451  * recycled back into the RX ring in enetc_clean_tx_ring.
1452  */
1453 static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
1454                     struct enetc_bdr *rx_ring,
1455                     int rx_ring_first, int rx_ring_last)
1456 {
1457     int n = 0;
1458 
1459     for (; rx_ring_first != rx_ring_last;
1460          n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
1461         struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
1462         struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
1463 
1464         /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
1465         tx_swbd->dma = rx_swbd->dma;
1466         tx_swbd->dir = rx_swbd->dir;
1467         tx_swbd->page = rx_swbd->page;
1468         tx_swbd->page_offset = rx_swbd->page_offset;
1469         tx_swbd->len = rx_swbd->len;
1470         tx_swbd->is_dma_page = true;
1471         tx_swbd->is_xdp_tx = true;
1472         tx_swbd->is_eof = false;
1473     }
1474 
1475     /* We rely on the caller providing rx_ring_last > rx_ring_first */
1476     xdp_tx_arr[n - 1].is_eof = true;
1477 
1478     return n;
1479 }
1480 
1481 static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
1482                int rx_ring_last)
1483 {
1484     while (rx_ring_first != rx_ring_last) {
1485         enetc_put_rx_buff(rx_ring,
1486                   &rx_ring->rx_swbd[rx_ring_first]);
1487         enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1488     }
1489     rx_ring->stats.xdp_drops++;
1490 }
1491 
1492 static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
1493                int rx_ring_last)
1494 {
1495     while (rx_ring_first != rx_ring_last) {
1496         struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
1497 
1498         if (rx_swbd->page) {
1499             dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
1500                        rx_swbd->dir);
1501             __free_page(rx_swbd->page);
1502             rx_swbd->page = NULL;
1503         }
1504         enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1505     }
1506     rx_ring->stats.xdp_redirect_failures++;
1507 }
1508 
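/* XDP RX poll: build a (possibly multi-buffer) xdp_buff per frame, run the
 * BPF program and act on its verdict. XDP_DROP/XDP_ABORTED recycle the
 * buffers back into the ring, XDP_PASS rewinds to the first BD and builds a
 * regular skb, XDP_TX converts the RX buffers in place into BDs on the
 * paired XDP TX ring, and XDP_REDIRECT hands single-buffer frames to
 * xdp_do_redirect() (S/G frames are dropped to avoid leaking fragments).
 */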
1509 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
1510                    struct napi_struct *napi, int work_limit,
1511                    struct bpf_prog *prog)
1512 {
1513     int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
1514     struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
1515     struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
1516     int rx_frm_cnt = 0, rx_byte_cnt = 0;
1517     struct enetc_bdr *tx_ring;
1518     int cleaned_cnt, i;
1519     u32 xdp_act;
1520 
1521     cleaned_cnt = enetc_bd_unused(rx_ring);
1522     /* next descriptor to process */
1523     i = rx_ring->next_to_clean;
1524 
1525     while (likely(rx_frm_cnt < work_limit)) {
1526         union enetc_rx_bd *rxbd, *orig_rxbd;
1527         int orig_i, orig_cleaned_cnt;
1528         struct xdp_buff xdp_buff;
1529         struct sk_buff *skb;
1530         int tmp_orig_i, err;
1531         u32 bd_status;
1532 
1533         rxbd = enetc_rxbd(rx_ring, i);
1534         bd_status = le32_to_cpu(rxbd->r.lstatus);
1535         if (!bd_status)
1536             break;
1537 
1538         enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
1539         dma_rmb(); /* for reading other rxbd fields */
1540 
1541         if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
1542                               &rxbd, &i))
1543             break;
1544 
1545         orig_rxbd = rxbd;
1546         orig_cleaned_cnt = cleaned_cnt;
1547         orig_i = i;
1548 
1549         enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
1550                      &cleaned_cnt, &xdp_buff);
1551 
1552         xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
1553 
1554         switch (xdp_act) {
1555         default:
1556             bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
1557             fallthrough;
1558         case XDP_ABORTED:
1559             trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
1560             fallthrough;
1561         case XDP_DROP:
1562             enetc_xdp_drop(rx_ring, orig_i, i);
1563             break;
1564         case XDP_PASS:
1565             rxbd = orig_rxbd;
1566             cleaned_cnt = orig_cleaned_cnt;
1567             i = orig_i;
1568 
1569             skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
1570                           &i, &cleaned_cnt,
1571                           ENETC_RXB_DMA_SIZE_XDP);
1572             if (unlikely(!skb))
1573                 goto out;
1574 
1575             napi_gro_receive(napi, skb);
1576             break;
1577         case XDP_TX:
1578             tx_ring = priv->xdp_tx_ring[rx_ring->index];
1579             xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
1580                                      rx_ring,
1581                                      orig_i, i);
1582 
1583             if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
1584                 enetc_xdp_drop(rx_ring, orig_i, i);
1585                 tx_ring->stats.xdp_tx_drops++;
1586             } else {
1587                 tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
1588                 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
1589                 xdp_tx_frm_cnt++;
1590                 /* The XDP_TX enqueue was successful, so we
1591                  * need to scrub the RX software BDs because
1592                  * the ownership of the buffers no longer
1593                  * belongs to the RX ring, and we must prevent
1594                  * enetc_refill_rx_ring() from reusing
1595                  * rx_swbd->page.
1596                  */
1597                 while (orig_i != i) {
1598                     rx_ring->rx_swbd[orig_i].page = NULL;
1599                     enetc_bdr_idx_inc(rx_ring, &orig_i);
1600                 }
1601             }
1602             break;
1603         case XDP_REDIRECT:
1604             /* xdp_return_frame does not support S/G in the sense
1605              * that it leaks the fragments (__xdp_return should not
1606              * call page_frag_free only for the initial buffer).
1607              * Until XDP_REDIRECT gains support for S/G let's keep
1608              * the code structure in place, but dead. We drop the
1609              * S/G frames ourselves to avoid memory leaks which
1610              * would otherwise leave the kernel OOM.
1611              */
1612             if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
1613                 enetc_xdp_drop(rx_ring, orig_i, i);
1614                 rx_ring->stats.xdp_redirect_sg++;
1615                 break;
1616             }
1617 
1618             tmp_orig_i = orig_i;
1619 
1620             while (orig_i != i) {
1621                 enetc_flip_rx_buff(rx_ring,
1622                            &rx_ring->rx_swbd[orig_i]);
1623                 enetc_bdr_idx_inc(rx_ring, &orig_i);
1624             }
1625 
1626             err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
1627             if (unlikely(err)) {
1628                 enetc_xdp_free(rx_ring, tmp_orig_i, i);
1629             } else {
1630                 xdp_redirect_frm_cnt++;
1631                 rx_ring->stats.xdp_redirect++;
1632             }
1633         }
1634 
1635         rx_frm_cnt++;
1636     }
1637 
1638 out:
1639     rx_ring->next_to_clean = i;
1640 
1641     rx_ring->stats.packets += rx_frm_cnt;
1642     rx_ring->stats.bytes += rx_byte_cnt;
1643 
1644     if (xdp_redirect_frm_cnt)
1645         xdp_do_flush_map();
1646 
1647     if (xdp_tx_frm_cnt)
1648         enetc_update_tx_ring_tail(tx_ring);
1649 
1650     if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
1651         enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
1652                      rx_ring->xdp.xdp_tx_in_flight);
1653 
1654     return rx_frm_cnt;
1655 }
1656 
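/* For reference, the XDP verdicts dispatched on above come from a BPF program
 * attached through enetc_setup_bpf(). A minimal sketch of such a program is
 * shown below; it is not part of this driver and assumes it is built
 * separately with clang -target bpf against the libbpf headers. Returning
 * XDP_TX instead would exercise the enetc_xdp_tx() path, and XDP_REDIRECT the
 * xdp_do_redirect() path.
 */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	/* let every frame continue to the regular enetc_build_skb() path */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
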
1657 static int enetc_poll(struct napi_struct *napi, int budget)
1658 {
1659     struct enetc_int_vector
1660         *v = container_of(napi, struct enetc_int_vector, napi);
1661     struct enetc_bdr *rx_ring = &v->rx_ring;
1662     struct bpf_prog *prog;
1663     bool complete = true;
1664     int work_done;
1665     int i;
1666 
1667     enetc_lock_mdio();
1668 
1669     for (i = 0; i < v->count_tx_rings; i++)
1670         if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
1671             complete = false;
1672 
1673     prog = rx_ring->xdp.prog;
1674     if (prog)
1675         work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
1676     else
1677         work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
1678     if (work_done == budget)
1679         complete = false;
1680     if (work_done)
1681         v->rx_napi_work = true;
1682 
1683     if (!complete) {
1684         enetc_unlock_mdio();
1685         return budget;
1686     }
1687 
1688     napi_complete_done(napi, work_done);
1689 
1690     if (likely(v->rx_dim_en))
1691         enetc_rx_net_dim(v);
1692 
1693     v->rx_napi_work = false;
1694 
1695     /* enable interrupts */
1696     enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
1697 
1698     for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
1699         enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
1700                  ENETC_TBIER_TXTIE);
1701 
1702     enetc_unlock_mdio();
1703 
1704     return work_done;
1705 }
1706 
1707 /* Probing and Init */
1708 #define ENETC_MAX_RFS_SIZE 64
1709 void enetc_get_si_caps(struct enetc_si *si)
1710 {
1711     struct enetc_hw *hw = &si->hw;
1712     u32 val;
1713 
1714     /* find out how many of various resources we have to work with */
1715     val = enetc_rd(hw, ENETC_SICAPR0);
1716     si->num_rx_rings = (val >> 16) & 0xff;
1717     si->num_tx_rings = val & 0xff;
1718 
1719     val = enetc_rd(hw, ENETC_SIRFSCAPR);
1720     si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
1721     si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
1722 
1723     si->num_rss = 0;
1724     val = enetc_rd(hw, ENETC_SIPCAPR0);
1725     if (val & ENETC_SIPCAPR0_RSS) {
1726         u32 rss;
1727 
1728         rss = enetc_rd(hw, ENETC_SIRSSCAPR);
1729         si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
1730     }
1731 
1732     if (val & ENETC_SIPCAPR0_QBV)
1733         si->hw_features |= ENETC_SI_F_QBV;
1734 
1735     if (val & ENETC_SIPCAPR0_PSFP)
1736         si->hw_features |= ENETC_SI_F_PSFP;
1737 }
1738 
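/* A quick check of the SICAPR0 unpacking above: the RX ring count sits in
 * bits 23:16 and the TX ring count in bits 7:0. The register value below is
 * made up purely for illustration; real values come from enetc_rd().
 */

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x00100010;	/* hypothetical SICAPR0 readout */

	printf("rx rings %u, tx rings %u\n",
	       (val >> 16) & 0xff, val & 0xff);	/* prints 16 and 16 */
	return 0;
}
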
1739 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
1740 {
1741     r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
1742                     &r->bd_dma_base, GFP_KERNEL);
1743     if (!r->bd_base)
1744         return -ENOMEM;
1745 
1746     /* h/w requires 128B alignment */
1747     if (!IS_ALIGNED(r->bd_dma_base, 128)) {
1748         dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
1749                   r->bd_dma_base);
1750         return -EINVAL;
1751     }
1752 
1753     return 0;
1754 }
1755 
1756 static int enetc_alloc_txbdr(struct enetc_bdr *txr)
1757 {
1758     int err;
1759 
1760     txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
1761     if (!txr->tx_swbd)
1762         return -ENOMEM;
1763 
1764     err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
1765     if (err)
1766         goto err_alloc_bdr;
1767 
1768     txr->tso_headers = dma_alloc_coherent(txr->dev,
1769                           txr->bd_count * TSO_HEADER_SIZE,
1770                           &txr->tso_headers_dma,
1771                           GFP_KERNEL);
1772     if (!txr->tso_headers) {
1773         err = -ENOMEM;
1774         goto err_alloc_tso;
1775     }
1776 
1777     txr->next_to_clean = 0;
1778     txr->next_to_use = 0;
1779 
1780     return 0;
1781 
1782 err_alloc_tso:
1783     dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
1784               txr->bd_base, txr->bd_dma_base);
1785     txr->bd_base = NULL;
1786 err_alloc_bdr:
1787     vfree(txr->tx_swbd);
1788     txr->tx_swbd = NULL;
1789 
1790     return err;
1791 }
1792 
1793 static void enetc_free_txbdr(struct enetc_bdr *txr)
1794 {
1795     int size, i;
1796 
1797     for (i = 0; i < txr->bd_count; i++)
1798         enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
1799 
1800     size = txr->bd_count * sizeof(union enetc_tx_bd);
1801 
1802     dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
1803               txr->tso_headers, txr->tso_headers_dma);
1804     txr->tso_headers = NULL;
1805 
1806     dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
1807     txr->bd_base = NULL;
1808 
1809     vfree(txr->tx_swbd);
1810     txr->tx_swbd = NULL;
1811 }
1812 
1813 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
1814 {
1815     int i, err;
1816 
1817     for (i = 0; i < priv->num_tx_rings; i++) {
1818         err = enetc_alloc_txbdr(priv->tx_ring[i]);
1819 
1820         if (err)
1821             goto fail;
1822     }
1823 
1824     return 0;
1825 
1826 fail:
1827     while (i-- > 0)
1828         enetc_free_txbdr(priv->tx_ring[i]);
1829 
1830     return err;
1831 }
1832 
1833 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
1834 {
1835     int i;
1836 
1837     for (i = 0; i < priv->num_tx_rings; i++)
1838         enetc_free_txbdr(priv->tx_ring[i]);
1839 }
1840 
1841 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
1842 {
1843     size_t size = sizeof(union enetc_rx_bd);
1844     int err;
1845 
1846     rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
1847     if (!rxr->rx_swbd)
1848         return -ENOMEM;
1849 
1850     if (extended)
1851         size *= 2;
1852 
1853     err = enetc_dma_alloc_bdr(rxr, size);
1854     if (err) {
1855         vfree(rxr->rx_swbd);
1856         return err;
1857     }
1858 
1859     rxr->next_to_clean = 0;
1860     rxr->next_to_use = 0;
1861     rxr->next_to_alloc = 0;
1862     rxr->ext_en = extended;
1863 
1864     return 0;
1865 }
1866 
1867 static void enetc_free_rxbdr(struct enetc_bdr *rxr)
1868 {
1869     int size;
1870 
1871     size = rxr->bd_count * sizeof(union enetc_rx_bd);
1872 
1873     dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
1874     rxr->bd_base = NULL;
1875 
1876     vfree(rxr->rx_swbd);
1877     rxr->rx_swbd = NULL;
1878 }
1879 
1880 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
1881 {
1882     bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
1883     int i, err;
1884 
1885     for (i = 0; i < priv->num_rx_rings; i++) {
1886         err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
1887 
1888         if (err)
1889             goto fail;
1890     }
1891 
1892     return 0;
1893 
1894 fail:
1895     while (i-- > 0)
1896         enetc_free_rxbdr(priv->rx_ring[i]);
1897 
1898     return err;
1899 }
1900 
1901 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
1902 {
1903     int i;
1904 
1905     for (i = 0; i < priv->num_rx_rings; i++)
1906         enetc_free_rxbdr(priv->rx_ring[i]);
1907 }
1908 
1909 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
1910 {
1911     int i;
1912 
1913     if (!tx_ring->tx_swbd)
1914         return;
1915 
1916     for (i = 0; i < tx_ring->bd_count; i++) {
1917         struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
1918 
1919         enetc_free_tx_frame(tx_ring, tx_swbd);
1920     }
1921 
1922     tx_ring->next_to_clean = 0;
1923     tx_ring->next_to_use = 0;
1924 }
1925 
1926 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
1927 {
1928     int i;
1929 
1930     if (!rx_ring->rx_swbd)
1931         return;
1932 
1933     for (i = 0; i < rx_ring->bd_count; i++) {
1934         struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
1935 
1936         if (!rx_swbd->page)
1937             continue;
1938 
1939         dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
1940                    rx_swbd->dir);
1941         __free_page(rx_swbd->page);
1942         rx_swbd->page = NULL;
1943     }
1944 
1945     rx_ring->next_to_clean = 0;
1946     rx_ring->next_to_use = 0;
1947     rx_ring->next_to_alloc = 0;
1948 }
1949 
1950 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
1951 {
1952     int i;
1953 
1954     for (i = 0; i < priv->num_rx_rings; i++)
1955         enetc_free_rx_ring(priv->rx_ring[i]);
1956 
1957     for (i = 0; i < priv->num_tx_rings; i++)
1958         enetc_free_tx_ring(priv->tx_ring[i]);
1959 }
1960 
1961 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
1962 {
1963     int *rss_table;
1964     int i;
1965 
1966     rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
1967     if (!rss_table)
1968         return -ENOMEM;
1969 
1970     /* Set up RSS table defaults */
1971     for (i = 0; i < si->num_rss; i++)
1972         rss_table[i] = i % num_groups;
1973 
1974     enetc_set_rss_table(si, rss_table, si->num_rss);
1975 
1976     kfree(rss_table);
1977 
1978     return 0;
1979 }
1980 
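/* To make the default RSS spread concrete: the round-robin fill above, run in
 * isolation with hypothetical sizes (64 indirection entries, 4 RX rings),
 * produces the pattern 0 1 2 3 0 1 2 3 ... so flows hash evenly across rings.
 */

#include <stdio.h>

int main(void)
{
	int rss_table[64];	/* stand-in for si->num_rss entries */
	int num_groups = 4;	/* stand-in for priv->num_rx_rings */
	int i;

	for (i = 0; i < 64; i++)
		rss_table[i] = i % num_groups;

	for (i = 0; i < 8; i++)
		printf("%d ", rss_table[i]);	/* 0 1 2 3 0 1 2 3 */
	printf("\n");
	return 0;
}
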
1981 int enetc_configure_si(struct enetc_ndev_priv *priv)
1982 {
1983     struct enetc_si *si = priv->si;
1984     struct enetc_hw *hw = &si->hw;
1985     int err;
1986 
1987     /* set SI cache attributes */
1988     enetc_wr(hw, ENETC_SICAR0,
1989          ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1990     enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
1991     /* enable SI */
1992     enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
1993 
1994     if (si->num_rss) {
1995         err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
1996         if (err)
1997             return err;
1998     }
1999 
2000     return 0;
2001 }
2002 
2003 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
2004 {
2005     struct enetc_si *si = priv->si;
2006     int cpus = num_online_cpus();
2007 
2008     priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
2009     priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
2010 
2011     /* Enable all available TX rings in order to configure as many
2012      * priorities as possible, when needed.
2013      * TODO: Make # of TX rings run-time configurable
2014      */
2015     priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
2016     priv->num_tx_rings = si->num_tx_rings;
2017     priv->bdr_int_num = cpus;
2018     priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
2019     priv->tx_ictt = ENETC_TXIC_TIMETHR;
2020 }
2021 
2022 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
2023 {
2024     struct enetc_si *si = priv->si;
2025 
2026     priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
2027                   GFP_KERNEL);
2028     if (!priv->cls_rules)
2029         return -ENOMEM;
2030 
2031     return 0;
2032 }
2033 
2034 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
2035 {
2036     kfree(priv->cls_rules);
2037 }
2038 
2039 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2040 {
2041     int idx = tx_ring->index;
2042     u32 tbmr;
2043 
2044     enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
2045                lower_32_bits(tx_ring->bd_dma_base));
2046 
2047     enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
2048                upper_32_bits(tx_ring->bd_dma_base));
2049 
2050     WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
2051     enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
2052                ENETC_RTBLENR_LEN(tx_ring->bd_count));
2053 
2054     /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
2055     tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
2056     tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
2057 
2058     /* enable Tx ints by setting pkt thr to 1 */
2059     enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
2060 
2061     tbmr = ENETC_TBMR_EN;
2062     if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
2063         tbmr |= ENETC_TBMR_VIH;
2064 
2065     /* enable ring */
2066     enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
2067 
2068     tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
2069     tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
2070     tx_ring->idr = hw->reg + ENETC_SITXIDR;
2071 }
2072 
2073 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2074 {
2075     int idx = rx_ring->index;
2076     u32 rbmr;
2077 
2078     enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
2079                lower_32_bits(rx_ring->bd_dma_base));
2080 
2081     enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
2082                upper_32_bits(rx_ring->bd_dma_base));
2083 
2084     WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
2085     enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
2086                ENETC_RTBLENR_LEN(rx_ring->bd_count));
2087 
2088     if (rx_ring->xdp.prog)
2089         enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
2090     else
2091         enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
2092 
2093     enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
2094 
2095     /* enable Rx ints by setting pkt thr to 1 */
2096     enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
2097 
2098     rbmr = ENETC_RBMR_EN;
2099 
2100     if (rx_ring->ext_en)
2101         rbmr |= ENETC_RBMR_BDS;
2102 
2103     if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
2104         rbmr |= ENETC_RBMR_VTE;
2105 
2106     rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
2107     rx_ring->idr = hw->reg + ENETC_SIRXIDR;
2108 
2109     enetc_lock_mdio();
2110     enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
2111     enetc_unlock_mdio();
2112 
2113     /* enable ring */
2114     enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
2115 }
2116 
2117 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
2118 {
2119     int i;
2120 
2121     for (i = 0; i < priv->num_tx_rings; i++)
2122         enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
2123 
2124     for (i = 0; i < priv->num_rx_rings; i++)
2125         enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
2126 }
2127 
2128 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2129 {
2130     int idx = rx_ring->index;
2131 
2132     /* disable EN bit on ring */
2133     enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
2134 }
2135 
2136 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2137 {
2138     int delay = 8, timeout = 100;
2139     int idx = tx_ring->index;
2140 
2141     /* disable EN bit on ring */
2142     enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
2143 
2144     /* wait for busy to clear */
2145     while (delay < timeout &&
2146            enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
2147         msleep(delay);
2148         delay *= 2;
2149     }
2150 
2151     if (delay >= timeout)
2152         netdev_warn(tx_ring->ndev, "timed out waiting for tx ring #%d to clear\n",
2153                 idx);
2154 }
2155 
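/* The busy-wait above backs off exponentially: it sleeps for 8, 16, 32 and
 * 64 ms (roughly 120 ms in total) before delay reaches the cutoff of 100 and
 * the warning fires. A standalone sketch of the same capped backoff, with the
 * TBSR read replaced by a hypothetical ring_busy() callback and a kernel
 * context assumed for msleep():
 */

#include <linux/delay.h>
#include <linux/types.h>

static bool wait_ring_idle(bool (*ring_busy)(void))
{
	int delay = 8, timeout = 100;

	while (delay < timeout && ring_busy()) {
		msleep(delay);		/* 8, 16, 32, 64 ms */
		delay *= 2;
	}

	return delay < timeout;		/* false means we gave up, as above */
}
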
2156 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
2157 {
2158     int i;
2159 
2160     for (i = 0; i < priv->num_tx_rings; i++)
2161         enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
2162 
2163     for (i = 0; i < priv->num_rx_rings; i++)
2164         enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
2165 
2166     udelay(1);
2167 }
2168 
2169 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
2170 {
2171     struct pci_dev *pdev = priv->si->pdev;
2172     int i, j, err;
2173 
2174     for (i = 0; i < priv->bdr_int_num; i++) {
2175         int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2176         struct enetc_int_vector *v = priv->int_vector[i];
2177         int entry = ENETC_BDR_INT_BASE_IDX + i;
2178         struct enetc_hw *hw = &priv->si->hw;
2179 
2180         snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
2181              priv->ndev->name, i);
2182         err = request_irq(irq, enetc_msix, 0, v->name, v);
2183         if (err) {
2184             dev_err(priv->dev, "request_irq() failed!\n");
2185             goto irq_err;
2186         }
2187         disable_irq(irq);
2188 
2189         v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
2190         v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
2191         v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
2192 
2193         enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
2194 
2195         for (j = 0; j < v->count_tx_rings; j++) {
2196             int idx = v->tx_ring[j].index;
2197 
2198             enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
2199         }
2200         irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
2201     }
2202 
2203     return 0;
2204 
2205 irq_err:
2206     while (i--) {
2207         int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2208 
2209         irq_set_affinity_hint(irq, NULL);
2210         free_irq(irq, priv->int_vector[i]);
2211     }
2212 
2213     return err;
2214 }
2215 
2216 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
2217 {
2218     struct pci_dev *pdev = priv->si->pdev;
2219     int i;
2220 
2221     for (i = 0; i < priv->bdr_int_num; i++) {
2222         int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2223 
2224         irq_set_affinity_hint(irq, NULL);
2225         free_irq(irq, priv->int_vector[i]);
2226     }
2227 }
2228 
2229 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
2230 {
2231     struct enetc_hw *hw = &priv->si->hw;
2232     u32 icpt, ictt;
2233     int i;
2234 
2235     /* enable Tx & Rx event indication */
2236     if (priv->ic_mode &
2237         (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
2238         icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
2239         /* init to non-0 minimum, will be adjusted later */
2240         ictt = 0x1;
2241     } else {
2242         icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
2243         ictt = 0;
2244     }
2245 
2246     for (i = 0; i < priv->num_rx_rings; i++) {
2247         enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
2248         enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
2249         enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
2250     }
2251 
2252     if (priv->ic_mode & ENETC_IC_TX_MANUAL)
2253         icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
2254     else
2255         icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
2256 
2257     for (i = 0; i < priv->num_tx_rings; i++) {
2258         enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
2259         enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
2260         enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
2261     }
2262 }
2263 
2264 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
2265 {
2266     int i;
2267 
2268     for (i = 0; i < priv->num_tx_rings; i++)
2269         enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
2270 
2271     for (i = 0; i < priv->num_rx_rings; i++)
2272         enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
2273 }
2274 
2275 static int enetc_phylink_connect(struct net_device *ndev)
2276 {
2277     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2278     struct ethtool_eee edata;
2279     int err;
2280 
2281     if (!priv->phylink)
2282         return 0; /* phy-less mode */
2283 
2284     err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
2285     if (err) {
2286         dev_err(&ndev->dev, "could not attach to PHY\n");
2287         return err;
2288     }
2289 
2290     /* disable EEE autoneg until the ENETC driver supports it */
2291     memset(&edata, 0, sizeof(struct ethtool_eee));
2292     phylink_ethtool_set_eee(priv->phylink, &edata);
2293 
2294     return 0;
2295 }
2296 
2297 static void enetc_tx_onestep_tstamp(struct work_struct *work)
2298 {
2299     struct enetc_ndev_priv *priv;
2300     struct sk_buff *skb;
2301 
2302     priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
2303 
2304     netif_tx_lock(priv->ndev);
2305 
2306     clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
2307     skb = skb_dequeue(&priv->tx_skbs);
2308     if (skb)
2309         enetc_start_xmit(skb, priv->ndev);
2310 
2311     netif_tx_unlock(priv->ndev);
2312 }
2313 
2314 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
2315 {
2316     INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
2317     skb_queue_head_init(&priv->tx_skbs);
2318 }
2319 
2320 void enetc_start(struct net_device *ndev)
2321 {
2322     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2323     int i;
2324 
2325     enetc_setup_interrupts(priv);
2326 
2327     for (i = 0; i < priv->bdr_int_num; i++) {
2328         int irq = pci_irq_vector(priv->si->pdev,
2329                      ENETC_BDR_INT_BASE_IDX + i);
2330 
2331         napi_enable(&priv->int_vector[i]->napi);
2332         enable_irq(irq);
2333     }
2334 
2335     if (priv->phylink)
2336         phylink_start(priv->phylink);
2337     else
2338         netif_carrier_on(ndev);
2339 
2340     netif_tx_start_all_queues(ndev);
2341 }
2342 
2343 int enetc_open(struct net_device *ndev)
2344 {
2345     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2346     int num_stack_tx_queues;
2347     int err;
2348 
2349     err = enetc_setup_irqs(priv);
2350     if (err)
2351         return err;
2352 
2353     err = enetc_phylink_connect(ndev);
2354     if (err)
2355         goto err_phy_connect;
2356 
2357     err = enetc_alloc_tx_resources(priv);
2358     if (err)
2359         goto err_alloc_tx;
2360 
2361     err = enetc_alloc_rx_resources(priv);
2362     if (err)
2363         goto err_alloc_rx;
2364 
2365     num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
2366 
2367     err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
2368     if (err)
2369         goto err_set_queues;
2370 
2371     err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
2372     if (err)
2373         goto err_set_queues;
2374 
2375     enetc_tx_onestep_tstamp_init(priv);
2376     enetc_setup_bdrs(priv);
2377     enetc_start(ndev);
2378 
2379     return 0;
2380 
2381 err_set_queues:
2382     enetc_free_rx_resources(priv);
2383 err_alloc_rx:
2384     enetc_free_tx_resources(priv);
2385 err_alloc_tx:
2386     if (priv->phylink)
2387         phylink_disconnect_phy(priv->phylink);
2388 err_phy_connect:
2389     enetc_free_irqs(priv);
2390 
2391     return err;
2392 }
2393 
2394 void enetc_stop(struct net_device *ndev)
2395 {
2396     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2397     int i;
2398 
2399     netif_tx_stop_all_queues(ndev);
2400 
2401     for (i = 0; i < priv->bdr_int_num; i++) {
2402         int irq = pci_irq_vector(priv->si->pdev,
2403                      ENETC_BDR_INT_BASE_IDX + i);
2404 
2405         disable_irq(irq);
2406         napi_synchronize(&priv->int_vector[i]->napi);
2407         napi_disable(&priv->int_vector[i]->napi);
2408     }
2409 
2410     if (priv->phylink)
2411         phylink_stop(priv->phylink);
2412     else
2413         netif_carrier_off(ndev);
2414 
2415     enetc_clear_interrupts(priv);
2416 }
2417 
2418 int enetc_close(struct net_device *ndev)
2419 {
2420     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2421 
2422     enetc_stop(ndev);
2423     enetc_clear_bdrs(priv);
2424 
2425     if (priv->phylink)
2426         phylink_disconnect_phy(priv->phylink);
2427     enetc_free_rxtx_rings(priv);
2428     enetc_free_rx_resources(priv);
2429     enetc_free_tx_resources(priv);
2430     enetc_free_irqs(priv);
2431 
2432     return 0;
2433 }
2434 
2435 int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
2436 {
2437     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2438     struct tc_mqprio_qopt *mqprio = type_data;
2439     struct enetc_bdr *tx_ring;
2440     int num_stack_tx_queues;
2441     u8 num_tc;
2442     int i;
2443 
2444     num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
2445     mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2446     num_tc = mqprio->num_tc;
2447 
2448     if (!num_tc) {
2449         netdev_reset_tc(ndev);
2450         netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
2451 
2452         /* Reset all ring priorities to 0 */
2453         for (i = 0; i < priv->num_tx_rings; i++) {
2454             tx_ring = priv->tx_ring[i];
2455             enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
2456         }
2457 
2458         return 0;
2459     }
2460 
2461     /* Check if we have enough BD rings available to accommodate all TCs */
2462     if (num_tc > num_stack_tx_queues) {
2463         netdev_err(ndev, "Max %d traffic classes supported\n",
2464                num_stack_tx_queues);
2465         return -EINVAL;
2466     }
2467 
2468     /* For the moment, we use only one BD ring per TC.
2469      *
2470      * Configure num_tc BD rings with increasing priorities.
2471      */
2472     for (i = 0; i < num_tc; i++) {
2473         tx_ring = priv->tx_ring[i];
2474         enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
2475     }
2476 
2477     /* Reset the number of netdev queues based on the TC count */
2478     netif_set_real_num_tx_queues(ndev, num_tc);
2479 
2480     netdev_set_num_tc(ndev, num_tc);
2481 
2482     /* Each TC is associated with one netdev queue */
2483     for (i = 0; i < num_tc; i++)
2484         netdev_set_tc_queue(ndev, i, 1, i);
2485 
2486     return 0;
2487 }
2488 
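/* For illustration, the type_data handled above is the mqprio configuration
 * that user space builds with the tc mqprio qdisc. A hand-rolled equivalent
 * for 4 traffic classes (all values hypothetical), matching the one-queue-
 * per-TC layout this handler programs, would look like:
 */

#include <linux/pkt_sched.h>

static const struct tc_mqprio_qopt example_mqprio = {
	.num_tc      = 4,
	.prio_tc_map = { 0, 1, 2, 3, 3, 3, 3, 3,
			 3, 3, 3, 3, 3, 3, 3, 3 },	/* skb priority -> TC */
	.count       = { 1, 1, 1, 1 },			/* one TX queue per TC */
	.offset      = { 0, 1, 2, 3 },			/* contiguous queue ranges */
	/* .hw is set to TC_MQPRIO_HW_OFFLOAD_TCS by the handler itself */
};
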
2489 static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
2490                 struct netlink_ext_ack *extack)
2491 {
2492     struct enetc_ndev_priv *priv = netdev_priv(dev);
2493     struct bpf_prog *old_prog;
2494     bool is_up;
2495     int i;
2496 
2497     /* The buffer layout is changing, so we need to drain the old
2498      * RX buffers and seed new ones.
2499      */
2500     is_up = netif_running(dev);
2501     if (is_up)
2502         dev_close(dev);
2503 
2504     old_prog = xchg(&priv->xdp_prog, prog);
2505     if (old_prog)
2506         bpf_prog_put(old_prog);
2507 
2508     for (i = 0; i < priv->num_rx_rings; i++) {
2509         struct enetc_bdr *rx_ring = priv->rx_ring[i];
2510 
2511         rx_ring->xdp.prog = prog;
2512 
2513         if (prog)
2514             rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
2515         else
2516             rx_ring->buffer_offset = ENETC_RXB_PAD;
2517     }
2518 
2519     if (is_up)
2520         return dev_open(dev, extack);
2521 
2522     return 0;
2523 }
2524 
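/* The close/reopen sequence above is triggered from user space simply by
 * attaching or detaching an XDP program. A minimal libbpf sketch (libbpf
 * >= 1.0 assumed; object, program and interface names are placeholders and
 * error handling is trimmed):
 */

#include <net/if.h>
#include <bpf/libbpf.h>

int attach_xdp(const char *ifname)
{
	int ifindex = if_nametoindex(ifname);
	struct bpf_program *prog;
	struct bpf_object *obj;

	obj = bpf_object__open_file("xdp_prog.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return -1;

	prog = bpf_object__find_program_by_name(obj, "xdp_pass_all");
	if (!prog)
		return -1;

	/* lands in enetc_setup_bpf() -> enetc_setup_xdp_prog() */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
}
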
2525 int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
2526 {
2527     switch (xdp->command) {
2528     case XDP_SETUP_PROG:
2529         return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
2530     default:
2531         return -EINVAL;
2532     }
2533 
2534     return 0;
2535 }
2536 
2537 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
2538 {
2539     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2540     struct net_device_stats *stats = &ndev->stats;
2541     unsigned long packets = 0, bytes = 0;
2542     unsigned long tx_dropped = 0;
2543     int i;
2544 
2545     for (i = 0; i < priv->num_rx_rings; i++) {
2546         packets += priv->rx_ring[i]->stats.packets;
2547         bytes   += priv->rx_ring[i]->stats.bytes;
2548     }
2549 
2550     stats->rx_packets = packets;
2551     stats->rx_bytes = bytes;
2552     bytes = 0;
2553     packets = 0;
2554 
2555     for (i = 0; i < priv->num_tx_rings; i++) {
2556         packets += priv->tx_ring[i]->stats.packets;
2557         bytes   += priv->tx_ring[i]->stats.bytes;
2558         tx_dropped += priv->tx_ring[i]->stats.win_drop;
2559     }
2560 
2561     stats->tx_packets = packets;
2562     stats->tx_bytes = bytes;
2563     stats->tx_dropped = tx_dropped;
2564 
2565     return stats;
2566 }
2567 
2568 static int enetc_set_rss(struct net_device *ndev, int en)
2569 {
2570     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2571     struct enetc_hw *hw = &priv->si->hw;
2572     u32 reg;
2573 
2574     enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
2575 
2576     reg = enetc_rd(hw, ENETC_SIMR);
2577     reg &= ~ENETC_SIMR_RSSE;
2578     reg |= (en) ? ENETC_SIMR_RSSE : 0;
2579     enetc_wr(hw, ENETC_SIMR, reg);
2580 
2581     return 0;
2582 }
2583 
2584 static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
2585 {
2586     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2587     int i;
2588 
2589     for (i = 0; i < priv->num_rx_rings; i++)
2590         enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
2591 }
2592 
2593 static void enetc_enable_txvlan(struct net_device *ndev, bool en)
2594 {
2595     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2596     int i;
2597 
2598     for (i = 0; i < priv->num_tx_rings; i++)
2599         enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
2600 }
2601 
2602 void enetc_set_features(struct net_device *ndev, netdev_features_t features)
2603 {
2604     netdev_features_t changed = ndev->features ^ features;
2605 
2606     if (changed & NETIF_F_RXHASH)
2607         enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
2608 
2609     if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2610         enetc_enable_rxvlan(ndev,
2611                     !!(features & NETIF_F_HW_VLAN_CTAG_RX));
2612 
2613     if (changed & NETIF_F_HW_VLAN_CTAG_TX)
2614         enetc_enable_txvlan(ndev,
2615                     !!(features & NETIF_F_HW_VLAN_CTAG_TX));
2616 }
2617 
2618 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
2619 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
2620 {
2621     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2622     struct hwtstamp_config config;
2623     int ao;
2624 
2625     if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2626         return -EFAULT;
2627 
2628     switch (config.tx_type) {
2629     case HWTSTAMP_TX_OFF:
2630         priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2631         break;
2632     case HWTSTAMP_TX_ON:
2633         priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2634         priv->active_offloads |= ENETC_F_TX_TSTAMP;
2635         break;
2636     case HWTSTAMP_TX_ONESTEP_SYNC:
2637         priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2638         priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
2639         break;
2640     default:
2641         return -ERANGE;
2642     }
2643 
2644     ao = priv->active_offloads;
2645     switch (config.rx_filter) {
2646     case HWTSTAMP_FILTER_NONE:
2647         priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
2648         break;
2649     default:
2650         priv->active_offloads |= ENETC_F_RX_TSTAMP;
2651         config.rx_filter = HWTSTAMP_FILTER_ALL;
2652     }
2653 
2654     if (netif_running(ndev) && ao != priv->active_offloads) {
2655         enetc_close(ndev);
2656         enetc_open(ndev);
2657     }
2658 
2659     return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2660            -EFAULT : 0;
2661 }
2662 
2663 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
2664 {
2665     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2666     struct hwtstamp_config config;
2667 
2668     config.flags = 0;
2669 
2670     if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
2671         config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
2672     else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
2673         config.tx_type = HWTSTAMP_TX_ON;
2674     else
2675         config.tx_type = HWTSTAMP_TX_OFF;
2676 
2677     config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
2678                 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
2679 
2680     return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2681            -EFAULT : 0;
2682 }
2683 #endif
2684 
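/* The SIOCSHWTSTAMP request handled above is issued from user space as in the
 * sketch below ("eno0" and the socket setup are placeholders). Passing
 * HWTSTAMP_TX_ONESTEP_SYNC instead of HWTSTAMP_TX_ON selects the one-step
 * path and its deferred-xmit worker.
 */

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int enable_hwtstamp(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	int sock = socket(AF_INET, SOCK_DGRAM, 0);
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eno0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
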
2685 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2686 {
2687     struct enetc_ndev_priv *priv = netdev_priv(ndev);
2688 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
2689     if (cmd == SIOCSHWTSTAMP)
2690         return enetc_hwtstamp_set(ndev, rq);
2691     if (cmd == SIOCGHWTSTAMP)
2692         return enetc_hwtstamp_get(ndev, rq);
2693 #endif
2694 
2695     if (!priv->phylink)
2696         return -EOPNOTSUPP;
2697 
2698     return phylink_mii_ioctl(priv->phylink, rq, cmd);
2699 }
2700 
2701 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
2702 {
2703     struct pci_dev *pdev = priv->si->pdev;
2704     int first_xdp_tx_ring;
2705     int i, n, err, nvec;
2706     int v_tx_rings;
2707 
2708     nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
2709     /* allocate MSIX for both messaging and Rx/Tx interrupts */
2710     n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
2711 
2712     if (n < 0)
2713         return n;
2714 
2715     if (n != nvec)
2716         return -EPERM;
2717 
2718     /* # of tx rings per int vector */
2719     v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
2720 
2721     for (i = 0; i < priv->bdr_int_num; i++) {
2722         struct enetc_int_vector *v;
2723         struct enetc_bdr *bdr;
2724         int j;
2725 
2726         v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
2727         if (!v) {
2728             err = -ENOMEM;
2729             goto fail;
2730         }
2731 
2732         priv->int_vector[i] = v;
2733 
2734         bdr = &v->rx_ring;
2735         bdr->index = i;
2736         bdr->ndev = priv->ndev;
2737         bdr->dev = priv->dev;
2738         bdr->bd_count = priv->rx_bd_count;
2739         bdr->buffer_offset = ENETC_RXB_PAD;
2740         priv->rx_ring[i] = bdr;
2741 
2742         err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
2743         if (err) {
2744             kfree(v);
2745             goto fail;
2746         }
2747 
2748         err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
2749                          MEM_TYPE_PAGE_SHARED, NULL);
2750         if (err) {
2751             xdp_rxq_info_unreg(&bdr->xdp.rxq);
2752             kfree(v);
2753             goto fail;
2754         }
2755 
2756         /* init defaults for adaptive IC */
2757         if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
2758             v->rx_ictt = 0x1;
2759             v->rx_dim_en = true;
2760         }
2761         INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
2762         netif_napi_add(priv->ndev, &v->napi, enetc_poll,
2763                    NAPI_POLL_WEIGHT);
2764         v->count_tx_rings = v_tx_rings;
2765 
2766         for (j = 0; j < v_tx_rings; j++) {
2767             int idx;
2768 
2769             /* default tx ring mapping policy */
2770             idx = priv->bdr_int_num * j + i;
2771             __set_bit(idx, &v->tx_rings_map);
2772             bdr = &v->tx_ring[j];
2773             bdr->index = idx;
2774             bdr->ndev = priv->ndev;
2775             bdr->dev = priv->dev;
2776             bdr->bd_count = priv->tx_bd_count;
2777             priv->tx_ring[idx] = bdr;
2778         }
2779     }
2780 
2781     first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
2782     priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
2783 
2784     return 0;
2785 
2786 fail:
2787     while (i--) {
2788         struct enetc_int_vector *v = priv->int_vector[i];
2789         struct enetc_bdr *rx_ring = &v->rx_ring;
2790 
2791         xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
2792         xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
2793         netif_napi_del(&v->napi);
2794         cancel_work_sync(&v->rx_dim.work);
2795         kfree(v);
2796     }
2797 
2798     pci_free_irq_vectors(pdev);
2799 
2800     return err;
2801 }
2802 
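/* The "default tx ring mapping policy" above interleaves rings across
 * vectors: ring index = bdr_int_num * j + i. For a hypothetical 2-vector,
 * 8-ring setup this works out as below (vector 0 gets rings 0 2 4 6,
 * vector 1 gets rings 1 3 5 7), which keeps per-vector work balanced.
 */

#include <stdio.h>

int main(void)
{
	int bdr_int_num = 2, v_tx_rings = 4;	/* illustrative sizes only */
	int i, j;

	for (i = 0; i < bdr_int_num; i++)
		for (j = 0; j < v_tx_rings; j++)
			printf("vector %d -> tx ring %d\n",
			       i, bdr_int_num * j + i);
	return 0;
}
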
2803 void enetc_free_msix(struct enetc_ndev_priv *priv)
2804 {
2805     int i;
2806 
2807     for (i = 0; i < priv->bdr_int_num; i++) {
2808         struct enetc_int_vector *v = priv->int_vector[i];
2809         struct enetc_bdr *rx_ring = &v->rx_ring;
2810 
2811         xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
2812         xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
2813         netif_napi_del(&v->napi);
2814         cancel_work_sync(&v->rx_dim.work);
2815     }
2816 
2817     for (i = 0; i < priv->num_rx_rings; i++)
2818         priv->rx_ring[i] = NULL;
2819 
2820     for (i = 0; i < priv->num_tx_rings; i++)
2821         priv->tx_ring[i] = NULL;
2822 
2823     for (i = 0; i < priv->bdr_int_num; i++) {
2824         kfree(priv->int_vector[i]);
2825         priv->int_vector[i] = NULL;
2826     }
2827 
2828     /* disable all MSIX for this device */
2829     pci_free_irq_vectors(priv->si->pdev);
2830 }
2831 
2832 static void enetc_kfree_si(struct enetc_si *si)
2833 {
2834     char *p = (char *)si - si->pad;
2835 
2836     kfree(p);
2837 }
2838 
2839 static void enetc_detect_errata(struct enetc_si *si)
2840 {
2841     if (si->pdev->revision == ENETC_REV1)
2842         si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
2843 }
2844 
2845 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
2846 {
2847     struct enetc_si *si, *p;
2848     struct enetc_hw *hw;
2849     size_t alloc_size;
2850     int err, len;
2851 
2852     pcie_flr(pdev);
2853     err = pci_enable_device_mem(pdev);
2854     if (err)
2855         return dev_err_probe(&pdev->dev, err, "device enable failed\n");
2856 
2857     /* set up for high or low dma */
2858     err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2859     if (err) {
2860         dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
2861         goto err_dma;
2862     }
2863 
2864     err = pci_request_mem_regions(pdev, name);
2865     if (err) {
2866         dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
2867         goto err_pci_mem_reg;
2868     }
2869 
2870     pci_set_master(pdev);
2871 
2872     alloc_size = sizeof(struct enetc_si);
2873     if (sizeof_priv) {
2874         /* align priv to 32B */
2875         alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
2876         alloc_size += sizeof_priv;
2877     }
2878     /* force 32B alignment for enetc_si */
2879     alloc_size += ENETC_SI_ALIGN - 1;
2880 
2881     p = kzalloc(alloc_size, GFP_KERNEL);
2882     if (!p) {
2883         err = -ENOMEM;
2884         goto err_alloc_si;
2885     }
2886 
2887     si = PTR_ALIGN(p, ENETC_SI_ALIGN);
2888     si->pad = (char *)si - (char *)p;
2889 
2890     pci_set_drvdata(pdev, si);
2891     si->pdev = pdev;
2892     hw = &si->hw;
2893 
2894     len = pci_resource_len(pdev, ENETC_BAR_REGS);
2895     hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
2896     if (!hw->reg) {
2897         err = -ENXIO;
2898         dev_err(&pdev->dev, "ioremap() failed\n");
2899         goto err_ioremap;
2900     }
2901     if (len > ENETC_PORT_BASE)
2902         hw->port = hw->reg + ENETC_PORT_BASE;
2903     if (len > ENETC_GLOBAL_BASE)
2904         hw->global = hw->reg + ENETC_GLOBAL_BASE;
2905 
2906     enetc_detect_errata(si);
2907 
2908     return 0;
2909 
2910 err_ioremap:
2911     enetc_kfree_si(si);
2912 err_alloc_si:
2913     pci_release_mem_regions(pdev);
2914 err_pci_mem_reg:
2915 err_dma:
2916     pci_disable_device(pdev);
2917 
2918     return err;
2919 }
2920 
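/* enetc_pci_probe() over-allocates by ENETC_SI_ALIGN - 1 bytes, aligns the
 * struct with PTR_ALIGN() and records the offset in si->pad so that
 * enetc_kfree_si() can recover the original pointer. The same trick in plain
 * user-space C (32 is a stand-in for ENETC_SI_ALIGN):
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define SI_ALIGN 32

int main(void)
{
	char *p = calloc(1, 100 + SI_ALIGN - 1);
	char *si;
	size_t pad;

	if (!p)
		return 1;

	si = (char *)(((uintptr_t)p + SI_ALIGN - 1) &
		      ~(uintptr_t)(SI_ALIGN - 1));
	pad = si - p;			/* what si->pad records */

	printf("raw %p aligned %p pad %zu\n", (void *)p, (void *)si, pad);
	free(si - pad);			/* back to the original allocation */
	return 0;
}
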
2921 void enetc_pci_remove(struct pci_dev *pdev)
2922 {
2923     struct enetc_si *si = pci_get_drvdata(pdev);
2924     struct enetc_hw *hw = &si->hw;
2925 
2926     iounmap(hw->reg);
2927     enetc_kfree_si(si);
2928     pci_release_mem_regions(pdev);
2929     pci_disable_device(pdev);
2930 }