// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause

/* Packet receive logic for Mellanox Gigabit Ethernet driver
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"

void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
                                  unsigned int index, u64 dmac)
{
    void __iomem *base = priv->base;
    u64 control;

    /* Write destination MAC to specified MAC RX filter */
    writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
           (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));

    /* Enable MAC receive filter mask for specified index */
    control = readq(base + MLXBF_GIGE_CONTROL);
    control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
    writeq(control, base + MLXBF_GIGE_CONTROL);
}

void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
                                  unsigned int index, u64 *dmac)
{
    void __iomem *base = priv->base;

    /* Read destination MAC from specified MAC RX filter */
    *dmac = readq(base + MLXBF_GIGE_RX_MAC_FILTER +
                  (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
}

void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv)
{
    void __iomem *base = priv->base;
    u64 control;
    u64 end_mac;

    /* Enable MAC_ID_RANGE match functionality */
    control = readq(base + MLXBF_GIGE_CONTROL);
    control |= MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
    writeq(control, base + MLXBF_GIGE_CONTROL);

    /* Set start of destination MAC range check to 0 */
    writeq(0, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START);

    /* Set end of destination MAC range check to all FFs */
    end_mac = BCAST_MAC_ADDR;
    writeq(end_mac, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END);
}

void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv)
{
    void __iomem *base = priv->base;
    u64 control;

    /* Disable MAC_ID_RANGE match functionality */
    control = readq(base + MLXBF_GIGE_CONTROL);
    control &= ~MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
    writeq(control, base + MLXBF_GIGE_CONTROL);

    /* NOTE: no need to change DMAC_RANGE_START or END;
     * those values are ignored since MAC_ID_RANGE_EN=0
     */
}

/* Receive Initialization
 * 1) Configures RX MAC filters via MMIO registers
 * 2) Allocates RX WQE array using coherent DMA mapping
 * 3) Initializes each element of RX WQE array with a receive
 *    buffer pointer (also using coherent DMA mapping)
 * 4) Allocates RX CQE array using coherent DMA mapping
 * 5) Completes other misc receive initialization
 */
int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
{
    size_t wq_size, cq_size;
    dma_addr_t *rx_wqe_ptr;
    dma_addr_t rx_buf_dma;
    u64 data;
    int i, j;

    /* Configure MAC RX filter #0 to allow RX of broadcast pkts */
    mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
                                 BCAST_MAC_ADDR);

    wq_size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
    priv->rx_wqe_base = dma_alloc_coherent(priv->dev, wq_size,
                                           &priv->rx_wqe_base_dma,
                                           GFP_KERNEL);
    if (!priv->rx_wqe_base)
        return -ENOMEM;

    /* Initialize 'rx_wqe_ptr' to point to first RX WQE in array
     * Each RX WQE is simply a receive buffer pointer, so walk
     * the entire array, allocating a 2KB buffer for each element
     */
    rx_wqe_ptr = priv->rx_wqe_base;

    for (i = 0; i < priv->rx_q_entries; i++) {
        priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
                                               &rx_buf_dma, DMA_FROM_DEVICE);
        if (!priv->rx_skb[i])
            goto free_wqe_and_skb;
        *rx_wqe_ptr++ = rx_buf_dma;
    }

    /* Write RX WQE base address into MMIO reg */
    writeq(priv->rx_wqe_base_dma, priv->base + MLXBF_GIGE_RX_WQ_BASE);

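    /* Allocate RX CQE array; each 64-bit CQE carries the packet's
     * status, length, and a valid bit used for polarity checking
     */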
    cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
    priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,
                                           &priv->rx_cqe_base_dma,
                                           GFP_KERNEL);
    if (!priv->rx_cqe_base)
        goto free_wqe_and_skb;

    for (i = 0; i < priv->rx_q_entries; i++)
        priv->rx_cqe_base[i] |= MLXBF_GIGE_RX_CQE_VALID_MASK;

    /* Write RX CQE base address into MMIO reg */
    writeq(priv->rx_cqe_base_dma, priv->base + MLXBF_GIGE_RX_CQ_BASE);

    /* Write RX_WQE_PI with current number of replenished buffers */
    writeq(priv->rx_q_entries, priv->base + MLXBF_GIGE_RX_WQE_PI);

    /* Enable removal of CRC during RX */
    data = readq(priv->base + MLXBF_GIGE_RX);
    data |= MLXBF_GIGE_RX_STRIP_CRC_EN;
    writeq(data, priv->base + MLXBF_GIGE_RX);

    /* Enable RX MAC filter pass and discard counters */
    writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN,
           priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC);
    writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
           priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);

    /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
     * indicate readiness to receive interrupts
     */
    data = readq(priv->base + MLXBF_GIGE_INT_MASK);
    data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
    writeq(data, priv->base + MLXBF_GIGE_INT_MASK);

    /* Enable RX DMA to write new packets to memory */
    data = readq(priv->base + MLXBF_GIGE_RX_DMA);
    data |= MLXBF_GIGE_RX_DMA_EN;
    writeq(data, priv->base + MLXBF_GIGE_RX_DMA);

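    /* Write log2 of the RX queue depth into the RX WQE size register */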
    writeq(ilog2(priv->rx_q_entries),
           priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);

    return 0;

free_wqe_and_skb:
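    /* Error unwind: unmap and free any RX buffers allocated so far,
     * then release the RX WQE array
     */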
    rx_wqe_ptr = priv->rx_wqe_base;
    for (j = 0; j < i; j++) {
        dma_unmap_single(priv->dev, *rx_wqe_ptr,
                         MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
        dev_kfree_skb(priv->rx_skb[j]);
        rx_wqe_ptr++;
    }
    dma_free_coherent(priv->dev, wq_size,
                      priv->rx_wqe_base, priv->rx_wqe_base_dma);
    return -ENOMEM;
}

/* Receive Deinitialization
 * This routine will free allocations done by mlxbf_gige_rx_init(),
 * namely the RX WQE and RX CQE arrays, as well as all RX buffers
 */
void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
{
    dma_addr_t *rx_wqe_ptr;
    size_t size;
    u64 data;
    int i;

    /* Disable RX DMA to prevent packet transfers to memory */
    data = readq(priv->base + MLXBF_GIGE_RX_DMA);
    data &= ~MLXBF_GIGE_RX_DMA_EN;
    writeq(data, priv->base + MLXBF_GIGE_RX_DMA);

    rx_wqe_ptr = priv->rx_wqe_base;

    for (i = 0; i < priv->rx_q_entries; i++) {
        dma_unmap_single(priv->dev, *rx_wqe_ptr, MLXBF_GIGE_DEFAULT_BUF_SZ,
                         DMA_FROM_DEVICE);
        dev_kfree_skb(priv->rx_skb[i]);
        rx_wqe_ptr++;
    }

    size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
    dma_free_coherent(priv->dev, size,
                      priv->rx_wqe_base, priv->rx_wqe_base_dma);

    size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
    dma_free_coherent(priv->dev, size,
                      priv->rx_cqe_base, priv->rx_cqe_base_dma);

    priv->rx_wqe_base = NULL;
    priv->rx_wqe_base_dma = 0;
    priv->rx_cqe_base = NULL;
    priv->rx_cqe_base_dma = 0;
    writeq(0, priv->base + MLXBF_GIGE_RX_WQ_BASE);
    writeq(0, priv->base + MLXBF_GIGE_RX_CQ_BASE);
}

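/* Process one RX completion queue entry: deliver the packet to the
 * stack if its status is OK, replenish the receive buffer, and bump
 * the WQE producer index.  Returns true if more completions appear
 * to be pending (producer and consumer indices still differ).
 */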
static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
{
    struct net_device *netdev = priv->netdev;
    struct sk_buff *skb = NULL, *rx_skb;
    u16 rx_pi_rem, rx_ci_rem;
    dma_addr_t *rx_wqe_addr;
    dma_addr_t rx_buf_dma;
    u64 *rx_cqe_addr;
    u64 datalen;
    u64 rx_cqe;
    u16 rx_ci;
    u16 rx_pi;

    /* Index into RX buffer array is rx_pi w/wrap based on number of RX queue entries */
    rx_pi = readq(priv->base + MLXBF_GIGE_RX_WQE_PI);
    rx_pi_rem = rx_pi % priv->rx_q_entries;

    rx_wqe_addr = priv->rx_wqe_base + rx_pi_rem;
    rx_cqe_addr = priv->rx_cqe_base + rx_pi_rem;
    rx_cqe = *rx_cqe_addr;

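    /* A CQE is considered new only when its valid bit matches the
     * expected polarity; the polarity flips each time the producer
     * index wraps, so entries left over from the previous pass
     * through the ring are ignored.
     */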
    if ((!!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK)) != priv->valid_polarity)
        return false;

    if ((rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK) == 0) {
        /* Packet is OK, increment stats */
        datalen = rx_cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += datalen;

        skb = priv->rx_skb[rx_pi_rem];

        skb_put(skb, datalen);

        skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */

        skb->protocol = eth_type_trans(skb, netdev);

        /* Alloc another RX SKB for this same index */
        rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
                                      &rx_buf_dma, DMA_FROM_DEVICE);
        if (!rx_skb)
            return false;
        priv->rx_skb[rx_pi_rem] = rx_skb;
        dma_unmap_single(priv->dev, *rx_wqe_addr,
                         MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
        *rx_wqe_addr = rx_buf_dma;
    } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
        priv->stats.rx_mac_errors++;
    } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED) {
        priv->stats.rx_truncate_errors++;
    }

    /* Let hardware know we've replenished one buffer */
    rx_pi++;

    /* Ensure completion of all writes before notifying HW of replenish */
    wmb();
    writeq(rx_pi, priv->base + MLXBF_GIGE_RX_WQE_PI);

    (*rx_pkts)++;

    rx_pi_rem = rx_pi % priv->rx_q_entries;
    if (rx_pi_rem == 0)
        priv->valid_polarity ^= 1;
    rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
    rx_ci_rem = rx_ci % priv->rx_q_entries;

    if (skb)
        netif_receive_skb(skb);

    return rx_pi_rem != rx_ci_rem;
}

/* Driver poll() function called by NAPI infrastructure */
int mlxbf_gige_poll(struct napi_struct *napi, int budget)
{
    struct mlxbf_gige *priv;
    bool remaining_pkts;
    int work_done = 0;
    u64 data;

    priv = container_of(napi, struct mlxbf_gige, napi);

    mlxbf_gige_handle_tx_complete(priv);

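    /* Process received packets until the NAPI budget is exhausted or
     * no further completions are pending
     */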
    do {
        remaining_pkts = mlxbf_gige_rx_packet(priv, &work_done);
    } while (remaining_pkts && work_done < budget);

    /* If amount of work done < budget, turn off NAPI polling
     * via napi_complete_done(napi, work_done) and then
     * re-enable interrupts.
     */
    if (work_done < budget && napi_complete_done(napi, work_done)) {
        /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
         * indicate receive readiness
         */
        data = readq(priv->base + MLXBF_GIGE_INT_MASK);
        data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
        writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
    }

    return work_done;
}