// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Microsemi SoCs FDMA driver
 *
 * Copyright (c) 2021 Microchip
 *
 * Page recycling code is mostly taken from gianfar driver.
 */

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dsa/ocelot.h>
#include <linux/netdevice.h>
#include <linux/of_platform.h>
#include <linux/skbuff.h>

#include "ocelot_fdma.h"
#include "ocelot_qs.h"

DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled);

static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data)
{
    regmap_write(ocelot->targets[FDMA], reg, data);
}

static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg)
{
    u32 retval;

    regmap_read(ocelot->targets[FDMA], reg, &retval);

    return retval;
}

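/* Helpers to convert between a DCB ring index and the DMA address of the
 * corresponding hardware descriptor, and to step an index forward or
 * backward with wrap-around. The DCBs of a ring live in one contiguous
 * coherent allocation starting at @base.
 */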
static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx)
{
    return base + idx * sizeof(struct ocelot_fdma_dcb);
}

static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma)
{
    return (dma - base) / sizeof(struct ocelot_fdma_dcb);
}

static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz)
{
    return unlikely(idx == ring_sz - 1) ? 0 : idx + 1;
}

static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz)
{
    return unlikely(idx == 0) ? ring_sz - 1 : idx - 1;
}

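/* Free-slot accounting for the RX and TX rings: one slot is always kept
 * unused so that a full ring (next_to_use one behind next_to_clean) can be
 * distinguished from an empty one (next_to_use == next_to_clean).
 */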
static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma)
{
    struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring;

    if (rx_ring->next_to_use >= rx_ring->next_to_clean)
        return OCELOT_FDMA_RX_RING_SIZE -
               (rx_ring->next_to_use - rx_ring->next_to_clean) - 1;
    else
        return rx_ring->next_to_clean - rx_ring->next_to_use - 1;
}

static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma)
{
    struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;

    if (tx_ring->next_to_use >= tx_ring->next_to_clean)
        return OCELOT_FDMA_TX_RING_SIZE -
               (tx_ring->next_to_use - tx_ring->next_to_clean) - 1;
    else
        return tx_ring->next_to_clean - tx_ring->next_to_use - 1;
}

static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma)
{
    struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;

    return tx_ring->next_to_clean == tx_ring->next_to_use;
}

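/* Point the channel's linked-list pointer at the first DCB to process and
 * activate the channel.
 */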
static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
                      int chan)
{
    ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma);
    /* Barrier to force memory writes to DCB to be completed before starting
     * the channel.
     */
    wmb();
    ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
}

static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot)
{
    return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
}

static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
{
    u32 safe;

    return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe,
                     safe & BIT(chan), 0,
                     OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
}

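/* Fill a DCB with a data pointer and length. The hardware presumably only
 * accepts 32-bit aligned pointers and lengths, so the low address bits are
 * carried in the BLOCKO (block offset) field of the status word instead.
 */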
static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
                     dma_addr_t dma_addr,
                     size_t size)
{
    u32 offset = dma_addr & 0x3;

    dcb->llp = 0;
    dcb->datap = ALIGN_DOWN(dma_addr, 4);
    dcb->datal = ALIGN_DOWN(size, 4);
    dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset);
}

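/* Allocate and DMA-map a full page for an RX buffer. The page is consumed
 * in two halves of OCELOT_FDMA_RX_SIZE bytes each, selected through
 * page_offset (see the recycling logic in ocelot_fdma_add_rx_frag()).
 */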
static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot,
                      struct ocelot_fdma_rx_buf *rxb)
{
    dma_addr_t mapping;
    struct page *page;

    page = dev_alloc_page();
    if (unlikely(!page))
        return false;

    mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE,
                   DMA_FROM_DEVICE);
    if (unlikely(dma_mapping_error(ocelot->dev, mapping))) {
        __free_page(page);
        return false;
    }

    rxb->page = page;
    rxb->page_offset = 0;
    rxb->dma_addr = mapping;

    return true;
}

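/* Refill up to @alloc_cnt RX descriptors starting at next_to_use, reusing
 * recycled page halves when possible and chaining each DCB to the next one
 * through its LLP field.
 */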
static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt)
{
    struct ocelot_fdma *fdma = ocelot->fdma;
    struct ocelot_fdma_rx_ring *rx_ring;
    struct ocelot_fdma_rx_buf *rxb;
    struct ocelot_fdma_dcb *dcb;
    dma_addr_t dma_addr;
    int ret = 0;
    u16 idx;

    rx_ring = &fdma->rx_ring;
    idx = rx_ring->next_to_use;

    while (alloc_cnt--) {
        rxb = &rx_ring->bufs[idx];
        /* Try to reuse the page; allocate a new one if it was consumed */
        if (unlikely(!rxb->page)) {
            if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) {
                dev_err_ratelimited(ocelot->dev,
                            "Failed to allocate RX buffer\n");
                ret = -ENOMEM;
                break;
            }
        }

        dcb = &rx_ring->dcbs[idx];
        dma_addr = rxb->dma_addr + rxb->page_offset;
        ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE);

        idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
        /* Chain the DCB to the next one */
        dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx);
    }

    rx_ring->next_to_use = idx;
    rx_ring->next_to_alloc = idx;

    return ret;
}

static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot,
                       struct ocelot_fdma_tx_buf *tx_buf,
                       struct ocelot_fdma_dcb *dcb,
                       struct sk_buff *skb)
{
    dma_addr_t mapping;

    mapping = dma_map_single(ocelot->dev, skb->data, skb->len,
                 DMA_TO_DEVICE);
    if (unlikely(dma_mapping_error(ocelot->dev, mapping)))
        return false;

    dma_unmap_addr_set(tx_buf, dma_addr, mapping);

    ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE);
    tx_buf->skb = skb;
    dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len);
    dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF;

    return true;
}

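/* The RX channel presumably stops by itself once it reaches a DCB whose
 * LLP is NULL. If that happened, disable the channel and report it so the
 * caller can restart it after refilling the ring.
 */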
static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot)
{
    u32 llp;

    /* Check if the FDMA has hit the DCB with LLP == NULL */
    llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN));
    if (unlikely(llp))
        return false;

    ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE,
               BIT(MSCC_FDMA_XTR_CHAN));

    return true;
}

static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring)
{
    struct ocelot_fdma_dcb *dcb;
    unsigned int idx;

    idx = ocelot_fdma_idx_prev(rx_ring->next_to_use,
                   OCELOT_FDMA_RX_RING_SIZE);
    dcb = &rx_ring->dcbs[idx];
    dcb->llp = 0;
}

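/* Restart the RX channel after it stopped on the NULL-LLP DCB: wait until
 * the channel is safe to reconfigure, re-terminate the ring at the DCB
 * before next_to_use, then reactivate the channel right after the DCB it
 * stopped on.
 */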
static void ocelot_fdma_rx_restart(struct ocelot *ocelot)
{
    struct ocelot_fdma *fdma = ocelot->fdma;
    struct ocelot_fdma_rx_ring *rx_ring;
    const u8 chan = MSCC_FDMA_XTR_CHAN;
    dma_addr_t new_llp, dma_base;
    unsigned int idx;
    u32 llp_prev;
    int ret;

    rx_ring = &fdma->rx_ring;
    ret = ocelot_fdma_wait_chan_safe(ocelot, chan);
    if (ret) {
        dev_err_ratelimited(ocelot->dev,
                    "Unable to stop RX channel\n");
        return;
    }

    ocelot_fdma_rx_set_llp(rx_ring);

    /* The FDMA stopped on the last DCB, which contained a NULL LLP. Since
     * we processed some DCBs in RX, there is free space, and we must set
     * DCB_LLP to point to the next DCB.
     */
    llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan));
    dma_base = rx_ring->dcbs_dma;

    /* Get the next DMA addr located after the LLP == NULL DCB */
    idx = ocelot_fdma_dma_idx(dma_base, llp_prev);
    idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
    new_llp = ocelot_fdma_idx_dma(dma_base, idx);

    /* Finally reactivate the channel */
    ocelot_fdma_activate_chan(ocelot, new_llp, chan);
}

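/* Attach a received block to the skb: the first block of a frame becomes
 * the linear part, subsequent ones are added as page fragments. Returns
 * true when the page can be recycled for a future RX buffer (gianfar-style
 * half-page flipping), i.e. when we hold the only reference to it.
 */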
static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat,
                    struct sk_buff *skb, bool first)
{
    int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat);
    struct page *page = rxb->page;

    if (likely(first)) {
        skb_put(skb, size);
    } else {
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                rxb->page_offset, size, OCELOT_FDMA_RX_SIZE);
    }

    /* Try to reuse page */
    if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page)))
        return false;

    /* Change offset to the other half */
    rxb->page_offset ^= OCELOT_FDMA_RX_SIZE;

    page_ref_inc(page);

    return true;
}

static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot,
                      struct ocelot_fdma_rx_buf *old_rxb)
{
    struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring;
    struct ocelot_fdma_rx_buf *new_rxb;

    new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc];
    rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc,
                              OCELOT_FDMA_RX_RING_SIZE);

    /* Copy page reference */
    *new_rxb = *old_rxb;

    /* Sync for use by the device */
    dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr,
                     old_rxb->page_offset,
                     OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE);
}

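/* Consume one RX buffer: build the skb head on the first block of a frame
 * or append the block to the skb under reassembly, then either recycle the
 * page half or unmap the page if it cannot be reused.
 */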
static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat,
                       struct ocelot_fdma_rx_buf *rxb,
                       struct sk_buff *skb)
{
    bool first = false;

    /* Allocate skb head and data */
    if (likely(!skb)) {
        void *buff_addr = page_address(rxb->page) +
                  rxb->page_offset;

        skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE);
        if (unlikely(!skb)) {
            dev_err_ratelimited(ocelot->dev,
                        "build_skb failed!\n");
            return NULL;
        }
        first = true;
    }

    dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr,
                      rxb->page_offset, OCELOT_FDMA_RX_SIZE,
                      DMA_FROM_DEVICE);

    if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) {
        /* Reuse the free half of the page for the next_to_alloc DCB */
        ocelot_fdma_reuse_rx_page(ocelot, rxb);
    } else {
        /* The page cannot be reused, unmap it */
        dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
                   DMA_FROM_DEVICE);
    }

    /* Clear the RX buffer content */
    rxb->page = NULL;

    return skb;
}

static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb)
{
    struct net_device *ndev;
    void *xfh = skb->data;
    u64 timestamp;
    u64 src_port;

    skb_pull(skb, OCELOT_TAG_LEN);

    ocelot_xfh_get_src_port(xfh, &src_port);
    if (unlikely(src_port >= ocelot->num_phys_ports))
        return false;

    ndev = ocelot_port_to_netdev(ocelot, src_port);
    if (unlikely(!ndev))
        return false;

    pskb_trim(skb, skb->len - ETH_FCS_LEN);

    skb->dev = ndev;
    skb->protocol = eth_type_trans(skb, skb->dev);
    skb->dev->stats.rx_bytes += skb->len;
    skb->dev->stats.rx_packets++;

    if (ocelot->ptp) {
        ocelot_xfh_get_rew_val(xfh, &timestamp);
        ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
    }

    if (likely(!skb_defer_rx_timestamp(skb)))
        netif_receive_skb(skb);

    return true;
}

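/* NAPI RX handler: walk the ring from next_to_clean, reassemble multi-DCB
 * frames, hand complete frames to ocelot_fdma_receive_skb() and refill the
 * descriptors that were consumed. A partially received frame is carried
 * across calls in rx_ring->skb.
 */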
static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget)
{
    struct ocelot_fdma *fdma = ocelot->fdma;
    struct ocelot_fdma_rx_ring *rx_ring;
    struct ocelot_fdma_rx_buf *rxb;
    struct ocelot_fdma_dcb *dcb;
    struct sk_buff *skb;
    int work_done = 0;
    int cleaned_cnt;
    u32 stat;
    u16 idx;

    cleaned_cnt = ocelot_fdma_rx_ring_free(fdma);
    rx_ring = &fdma->rx_ring;
    skb = rx_ring->skb;

    while (budget--) {
        idx = rx_ring->next_to_clean;
        dcb = &rx_ring->dcbs[idx];
        stat = dcb->stat;
        if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0)
            break;

        /* The new packet is a start of frame but we already have an skb
         * set: we probably lost an EOF packet, so free the skb.
         */
        if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) {
            dev_kfree_skb(skb);
            skb = NULL;
        }

        rxb = &rx_ring->bufs[idx];
        /* Fetch the next-to-clean buffer from the rx_ring */
        skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb);
        if (unlikely(!skb))
            break;

        work_done++;
        cleaned_cnt++;

        idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
        rx_ring->next_to_clean = idx;

        if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT ||
                 stat & MSCC_FDMA_DCB_STAT_PD)) {
            dev_err_ratelimited(ocelot->dev,
                        "DCB aborted or pruned\n");
            dev_kfree_skb(skb);
            skb = NULL;
            continue;
        }

        /* We still need to process the other fragments of the packet
         * before delivering it to the network stack.
         */
        if (!(stat & MSCC_FDMA_DCB_STAT_EOF))
            continue;

        if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb)))
            dev_kfree_skb(skb);

        skb = NULL;
    }

    rx_ring->skb = skb;

    if (cleaned_cnt)
        ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt);

    return work_done;
}

static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot)
{
    struct ocelot_port_private *priv;
    struct ocelot_port *ocelot_port;
    struct net_device *dev;
    int port;

    for (port = 0; port < ocelot->num_phys_ports; port++) {
        ocelot_port = ocelot->ports[port];
        if (!ocelot_port)
            continue;
        priv = container_of(ocelot_port, struct ocelot_port_private,
                    port);
        dev = priv->dev;

        if (unlikely(netif_queue_stopped(dev)))
            netif_wake_queue(dev);
    }
}

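/* Reclaim transmitted skbs: unmap and free every descriptor the hardware
 * marked as done, wake the netdev queues if room was made, and reload the
 * TX channel if it ran onto the NULL LLP while more DCBs are pending.
 */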
static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget)
{
    struct ocelot_fdma *fdma = ocelot->fdma;
    struct ocelot_fdma_tx_ring *tx_ring;
    struct ocelot_fdma_tx_buf *buf;
    unsigned int new_null_llp_idx;
    struct ocelot_fdma_dcb *dcb;
    bool end_of_list = false;
    struct sk_buff *skb;
    dma_addr_t dma;
    u32 dcb_llp;
    u16 ntc;
    int ret;

    tx_ring = &fdma->tx_ring;

    /* Purge the TX packets that have been sent, up to the NULL LLP or the
     * end of the done list.
     */
    while (!ocelot_fdma_tx_ring_empty(fdma)) {
        ntc = tx_ring->next_to_clean;
        dcb = &tx_ring->dcbs[ntc];
        if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD))
            break;

        buf = &tx_ring->bufs[ntc];
        skb = buf->skb;
        dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr),
                 skb->len, DMA_TO_DEVICE);
        napi_consume_skb(skb, budget);
        dcb_llp = dcb->llp;

        /* Only update after accessing all dcb fields */
        tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc,
                                  OCELOT_FDMA_TX_RING_SIZE);

        /* If we hit the NULL LLP, stop, we might need to reload the FDMA */
        if (dcb_llp == 0) {
            end_of_list = true;
            break;
        }
    }

    /* Wake the netdev queues only if the cleanup left room in the TX ring */
    if (ocelot_fdma_tx_ring_free(fdma))
        ocelot_fdma_wakeup_netdev(ocelot);

    /* If there are still some DCBs to be processed by the FDMA or if the
     * pending list is empty, there is no need to restart the FDMA.
     */
    if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma))
        return;

    ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN);
    if (ret) {
        dev_warn(ocelot->dev,
             "Failed to wait for TX channel to stop\n");
        return;
    }

    /* Set a NULL LLP on the last DCB used */
    new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use,
                        OCELOT_FDMA_TX_RING_SIZE);
    dcb = &tx_ring->dcbs[new_null_llp_idx];
    dcb->llp = 0;

    dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean);
    ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
}

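/* NAPI poll handler shared by RX and TX: TX reclaim runs first and does
 * not count against the budget, then RX runs with the full budget. The
 * channel interrupts are re-enabled only once the RX work fits within the
 * budget.
 */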
static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget)
{
    struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi);
    struct ocelot *ocelot = fdma->ocelot;
    int work_done = 0;
    bool rx_stopped;

    ocelot_fdma_tx_cleanup(ocelot, budget);

    rx_stopped = ocelot_fdma_check_stop_rx(ocelot);

    work_done = ocelot_fdma_rx_get(ocelot, budget);

    if (rx_stopped)
        ocelot_fdma_rx_restart(ocelot);

    if (work_done < budget) {
        napi_complete_done(&fdma->napi, work_done);
        ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
                   BIT(MSCC_FDMA_INJ_CHAN) |
                   BIT(MSCC_FDMA_XTR_CHAN));
    }

    return work_done;
}

static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id)
{
    u32 ident, llp, frm, err, err_code;
    struct ocelot *ocelot = dev_id;

    ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT);
    frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM);
    llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP);

    ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident);
    ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident);
    if (frm || llp) {
        ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
        napi_schedule(&ocelot->fdma->napi);
    }

    err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR);
    if (unlikely(err)) {
        err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE);
        dev_err_ratelimited(ocelot->dev,
                    "Error! chans mask: %#x, code: %#x\n",
                    err, err_code);

        ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err);
        ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code);
    }

    return IRQ_HANDLED;
}

static void ocelot_fdma_send_skb(struct ocelot *ocelot,
                 struct ocelot_fdma *fdma, struct sk_buff *skb)
{
    struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
    struct ocelot_fdma_tx_buf *tx_buf;
    struct ocelot_fdma_dcb *dcb;
    dma_addr_t dma;
    u16 next_idx;

    dcb = &tx_ring->dcbs[tx_ring->next_to_use];
    tx_buf = &tx_ring->bufs[tx_ring->next_to_use];
    if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) {
        dev_kfree_skb_any(skb);
        return;
    }

    next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use,
                    OCELOT_FDMA_TX_RING_SIZE);
    skb_tx_timestamp(skb);

    /* If the FDMA TX chan is empty, then enqueue the DCB directly */
    if (ocelot_fdma_tx_ring_empty(fdma)) {
        dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma,
                      tx_ring->next_to_use);
        ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
    } else {
        /* Chain the DCBs */
        dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx);
    }

    tx_ring->next_to_use = next_idx;
}

static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
                   struct sk_buff *skb, struct net_device *dev)
{
    int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0);
    int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
    void *ifh;
    int err;

    if (unlikely(needed_headroom || needed_tailroom ||
             skb_header_cloned(skb))) {
        err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
                       GFP_ATOMIC);
        if (unlikely(err)) {
            dev_kfree_skb_any(skb);
            return 1;
        }
    }

    err = skb_linearize(skb);
    if (err) {
        net_err_ratelimited("%s: skb_linearize error (%d)!\n",
                    dev->name, err);
        dev_kfree_skb_any(skb);
        return 1;
    }

    ifh = skb_push(skb, OCELOT_TAG_LEN);
    skb_put(skb, ETH_FCS_LEN);
    memset(ifh, 0, OCELOT_TAG_LEN);
    ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));

    return 0;
}

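/* Queue an skb for injection on @port. Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY after stopping the queue when no TX descriptor is free.
 * A frame that cannot be prepared is dropped and still reported as
 * NETDEV_TX_OK.
 */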
int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
                 struct sk_buff *skb, struct net_device *dev)
{
    struct ocelot_fdma *fdma = ocelot->fdma;
    int ret = NETDEV_TX_OK;

    spin_lock(&fdma->tx_ring.xmit_lock);

    if (ocelot_fdma_tx_ring_free(fdma) == 0) {
        netif_stop_queue(dev);
        ret = NETDEV_TX_BUSY;
        goto out;
    }

    if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev))
        goto out;

    ocelot_fdma_send_skb(ocelot, fdma, skb);

out:
    spin_unlock(&fdma->tx_ring.xmit_lock);

    return ret;
}

static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot)
{
    struct ocelot_fdma *fdma = ocelot->fdma;
    struct ocelot_fdma_rx_ring *rx_ring;
    struct ocelot_fdma_rx_buf *rxb;
    u16 idx;

    rx_ring = &fdma->rx_ring;
    idx = rx_ring->next_to_clean;

    /* Free the pages held in the RX ring */
    while (idx != rx_ring->next_to_use) {
        rxb = &rx_ring->bufs[idx];
        dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
                   DMA_FROM_DEVICE);
        __free_page(rxb->page);
        idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
    }

    if (fdma->rx_ring.skb)
        dev_kfree_skb_any(fdma->rx_ring.skb);
}

static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot)
{
    struct ocelot_fdma *fdma = ocelot->fdma;
    struct ocelot_fdma_tx_ring *tx_ring;
    struct ocelot_fdma_tx_buf *txb;
    struct sk_buff *skb;
    u16 idx;

    tx_ring = &fdma->tx_ring;
    idx = tx_ring->next_to_clean;

    while (idx != tx_ring->next_to_use) {
        txb = &tx_ring->bufs[idx];
        skb = txb->skb;
        dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr),
                 skb->len, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
        idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE);
    }
}

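/* Allocate the single coherent block backing both DCB rings (TX first,
 * then RX), fix up its 32-bit alignment, then pre-fill the RX ring with
 * mapped pages.
 */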
static int ocelot_fdma_rings_alloc(struct ocelot *ocelot)
{
    struct ocelot_fdma *fdma = ocelot->fdma;
    struct ocelot_fdma_dcb *dcbs;
    unsigned int adjust;
    dma_addr_t dcbs_dma;
    int ret;

    /* Create a pool of consistent memory blocks for hardware descriptors */
    fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev,
                          OCELOT_DCBS_HW_ALLOC_SIZE,
                          &fdma->dcbs_dma_base, GFP_KERNEL);
    if (!fdma->dcbs_base)
        return -ENOMEM;

    /* DCBs must be aligned on a 32-bit boundary */
    dcbs = fdma->dcbs_base;
    dcbs_dma = fdma->dcbs_dma_base;
    if (!IS_ALIGNED(dcbs_dma, 4)) {
        adjust = dcbs_dma & 0x3;
        dcbs_dma = ALIGN(dcbs_dma, 4);
        dcbs = (void *)dcbs + adjust;
    }

    /* TX queue */
    fdma->tx_ring.dcbs = dcbs;
    fdma->tx_ring.dcbs_dma = dcbs_dma;
    spin_lock_init(&fdma->tx_ring.xmit_lock);

    /* RX queue */
    fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE;
    fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE;
    ret = ocelot_fdma_alloc_rx_buffs(ocelot,
                     ocelot_fdma_tx_ring_free(fdma));
    if (ret) {
        ocelot_fdma_free_rx_ring(ocelot);
        return ret;
    }

    /* Set the last DCB LLP to NULL; this is normally done when restarting
     * the RX chan, but it is needed here for the first run.
     */
    ocelot_fdma_rx_set_llp(&fdma->rx_ring);

    return 0;
}

void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev)
{
    struct ocelot_fdma *fdma = ocelot->fdma;

    dev->needed_headroom = OCELOT_TAG_LEN;
    dev->needed_tailroom = ETH_FCS_LEN;

    if (fdma->ndev)
        return;

    fdma->ndev = dev;
    netif_napi_add_weight(dev, &fdma->napi, ocelot_fdma_napi_poll,
                  OCELOT_FDMA_WEIGHT);
}

void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev)
{
    struct ocelot_fdma *fdma = ocelot->fdma;

    if (fdma->ndev == dev) {
        netif_napi_del(&fdma->napi);
        fdma->ndev = NULL;
    }
}

void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot)
{
    struct device *dev = ocelot->dev;
    struct ocelot_fdma *fdma;
    int ret;

    fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL);
    if (!fdma)
        return;

    ocelot->fdma = fdma;
    ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32);

    ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);

    fdma->ocelot = ocelot;
    fdma->irq = platform_get_irq_byname(pdev, "fdma");
    ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0,
                   dev_name(dev), ocelot);
    if (ret)
        goto err_free_fdma;

    ret = ocelot_fdma_rings_alloc(ocelot);
    if (ret)
        goto err_free_irq;

    static_branch_enable(&ocelot_fdma_enabled);

    return;

err_free_irq:
    /* The dev_id must match the one passed to devm_request_irq() above */
    devm_free_irq(dev, fdma->irq, ocelot);
err_free_fdma:
    devm_kfree(dev, fdma);

    ocelot->fdma = NULL;
}

void ocelot_fdma_start(struct ocelot *ocelot)
{
    struct ocelot_fdma *fdma = ocelot->fdma;

    /* Reconfigure for extraction and injection using DMA */
    ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0);
    ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0);

    ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0);

    ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff);
    ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff);

    ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA,
               BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
    ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA,
               BIT(MSCC_FDMA_XTR_CHAN));
    ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
               BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));

    napi_enable(&fdma->napi);

    ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma,
                  MSCC_FDMA_XTR_CHAN);
}

void ocelot_fdma_deinit(struct ocelot *ocelot)
{
    struct ocelot_fdma *fdma = ocelot->fdma;

    ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
    ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
               BIT(MSCC_FDMA_XTR_CHAN));
    ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
               BIT(MSCC_FDMA_INJ_CHAN));
    napi_synchronize(&fdma->napi);
    napi_disable(&fdma->napi);

    ocelot_fdma_free_rx_ring(ocelot);
    ocelot_fdma_free_tx_ring(ocelot);
}