// SPDX-License-Identifier: GPL-2.0+

#include "lan966x_main.h"

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
    return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

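/* Allocate one page for an RX data block (DB) and DMA-map it towards the
 * device. On success the mapped address is stored in the DB's dataptr; on
 * mapping failure the page is freed again and NULL is returned.
 */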
static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
                                               struct lan966x_db *db)
{
    struct lan966x *lan966x = rx->lan966x;
    dma_addr_t dma_addr;
    struct page *page;

    page = dev_alloc_pages(rx->page_order);
    if (unlikely(!page))
        return NULL;

    dma_addr = dma_map_page(lan966x->dev, page, 0,
                            PAGE_SIZE << rx->page_order,
                            DMA_FROM_DEVICE);
    if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
        goto free_page;

    db->dataptr = dma_addr;

    return page;

free_page:
    __free_pages(page, rx->page_order);
    return NULL;
}

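/* Unmap and free every RX page that backs a DB in the RX DCB ring. */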
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
    struct lan966x *lan966x = rx->lan966x;
    struct lan966x_rx_dcb *dcb;
    struct lan966x_db *db;
    int i, j;

    for (i = 0; i < FDMA_DCB_MAX; ++i) {
        dcb = &rx->dcbs[i];

        for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
            db = &dcb->db[j];
            dma_unmap_single(lan966x->dev,
                             (dma_addr_t)db->dataptr,
                             PAGE_SIZE << rx->page_order,
                             DMA_FROM_DEVICE);
            __free_pages(rx->page[i][j], rx->page_order);
        }
    }
}

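/* Append a DCB to the RX chain: mark each of its DBs to raise an interrupt
 * when filled, terminate the DCB with an invalid nextptr and link the
 * previous tail entry to it.
 */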
static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
                                    struct lan966x_rx_dcb *dcb,
                                    u64 nextptr)
{
    struct lan966x_db *db;
    int i;

    for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
        db = &dcb->db[i];
        db->status = FDMA_DCB_STATUS_INTR;
    }

    dcb->nextptr = FDMA_DCB_INVALID_DATA;
    dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);

    rx->last_entry->nextptr = nextptr;
    rx->last_entry = dcb;
}

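/* Allocate the coherent DCB ring for RX, back every DB with a freshly mapped
 * page and chain all the DCBs together.
 */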
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
    struct lan966x *lan966x = rx->lan966x;
    struct lan966x_rx_dcb *dcb;
    struct lan966x_db *db;
    struct page *page;
    int i, j;
    int size;

    /* Calculate how many pages are needed to allocate the DCBs */
    size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
    size = ALIGN(size, PAGE_SIZE);

    rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
    if (!rx->dcbs)
        return -ENOMEM;

    rx->last_entry = rx->dcbs;
    rx->db_index = 0;
    rx->dcb_index = 0;

    /* Now allocate the DBs of each DCB */
    for (i = 0; i < FDMA_DCB_MAX; ++i) {
        dcb = &rx->dcbs[i];
        dcb->info = 0;

        /* For each DB allocate a page and map it to the DB dataptr. */
        for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
            db = &dcb->db[j];
            page = lan966x_fdma_rx_alloc_page(rx, db);
            if (!page)
                return -ENOMEM;

            db->status = 0;
            rx->page[i][j] = page;
        }

        lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
    }

    return 0;
}

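/* Free the coherent DCB ring that was allocated in lan966x_fdma_rx_alloc(). */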
static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
    struct lan966x *lan966x = rx->lan966x;
    u32 size;

    /* The size must match the allocation made in lan966x_fdma_rx_alloc(),
     * so use the RX DCB size here.
     */
    size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
    size = ALIGN(size, PAGE_SIZE);
    dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}

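/* Bring up the RX (extraction) channel: write the DCB list head, configure
 * the channel, release the extraction port, enable the DB interrupt for this
 * channel and finally activate it.
 */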
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
    struct lan966x *lan966x = rx->lan966x;
    u32 mask;

    /* When activating a channel, the address of the first DCB must be
     * written before the channel is activated.
     */
    lan_wr(lower_32_bits((u64)rx->dma), lan966x,
           FDMA_DCB_LLP(rx->channel_id));
    lan_wr(upper_32_bits((u64)rx->dma), lan966x,
           FDMA_DCB_LLP1(rx->channel_id));

    lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
           FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
           FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
           FDMA_CH_CFG_CH_MEM_SET(1),
           lan966x, FDMA_CH_CFG(rx->channel_id));

    /* Start fdma */
    lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
            FDMA_PORT_CTRL_XTR_STOP,
            lan966x, FDMA_PORT_CTRL(0));

    /* Enable interrupts */
    mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
    mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
    mask |= BIT(rx->channel_id);
    lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
            FDMA_INTR_DB_ENA_INTR_DB_ENA,
            lan966x, FDMA_INTR_DB_ENA);

    /* Activate the channel */
    lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
            FDMA_CH_ACTIVATE_CH_ACTIVATE,
            lan966x, FDMA_CH_ACTIVATE);
}

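/* Disable the RX channel: request the disable, poll until the hardware
 * reports the channel inactive, then discard any pending DBs.
 */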
static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
    struct lan966x *lan966x = rx->lan966x;
    u32 val;

    /* Disable the channel */
    lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
            FDMA_CH_DISABLE_CH_DISABLE,
            lan966x, FDMA_CH_DISABLE);

    readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                              val, !(val & BIT(rx->channel_id)),
                              READL_SLEEP_US, READL_TIMEOUT_US);

    lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
            FDMA_CH_DB_DISCARD_DB_DISCARD,
            lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
    struct lan966x *lan966x = rx->lan966x;

    lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
            FDMA_CH_RELOAD_CH_RELOAD,
            lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
                                    struct lan966x_tx_dcb *dcb)
{
    dcb->nextptr = FDMA_DCB_INVALID_DATA;
    dcb->info = 0;
}

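/* Allocate the TX bookkeeping array and the coherent TX DCB ring, and
 * initialize every DCB and DB to an empty state.
 */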
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
    struct lan966x *lan966x = tx->lan966x;
    struct lan966x_tx_dcb *dcb;
    struct lan966x_db *db;
    int size;
    int i, j;

    tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
                           GFP_KERNEL);
    if (!tx->dcbs_buf)
        return -ENOMEM;

    /* Calculate how many pages are needed to allocate the DCBs */
    size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
    size = ALIGN(size, PAGE_SIZE);
    tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
    if (!tx->dcbs)
        goto out;

    /* Now initialize the DBs of each DCB */
    for (i = 0; i < FDMA_DCB_MAX; ++i) {
        dcb = &tx->dcbs[i];

        for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
            db = &dcb->db[j];
            db->dataptr = 0;
            db->status = 0;
        }

        lan966x_fdma_tx_add_dcb(tx, dcb);
    }

    return 0;

out:
    kfree(tx->dcbs_buf);
    return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
    struct lan966x *lan966x = tx->lan966x;
    int size;

    kfree(tx->dcbs_buf);

    size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
    size = ALIGN(size, PAGE_SIZE);
    dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}

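/* Counterpart of lan966x_fdma_rx_start() for the injection direction: write
 * the DCB list head, configure the channel, release the injection port,
 * enable the DB interrupt and activate the channel.
 */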
static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
    struct lan966x *lan966x = tx->lan966x;
    u32 mask;

    /* When activating a channel, the address of the first DCB must be
     * written before the channel is activated.
     */
    lan_wr(lower_32_bits((u64)tx->dma), lan966x,
           FDMA_DCB_LLP(tx->channel_id));
    lan_wr(upper_32_bits((u64)tx->dma), lan966x,
           FDMA_DCB_LLP1(tx->channel_id));

    lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
           FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
           FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
           FDMA_CH_CFG_CH_MEM_SET(1),
           lan966x, FDMA_CH_CFG(tx->channel_id));

    /* Start fdma */
    lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
            FDMA_PORT_CTRL_INJ_STOP,
            lan966x, FDMA_PORT_CTRL(0));

    /* Enable interrupts */
    mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
    mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
    mask |= BIT(tx->channel_id);
    lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
            FDMA_INTR_DB_ENA_INTR_DB_ENA,
            lan966x, FDMA_INTR_DB_ENA);

    /* Activate the channel */
    lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
            FDMA_CH_ACTIVATE_CH_ACTIVATE,
            lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
    struct lan966x *lan966x = tx->lan966x;
    u32 val;

    /* Disable the channel */
    lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
            FDMA_CH_DISABLE_CH_DISABLE,
            lan966x, FDMA_CH_DISABLE);

    readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                              val, !(val & BIT(tx->channel_id)),
                              READL_SLEEP_US, READL_TIMEOUT_US);

    lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
            FDMA_CH_DB_DISCARD_DB_DISCARD,
            lan966x, FDMA_CH_DB_DISCARD);

    tx->activated = false;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
    struct lan966x *lan966x = tx->lan966x;

    /* Write the registers to reload the channel */
    lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
            FDMA_CH_RELOAD_CH_RELOAD,
            lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
    struct lan966x_port *port;
    int i;

    for (i = 0; i < lan966x->num_phys_ports; ++i) {
        port = lan966x->ports[i];
        if (!port)
            continue;

        if (netif_queue_stopped(port->dev))
            netif_wake_queue(port->dev);
    }
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
    struct lan966x_port *port;
    int i;

    for (i = 0; i < lan966x->num_phys_ports; ++i) {
        port = lan966x->ports[i];
        if (!port)
            continue;

        netif_stop_queue(port->dev);
    }
}

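/* Reclaim completed TX buffers: for every DCB the hardware has marked DONE,
 * update the stats, unmap the buffer and free the skb (unless it is still
 * held for a two-step PTP timestamp), then wake the queues if anything was
 * reclaimed.
 */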
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
    struct lan966x_tx *tx = &lan966x->tx;
    struct lan966x_tx_dcb_buf *dcb_buf;
    struct lan966x_db *db;
    unsigned long flags;
    bool clear = false;
    int i;

    spin_lock_irqsave(&lan966x->tx_lock, flags);
    for (i = 0; i < FDMA_DCB_MAX; ++i) {
        dcb_buf = &tx->dcbs_buf[i];

        if (!dcb_buf->used)
            continue;

        db = &tx->dcbs[i].db[0];
        if (!(db->status & FDMA_DCB_STATUS_DONE))
            continue;

        dcb_buf->dev->stats.tx_packets++;
        dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;

        dcb_buf->used = false;
        dma_unmap_single(lan966x->dev,
                         dcb_buf->dma_addr,
                         dcb_buf->skb->len,
                         DMA_TO_DEVICE);
        if (!dcb_buf->ptp)
            dev_kfree_skb_any(dcb_buf->skb);

        clear = true;
    }

    if (clear)
        lan966x_fdma_wakeup_netdev(lan966x);

    spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
    struct lan966x_db *db;

    /* Check if there is any data */
    db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
    if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
        return false;

    return true;
}

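/* Build an skb around the current RX page: extract the source port and
 * timestamp from the IFH, strip the IFH (and, unless NETIF_F_RXFCS is set,
 * the FCS), and fill in the RX metadata before handing the skb back.
 */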
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
{
    struct lan966x *lan966x = rx->lan966x;
    u64 src_port, timestamp;
    struct lan966x_db *db;
    struct sk_buff *skb;
    struct page *page;

    /* Get the received frame and unmap it */
    db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
    page = rx->page[rx->dcb_index][rx->db_index];
    skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
    if (unlikely(!skb))
        goto unmap_page;

    dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr,
                     FDMA_DCB_STATUS_BLOCKL(db->status),
                     DMA_FROM_DEVICE);
    skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

    lan966x_ifh_get_src_port(skb->data, &src_port);
    lan966x_ifh_get_timestamp(skb->data, &timestamp);

    if (WARN_ON(src_port >= lan966x->num_phys_ports))
        goto free_skb;

    skb->dev = lan966x->ports[src_port]->dev;
    skb_pull(skb, IFH_LEN * sizeof(u32));

    if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
        skb_trim(skb, skb->len - ETH_FCS_LEN);

    lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
    skb->protocol = eth_type_trans(skb, skb->dev);

    if (lan966x->bridge_mask & BIT(src_port)) {
        skb->offload_fwd_mark = 1;

        skb_reset_network_header(skb);
        if (!lan966x_hw_offload(lan966x, src_port, skb))
            skb->offload_fwd_mark = 0;
    }

    skb->dev->stats.rx_bytes += skb->len;
    skb->dev->stats.rx_packets++;

    return skb;

free_skb:
    kfree_skb(skb);
unmap_page:
    dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
                   FDMA_DCB_STATUS_BLOCKL(db->status),
                   DMA_FROM_DEVICE);
    __free_pages(page, rx->page_order);

    return NULL;
}

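/* NAPI poll: first reclaim completed TX buffers, then receive up to 'weight'
 * frames, refill the consumed DCBs with fresh pages and re-enable the DB
 * interrupts once all pending work fits within this poll.
 */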
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
    struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
    struct lan966x_rx *rx = &lan966x->rx;
    int dcb_reload = rx->dcb_index;
    struct lan966x_rx_dcb *old_dcb;
    struct lan966x_db *db;
    struct sk_buff *skb;
    struct page *page;
    int counter = 0;
    u64 nextptr;

    lan966x_fdma_tx_clear_buf(lan966x, weight);

    /* Get all the received skbs */
    while (counter < weight) {
        if (!lan966x_fdma_rx_more_frames(rx))
            break;

        skb = lan966x_fdma_rx_get_frame(rx);

        rx->page[rx->dcb_index][rx->db_index] = NULL;
        rx->dcb_index++;
        rx->dcb_index &= FDMA_DCB_MAX - 1;

        if (!skb)
            break;

        napi_gro_receive(&lan966x->napi, skb);
        counter++;
    }

    /* Allocate new pages and map them */
    while (dcb_reload != rx->dcb_index) {
        db = &rx->dcbs[dcb_reload].db[rx->db_index];
        page = lan966x_fdma_rx_alloc_page(rx, db);
        if (unlikely(!page))
            break;
        rx->page[dcb_reload][rx->db_index] = page;

        old_dcb = &rx->dcbs[dcb_reload];
        dcb_reload++;
        dcb_reload &= FDMA_DCB_MAX - 1;

        nextptr = rx->dma + ((unsigned long)old_dcb -
                             (unsigned long)rx->dcbs);
        lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
        lan966x_fdma_rx_reload(rx);
    }

    if (counter < weight && napi_complete_done(napi, counter))
        lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

    return counter;
}

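/* FDMA interrupt handler: on a DB interrupt, mask further DB interrupts,
 * acknowledge the pending ones and defer the work to NAPI; on an error
 * interrupt, warn and acknowledge it.
 */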
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
    struct lan966x *lan966x = args;
    u32 db, err, err_type;

    db = lan_rd(lan966x, FDMA_INTR_DB);
    err = lan_rd(lan966x, FDMA_INTR_ERR);

    if (db) {
        lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
        lan_wr(db, lan966x, FDMA_INTR_DB);

        napi_schedule(&lan966x->napi);
    }

    if (err) {
        err_type = lan_rd(lan966x, FDMA_ERRORS);

        WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

        lan_wr(err, lan966x, FDMA_INTR_ERR);
        lan_wr(err_type, lan966x, FDMA_ERRORS);
    }

    return IRQ_HANDLED;
}

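/* Find a free TX DCB, skipping the entry still marked as last in use (its
 * nextptr is still needed to chain the next frame). Returns -1 when no DCB
 * is free.
 */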
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
    struct lan966x_tx_dcb_buf *dcb_buf;
    int i;

    for (i = 0; i < FDMA_DCB_MAX; ++i) {
        dcb_buf = &tx->dcbs_buf[i];
        if (!dcb_buf->used && i != tx->last_in_use)
            return i;
    }

    return -1;
}

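/* Transmit one frame on the FDMA injection channel: pick a free DCB, pad and
 * expand the skb as needed, prepend the IFH, map the buffer, and either
 * chain the DCB into the running channel or activate the channel for the
 * first frame.
 */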
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
    struct lan966x_port *port = netdev_priv(dev);
    struct lan966x *lan966x = port->lan966x;
    struct lan966x_tx_dcb_buf *next_dcb_buf;
    struct lan966x_tx_dcb *next_dcb, *dcb;
    struct lan966x_tx *tx = &lan966x->tx;
    struct lan966x_db *next_db;
    int needed_headroom;
    int needed_tailroom;
    dma_addr_t dma_addr;
    int next_to_use;
    int err;

    /* Get next index */
    next_to_use = lan966x_fdma_get_next_dcb(tx);
    if (next_to_use < 0) {
        netif_stop_queue(dev);
        return NETDEV_TX_BUSY;
    }

    if (skb_put_padto(skb, ETH_ZLEN)) {
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

    /* Make sure there is room for the IFH in front and the FCS behind */
    needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
    needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
    if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
        err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
                               GFP_ATOMIC);
        if (unlikely(err)) {
            dev->stats.tx_dropped++;
            err = NETDEV_TX_OK;
            goto release;
        }
    }

    skb_tx_timestamp(skb);
    skb_push(skb, IFH_LEN * sizeof(u32));
    memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
    skb_put(skb, 4);    /* room for the FCS */

    dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
                              DMA_TO_DEVICE);
    if (dma_mapping_error(lan966x->dev, dma_addr)) {
        dev->stats.tx_dropped++;
        err = NETDEV_TX_OK;
        goto release;
    }

    /* Setup next dcb */
    next_dcb = &tx->dcbs[next_to_use];
    next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

    next_db = &next_dcb->db[0];
    next_db->dataptr = dma_addr;
    next_db->status = FDMA_DCB_STATUS_SOF |
                      FDMA_DCB_STATUS_EOF |
                      FDMA_DCB_STATUS_INTR |
                      FDMA_DCB_STATUS_BLOCKO(0) |
                      FDMA_DCB_STATUS_BLOCKL(skb->len);

    /* Fill up the buffer */
    next_dcb_buf = &tx->dcbs_buf[next_to_use];
    next_dcb_buf->skb = skb;
    next_dcb_buf->dma_addr = dma_addr;
    next_dcb_buf->used = true;
    next_dcb_buf->ptp = false;
    next_dcb_buf->dev = dev;

    if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
        LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
        next_dcb_buf->ptp = true;

    if (likely(lan966x->tx.activated)) {
        /* Chain the currently last DCB to the new one */
        dcb = &tx->dcbs[tx->last_in_use];
        dcb->nextptr = tx->dma + (next_to_use *
                                  sizeof(struct lan966x_tx_dcb));

        lan966x_fdma_tx_reload(tx);
    } else {
        /* This is the first frame, so just activate the channel */
        lan966x->tx.activated = true;
        lan966x_fdma_tx_activate(tx);
    }

    /* This DCB is now the last one in use */
    tx->last_in_use = next_to_use;

    return NETDEV_TX_OK;

release:
    if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
        LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
        lan966x_ptp_txtstamp_release(port, skb);

    dev_kfree_skb_any(skb);
    return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
    int max_mtu = 0;
    int i;

    for (i = 0; i < lan966x->num_phys_ports; ++i) {
        int mtu;

        if (!lan966x->ports[i])
            continue;

        mtu = lan966x->ports[i]->dev->mtu;
        if (mtu > max_mtu)
            max_mtu = mtu;
    }

    return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
    return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

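/* Re-create the RX and TX DCB rings for a new MTU while the datapath is
 * quiesced. The old rings are saved first so they can be restored if the
 * new allocations fail.
 */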
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
    void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf;
    dma_addr_t rx_dma, tx_dma;
    u32 size;
    int err;

    /* Save the current state so it can be freed later or restored on error */
    rx_dma = lan966x->rx.dma;
    tx_dma = lan966x->tx.dma;
    rx_dcbs = lan966x->rx.dcbs;
    tx_dcbs = lan966x->tx.dcbs;
    tx_dcbs_buf = lan966x->tx.dcbs_buf;

    napi_synchronize(&lan966x->napi);
    napi_disable(&lan966x->napi);
    lan966x_fdma_stop_netdev(lan966x);

    lan966x_fdma_rx_disable(&lan966x->rx);
    lan966x_fdma_rx_free_pages(&lan966x->rx);
    lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
    err = lan966x_fdma_rx_alloc(&lan966x->rx);
    if (err)
        goto restore;
    lan966x_fdma_rx_start(&lan966x->rx);

    size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
    size = ALIGN(size, PAGE_SIZE);
    dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

    lan966x_fdma_tx_disable(&lan966x->tx);
    err = lan966x_fdma_tx_alloc(&lan966x->tx);
    if (err)
        goto restore_tx;

    size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
    size = ALIGN(size, PAGE_SIZE);
    dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma);

    kfree(tx_dcbs_buf);

    lan966x_fdma_wakeup_netdev(lan966x);
    napi_enable(&lan966x->napi);

    return err;

restore:
    lan966x->rx.dma = rx_dma;
    lan966x->rx.dcbs = rx_dcbs;
    lan966x_fdma_rx_start(&lan966x->rx);

restore_tx:
    lan966x->tx.dma = tx_dma;
    lan966x->tx.dcbs = tx_dcbs;
    lan966x->tx.dcbs_buf = tx_dcbs_buf;

    return err;
}

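/* If the largest MTU across the ports requires a different RX page order,
 * stop the CPU port, drain its queues and reload the FDMA rings.
 */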
int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
    int max_mtu;
    int err;
    u32 val;

    max_mtu = lan966x_fdma_get_max_mtu(lan966x);
    max_mtu += IFH_LEN * sizeof(u32);

    if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
        lan966x->rx.page_order)
        return 0;

    /* Disable the CPU port */
    lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
            QSYS_SW_PORT_MODE_PORT_ENA,
            lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

    /* Flush the CPU queues */
    readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
                       val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
                       READL_SLEEP_US, READL_TIMEOUT_US);

    /* Add a sleep in case there are frames in flight between the queues
     * and the CPU port
     */
    usleep_range(1000, 2000);

    err = lan966x_fdma_reload(lan966x, max_mtu);

    /* Re-enable the CPU port */
    lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
            QSYS_SW_PORT_MODE_PORT_ENA,
            lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

    return err;
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
    if (lan966x->fdma_ndev)
        return;

    lan966x->fdma_ndev = dev;
    netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll,
                   NAPI_POLL_WEIGHT);
    napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
    if (lan966x->fdma_ndev == dev) {
        netif_napi_del(&lan966x->napi);
        lan966x->fdma_ndev = NULL;
    }
}

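/* Allocate the RX and TX rings and start the extraction channel. The
 * injection channel is only activated when the first frame is transmitted.
 */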
int lan966x_fdma_init(struct lan966x *lan966x)
{
    int err;

    if (!lan966x->fdma)
        return 0;

    lan966x->rx.lan966x = lan966x;
    lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
    lan966x->tx.lan966x = lan966x;
    lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
    lan966x->tx.last_in_use = -1;

    err = lan966x_fdma_rx_alloc(&lan966x->rx);
    if (err)
        return err;

    err = lan966x_fdma_tx_alloc(&lan966x->tx);
    if (err) {
        lan966x_fdma_rx_free(&lan966x->rx);
        return err;
    }

    lan966x_fdma_rx_start(&lan966x->rx);

    return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
    if (!lan966x->fdma)
        return;

    lan966x_fdma_rx_disable(&lan966x->rx);
    lan966x_fdma_tx_disable(&lan966x->tx);

    napi_synchronize(&lan966x->napi);
    napi_disable(&lan966x->napi);

    lan966x_fdma_rx_free_pages(&lan966x->rx);
    lan966x_fdma_rx_free(&lan966x->rx);
    lan966x_fdma_tx_free(&lan966x->tx);
}