// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  Specialised functions for managing Ring mode

  Copyright(C) 2011  STMicroelectronics Ltd

  It defines all the functions used to handle the normal/enhanced
  descriptors in case the DMA is configured to work in chained or
  in ring mode.

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include "stmmac.h"

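/* Handle the linear (non-paged) part of a jumbo skb on transmit: in ring
 * mode each descriptor carries two buffer pointers (des2/des3), so the
 * head is mapped on one descriptor, or on two when it exceeds 8 KiB.
 * Returns the ring entry of the last descriptor used, or -1 when a DMA
 * mapping fails.
 */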
static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
    struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
    unsigned int nopaged_len = skb_headlen(skb);
    struct stmmac_priv *priv = tx_q->priv_data;
    unsigned int entry = tx_q->cur_tx;
    unsigned int bmax, len, des2;
    struct dma_desc *desc;

    if (priv->extend_desc)
        desc = (struct dma_desc *)(tx_q->dma_etx + entry);
    else
        desc = tx_q->dma_tx + entry;

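    /* Enhanced descriptors can address up to 8 KiB from one descriptor,
     * normal descriptors only 2 KiB.
     */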
    if (priv->plat->enh_desc)
        bmax = BUF_SIZE_8KiB;
    else
        bmax = BUF_SIZE_2KiB;

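    /* Bytes left over for a second descriptor when the head exceeds bmax;
     * only used in the nopaged_len > BUF_SIZE_8KiB path below.
     */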
    len = nopaged_len - bmax;

    if (nopaged_len > BUF_SIZE_8KiB) {

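        /* First descriptor: map the first bmax bytes of the head. */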
        des2 = dma_map_single(priv->device, skb->data, bmax,
                      DMA_TO_DEVICE);
        desc->des2 = cpu_to_le32(des2);
        if (dma_mapping_error(priv->device, des2))
            return -1;

        tx_q->tx_skbuff_dma[entry].buf = des2;
        tx_q->tx_skbuff_dma[entry].len = bmax;
        tx_q->tx_skbuff_dma[entry].is_jumbo = true;

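        /* des3 points at the second half of the same mapping. */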
        desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
        stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
                STMMAC_RING_MODE, 0, false, skb->len);
        tx_q->tx_skbuff[entry] = NULL;
        entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);

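        /* Second descriptor for the remaining len bytes; it is flagged as
         * the last segment only when the skb carries no paged fragments.
         */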
        if (priv->extend_desc)
            desc = (struct dma_desc *)(tx_q->dma_etx + entry);
        else
            desc = tx_q->dma_tx + entry;

        des2 = dma_map_single(priv->device, skb->data + bmax, len,
                      DMA_TO_DEVICE);
        desc->des2 = cpu_to_le32(des2);
        if (dma_mapping_error(priv->device, des2))
            return -1;
        tx_q->tx_skbuff_dma[entry].buf = des2;
        tx_q->tx_skbuff_dma[entry].len = len;
        tx_q->tx_skbuff_dma[entry].is_jumbo = true;

        desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
        stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
                STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
                skb->len);
    } else {
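        /* The whole head fits in one descriptor, split across the
         * des2/des3 buffer pair.
         */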
        des2 = dma_map_single(priv->device, skb->data,
                      nopaged_len, DMA_TO_DEVICE);
        desc->des2 = cpu_to_le32(des2);
        if (dma_mapping_error(priv->device, des2))
            return -1;
        tx_q->tx_skbuff_dma[entry].buf = des2;
        tx_q->tx_skbuff_dma[entry].len = nopaged_len;
        tx_q->tx_skbuff_dma[entry].is_jumbo = true;
        desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
        stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
                STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
                skb->len);
    }

    tx_q->cur_tx = entry;

    return entry;
}

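/* Any frame whose head is at least BUF_SIZE_4KiB is treated as jumbo in
 * ring mode, whatever the descriptor type; enh_desc is unused here.
 */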
static unsigned int is_jumbo_frm(int len, int enh_desc)
{
    unsigned int ret = 0;

    if (len >= BUF_SIZE_4KiB)
        ret = 1;

    return ret;
}

static void refill_desc3(void *priv_ptr, struct dma_desc *p)
{
    struct stmmac_rx_queue *rx_q = priv_ptr;
    struct stmmac_priv *priv = rx_q->priv_data;

    /* Fill DES3 in case of RING mode */
    if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
        p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}

/* In ring mode we need to fill the desc3 because it is used as buffer */
static void init_desc3(struct dma_desc *p)
{
    p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}

static void clean_desc3(void *priv_ptr, struct dma_desc *p)
{
    struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
    struct stmmac_priv *priv = tx_q->priv_data;
    unsigned int entry = tx_q->dirty_tx;

    /* des3 is only used for jumbo frames tx or time stamping */
    if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
             (tx_q->tx_skbuff_dma[entry].last_segment &&
              !priv->extend_desc && priv->hwts_tx_en)))
        p->des3 = 0;
}

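/* Ask for 16 KiB buffers only when the MTU cannot be served by 8 KiB
 * ones; returning 0 lets the caller keep its default buffer size.
 */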
static int set_16kib_bfsize(int mtu)
{
    int ret = 0;

    if (unlikely(mtu > BUF_SIZE_8KiB))
        ret = BUF_SIZE_16KiB;

    return ret;
}

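/* Ring mode callbacks wired into the stmmac core. */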
const struct stmmac_mode_ops ring_mode_ops = {
    .is_jumbo_frm = is_jumbo_frm,
    .jumbo_frm = jumbo_frm,
    .refill_desc3 = refill_desc3,
    .init_desc3 = init_desc3,
    .clean_desc3 = clean_desc3,
    .set_16kib_bfsize = set_16kib_bfsize,
};