// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
 *       All rights reserved.
 */

#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>

#include "spl2sw_define.h"
#include "spl2sw_desc.h"
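
/* Flush all rx descriptors: restore each buffer address and size, then
 * return ownership to the hardware by setting the "OWN" bit again.
 */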
void spl2sw_rx_descs_flush(struct spl2sw_common *comm)
{
	struct spl2sw_skb_info *rx_skbinfo;
	struct spl2sw_mac_desc *rx_desc;
	u32 i, j;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		rx_desc = comm->rx_desc[i];
		rx_skbinfo = comm->rx_skb_info[i];
		for (j = 0; j < comm->rx_desc_num[i]; j++) {
			rx_desc[j].addr1 = rx_skbinfo[j].mapping;
			rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
					  RXD_EOR | comm->rx_desc_buff_size :
					  comm->rx_desc_buff_size;
			wmb();	/* Set "OWN" bit last. */
			rx_desc[j].cmd1 = RXD_OWN;
		}
	}
}
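
/* Clean all tx descriptors: clear ownership, unmap DMA buffers and free
 * any socket buffers still held for transmission.
 */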
void spl2sw_tx_descs_clean(struct spl2sw_common *comm)
{
	u32 i;

	if (!comm->tx_desc)
		return;

	for (i = 0; i < TX_DESC_NUM; i++) {
		comm->tx_desc[i].cmd1 = 0;
		wmb();	/* Clear "OWN" bit before clearing the other fields. */
		comm->tx_desc[i].cmd2 = 0;
		comm->tx_desc[i].addr1 = 0;
		comm->tx_desc[i].addr2 = 0;

		if (comm->tx_temp_skb_info[i].mapping) {
			dma_unmap_single(&comm->pdev->dev, comm->tx_temp_skb_info[i].mapping,
					 comm->tx_temp_skb_info[i].skb->len, DMA_TO_DEVICE);
			comm->tx_temp_skb_info[i].mapping = 0;
		}

		if (comm->tx_temp_skb_info[i].skb) {
			dev_kfree_skb_any(comm->tx_temp_skb_info[i].skb);
			comm->tx_temp_skb_info[i].skb = NULL;
		}
	}
}
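
/* Clean all rx descriptors: clear ownership, unmap and free every rx
 * socket buffer, then release the per-queue skb_info arrays.
 */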
void spl2sw_rx_descs_clean(struct spl2sw_common *comm)
{
	struct spl2sw_skb_info *rx_skbinfo;
	struct spl2sw_mac_desc *rx_desc;
	u32 i, j;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		if (!comm->rx_skb_info[i])
			continue;

		rx_desc = comm->rx_desc[i];
		rx_skbinfo = comm->rx_skb_info[i];
		for (j = 0; j < comm->rx_desc_num[i]; j++) {
			rx_desc[j].cmd1 = 0;
			wmb();	/* Clear "OWN" bit before clearing the other fields. */
			rx_desc[j].cmd2 = 0;
			rx_desc[j].addr1 = 0;

			if (rx_skbinfo[j].skb) {
				dma_unmap_single(&comm->pdev->dev, rx_skbinfo[j].mapping,
						 comm->rx_desc_buff_size, DMA_FROM_DEVICE);
				dev_kfree_skb_any(rx_skbinfo[j].skb);
				rx_skbinfo[j].skb = NULL;
				rx_skbinfo[j].mapping = 0;
			}
		}

		kfree(rx_skbinfo);
		comm->rx_skb_info[i] = NULL;
	}
}
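
/* Clean both the rx and tx descriptor rings. */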
void spl2sw_descs_clean(struct spl2sw_common *comm)
{
	spl2sw_rx_descs_clean(comm);
	spl2sw_tx_descs_clean(comm);
}
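
/* Clean all descriptors and then free the shared descriptor area. */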
void spl2sw_descs_free(struct spl2sw_common *comm)
{
	u32 i;

	spl2sw_descs_clean(comm);
	comm->tx_desc = NULL;
	for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
		comm->rx_desc[i] = NULL;

	/* Free descriptor area. */
	if (comm->desc_base) {
		dma_free_coherent(&comm->pdev->dev, comm->desc_size, comm->desc_base,
				  comm->desc_dma);
		comm->desc_base = NULL;
		comm->desc_dma = 0;
		comm->desc_size = 0;
	}
}
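
/* Initialize the tx descriptor ring, including the guard descriptors,
 * to all zeros.
 */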
void spl2sw_tx_descs_init(struct spl2sw_common *comm)
{
	memset(comm->tx_desc, '\0', sizeof(struct spl2sw_mac_desc) *
	       (TX_DESC_NUM + MAC_GUARD_DESC_NUM));
}
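
/* Allocate rx skbs, map them for DMA, fill the rx descriptors and hand
 * them to the hardware. Returns 0 on success or -ENOMEM on failure.
 */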
int spl2sw_rx_descs_init(struct spl2sw_common *comm)
{
	struct spl2sw_skb_info *rx_skbinfo;
	struct spl2sw_mac_desc *rx_desc;
	struct sk_buff *skb;
	u32 mapping;
	u32 i, j;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		comm->rx_skb_info[i] = kcalloc(comm->rx_desc_num[i], sizeof(*rx_skbinfo),
					       GFP_KERNEL | GFP_DMA);
		if (!comm->rx_skb_info[i])
			goto mem_alloc_fail;

		rx_skbinfo = comm->rx_skb_info[i];
		rx_desc = comm->rx_desc[i];
		for (j = 0; j < comm->rx_desc_num[i]; j++) {
			skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
			if (!skb)
				goto mem_alloc_fail;

			rx_skbinfo[j].skb = skb;
			mapping = dma_map_single(&comm->pdev->dev, skb->data,
						 comm->rx_desc_buff_size,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&comm->pdev->dev, mapping))
				goto mem_alloc_fail;

			rx_skbinfo[j].mapping = mapping;
			rx_desc[j].addr1 = mapping;
			rx_desc[j].addr2 = 0;
			rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
					  RXD_EOR | comm->rx_desc_buff_size :
					  comm->rx_desc_buff_size;
			wmb();	/* Set "OWN" bit last. */
			rx_desc[j].cmd1 = RXD_OWN;
		}
	}

	return 0;

mem_alloc_fail:
	spl2sw_rx_descs_clean(comm);
	return -ENOMEM;
}
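
/* Allocate one coherent DMA area that holds all descriptors, laid out as:
 *
 *   tx descriptors (TX_DESC_NUM + MAC_GUARD_DESC_NUM)
 *   rx descriptors of queue 0 (rx_desc_num[0])
 *   rx descriptors of queue 1 (rx_desc_num[1])
 */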
int spl2sw_descs_alloc(struct spl2sw_common *comm)
{
	s32 desc_size;
	u32 i;

	/* Alloc descriptor area. */
	desc_size = (TX_DESC_NUM + MAC_GUARD_DESC_NUM) * sizeof(struct spl2sw_mac_desc);
	for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
		desc_size += comm->rx_desc_num[i] * sizeof(struct spl2sw_mac_desc);

	comm->desc_base = dma_alloc_coherent(&comm->pdev->dev, desc_size, &comm->desc_dma,
					     GFP_KERNEL);
	if (!comm->desc_base)
		return -ENOMEM;

	comm->desc_size = desc_size;

	/* Setup tx descriptors. */
	comm->tx_desc = comm->desc_base;

	/* Setup rx descriptors. */
	comm->rx_desc[0] = &comm->tx_desc[TX_DESC_NUM + MAC_GUARD_DESC_NUM];
	for (i = 1; i < RX_DESC_QUEUE_NUM; i++)
		comm->rx_desc[i] = comm->rx_desc[i - 1] + comm->rx_desc_num[i - 1];

	return 0;
}
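
/* Initialize descriptor bookkeeping, allocate the descriptor area and
 * set up the tx and rx rings.
 */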
int spl2sw_descs_init(struct spl2sw_common *comm)
{
	u32 i;
	int ret;

	/* Initialize rx descriptor data. */
	comm->rx_desc_num[0] = RX_QUEUE0_DESC_NUM;
	comm->rx_desc_num[1] = RX_QUEUE1_DESC_NUM;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		comm->rx_desc[i] = NULL;
		comm->rx_skb_info[i] = NULL;
		comm->rx_pos[i] = 0;
	}
	comm->rx_desc_buff_size = MAC_RX_LEN_MAX;

	/* Initialize tx descriptor data. */
	comm->tx_done_pos = 0;
	comm->tx_desc = NULL;
	comm->tx_pos = 0;
	comm->tx_desc_full = 0;
	for (i = 0; i < TX_DESC_NUM; i++)
		comm->tx_temp_skb_info[i].skb = NULL;

	/* Allocate tx and rx descriptors. */
	ret = spl2sw_descs_alloc(comm);
	if (ret)
		return ret;

	spl2sw_tx_descs_init(comm);

	return spl2sw_rx_descs_init(comm);
}