// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
 *       All rights reserved.
 */

#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/spinlock.h>
#include <linux/of_mdio.h>

#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_int.h"

int spl2sw_rx_poll(struct napi_struct *napi, int budget)
{
        struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi);
        struct spl2sw_mac_desc *desc, *h_desc;
        struct net_device_stats *stats;
        struct sk_buff *skb, *new_skb;
        struct spl2sw_skb_info *sinfo;
        int budget_left = budget;
        unsigned long flags;
        u32 rx_pos, pkg_len;
        u32 num, rx_count;
        s32 queue;
        u32 mask;
        int port;
        u32 cmd;
        u32 len;

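        /* Process the high-priority queue (queue 0) first, then the low-priority queue. */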
        for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) {
                rx_pos = comm->rx_pos[queue];
                rx_count = comm->rx_desc_num[queue];

                for (num = 0; num < rx_count && budget_left; num++) {
                        sinfo = comm->rx_skb_info[queue] + rx_pos;
                        desc = comm->rx_desc[queue] + rx_pos;
                        cmd = desc->cmd1;

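                        /* Stop if this descriptor is still owned by the hardware. */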
                        if (cmd & RXD_OWN)
                                break;

                        port = FIELD_GET(RXD_PKT_SP, cmd);
                        if (port < MAX_NETDEV_NUM && comm->ndev[port])
                                stats = &comm->ndev[port]->stats;
                        else
                                goto spl2sw_rx_poll_rec_err;

                        pkg_len = FIELD_GET(RXD_PKT_LEN, cmd);
                        if (unlikely((cmd & RXD_ERR_CODE) || pkg_len < ETH_ZLEN + 4)) {
                                stats->rx_length_errors++;
                                stats->rx_dropped++;
                                goto spl2sw_rx_poll_rec_err;
                        }

                        dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
                                         comm->rx_desc_buff_size, DMA_FROM_DEVICE);

                        skb = sinfo->skb;
                        skb_put(skb, pkg_len - 4);      /* Minus 4-byte FCS */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->protocol = eth_type_trans(skb, comm->ndev[port]);
                        len = skb->len;
                        netif_receive_skb(skb);

                        stats->rx_packets++;
                        stats->rx_bytes += len;

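                        /* Allocate a new skb to refill this ring slot. */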
                        new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
                        if (unlikely(!new_skb)) {
                                desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
                                             RXD_EOR : 0;
                                sinfo->skb = NULL;
                                sinfo->mapping = 0;
                                desc->addr1 = 0;
                                goto spl2sw_rx_poll_alloc_err;
                        }

                        sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
                                                        comm->rx_desc_buff_size,
                                                        DMA_FROM_DEVICE);
                        if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
                                dev_kfree_skb_irq(new_skb);
                                desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
                                             RXD_EOR : 0;
                                sinfo->skb = NULL;
                                sinfo->mapping = 0;
                                desc->addr1 = 0;
                                goto spl2sw_rx_poll_alloc_err;
                        }

                        sinfo->skb = new_skb;
                        desc->addr1 = sinfo->mapping;

spl2sw_rx_poll_rec_err:
                        desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
                                     RXD_EOR | comm->rx_desc_buff_size :
                                     comm->rx_desc_buff_size;

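                        /* Ensure the other descriptor fields are written before setting RXD_OWN. */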
                        wmb();
                        desc->cmd1 = RXD_OWN;

spl2sw_rx_poll_alloc_err:
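                        /* Move rx_pos to the next position. */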
                        rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1;

                        budget_left--;

                        /* If there are packets in the high-priority queue,
                         * stop processing the low-priority queue.
                         */
                        if (queue == 1 && !(h_desc->cmd1 & RXD_OWN))
                                break;
                }

                comm->rx_pos[queue] = rx_pos;
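                /* Save a pointer to the last rx descriptor of the high-priority queue. */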
                if (queue == 0)
                        h_desc = comm->rx_desc[queue] + rx_pos;
        }

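        /* Unmask RX interrupts (clear MAC_INT_RX in the interrupt mask register). */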
        spin_lock_irqsave(&comm->int_mask_lock, flags);
        mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        mask &= ~MAC_INT_RX;
        writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        spin_unlock_irqrestore(&comm->int_mask_lock, flags);

        napi_complete(napi);
        return budget - budget_left;
}

int spl2sw_tx_poll(struct napi_struct *napi, int budget)
{
        struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi);
        struct spl2sw_skb_info *skbinfo;
        struct net_device_stats *stats;
        int budget_left = budget;
        unsigned long flags;
        u32 tx_done_pos;
        u32 mask;
        u32 cmd;
        int i;

        spin_lock(&comm->tx_lock);

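        /* Reclaim descriptors the hardware has finished transmitting, starting at tx_done_pos. */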
        tx_done_pos = comm->tx_done_pos;
        while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) {
                cmd = comm->tx_desc[tx_done_pos].cmd1;
                if (cmd & TXD_OWN)
                        break;

                skbinfo = &comm->tx_temp_skb_info[tx_done_pos];
                if (unlikely(!skbinfo->skb))
                        goto spl2sw_tx_poll_next;

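                /* Map the lowest set bit of the to-VLAN (port) field back to a net device index. */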
                i = ffs(FIELD_GET(TXD_VLAN, cmd)) - 1;
                if (i < MAX_NETDEV_NUM && comm->ndev[i])
                        stats = &comm->ndev[i]->stats;
                else
                        goto spl2sw_tx_poll_unmap;

                if (unlikely(cmd & TXD_ERR_CODE)) {
                        stats->tx_errors++;
                } else {
                        stats->tx_packets++;
                        stats->tx_bytes += skbinfo->len;
                }

spl2sw_tx_poll_unmap:
                dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len,
                                 DMA_TO_DEVICE);
                skbinfo->mapping = 0;
                dev_kfree_skb_irq(skbinfo->skb);
                skbinfo->skb = NULL;

spl2sw_tx_poll_next:
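                /* Move tx_done_pos to the next position. */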
                tx_done_pos = ((tx_done_pos + 1) == TX_DESC_NUM) ? 0 : tx_done_pos + 1;

                if (comm->tx_desc_full == 1)
                        comm->tx_desc_full = 0;

                budget_left--;
        }

        comm->tx_done_pos = tx_done_pos;
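        /* If the TX ring is no longer full, wake any stopped queues. */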
        if (!comm->tx_desc_full)
                for (i = 0; i < MAX_NETDEV_NUM; i++)
                        if (comm->ndev[i] && netif_queue_stopped(comm->ndev[i]))
                                netif_wake_queue(comm->ndev[i]);

        spin_unlock(&comm->tx_lock);

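        /* Unmask TX interrupts (clear MAC_INT_TX in the interrupt mask register). */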
        spin_lock_irqsave(&comm->int_mask_lock, flags);
        mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        mask &= ~MAC_INT_TX;
        writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        spin_unlock_irqrestore(&comm->int_mask_lock, flags);

        napi_complete(napi);
        return budget - budget_left;
}

irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id)
{
        struct spl2sw_common *comm = dev_id;
        u32 status;
        u32 mask;
        int i;

        status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
        if (unlikely(!status)) {
                dev_dbg(&comm->pdev->dev, "Interrupt status is zero!\n");
                goto spl2sw_ethernet_int_out;
        }
        writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);

        if (status & MAC_INT_RX) {
                /* Mask RX interrupts until the RX NAPI poll unmasks them. */
                spin_lock(&comm->int_mask_lock);
                mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
                mask |= MAC_INT_RX;
                writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
                spin_unlock(&comm->int_mask_lock);

                if (unlikely(status & MAC_INT_RX_DES_ERR)) {
                        for (i = 0; i < MAX_NETDEV_NUM; i++)
                                if (comm->ndev[i]) {
                                        comm->ndev[i]->stats.rx_fifo_errors++;
                                        break;
                                }
                        dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n");
                }

                napi_schedule(&comm->rx_napi);
        }

        if (status & MAC_INT_TX) {
                /* Mask TX interrupts until the TX NAPI poll unmasks them. */
                spin_lock(&comm->int_mask_lock);
                mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
                mask |= MAC_INT_TX;
                writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
                spin_unlock(&comm->int_mask_lock);

                if (unlikely(status & MAC_INT_TX_DES_ERR)) {
                        for (i = 0; i < MAX_NETDEV_NUM; i++)
                                if (comm->ndev[i]) {
                                        comm->ndev[i]->stats.tx_fifo_errors++;
                                        break;
                                }
                        dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n");

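                        /* On a TX descriptor error, unmask TX interrupts again instead of scheduling NAPI. */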
                        spin_lock(&comm->int_mask_lock);
                        mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
                        mask &= ~MAC_INT_TX;
                        writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
                        spin_unlock(&comm->int_mask_lock);
                } else {
                        napi_schedule(&comm->tx_napi);
                }
        }

spl2sw_ethernet_int_out:
        return IRQ_HANDLED;
}