// SPDX-License-Identifier: GPL-2.0-only
/* Altera Triple-Speed Ethernet MAC driver
 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
 *
 * Contributors:
 *   Dalon Westergreen
 *   Thomas Chou
 *   Ian Abbott
 *   Yuriy Kozlov
 *   Tobias Klauser
 *   Andriy Smolskyy
 *   Roman Bulgakov
 *   Dmytro Mytarchuk
 *   Matthew Gerlach
 *
 * Original driver contributed by SLS.
 * Major updates contributed by GlobalLogic
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <asm/cacheflush.h>

#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdma.h"
#include "altera_msgdma.h"

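/* instance_count starts at ~0 so the first atomic_add_return(1, ...)
 * in altera_tse_phy_get_addr_mdio_create() yields instance id 0.
 */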
static atomic_t instance_count = ATOMIC_INIT(~0);

/* Module parameters */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                    NETIF_MSG_LINK | NETIF_MSG_IFUP |
                    NETIF_MSG_IFDOWN);

#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
module_param(dma_rx_num, int, 0644);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");

#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
module_param(dma_tx_num, int, 0644);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");

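/* Illustrative usage (not a required configuration): larger rings and
 * full debug output can be requested at module load time, e.g.
 *
 *   modprobe altera_tse dma_rx_num=128 dma_tx_num=128 debug=16
 */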

#define POLL_PHY (-1)

/* Make sure the DMA buffer size is larger than the max frame size
 * plus some alignment offset and a VLAN header. With a max frame size
 * of 1518 bytes, a VLAN header adds 4 bytes and the alignment headroom
 * adds 2, so 2048 is ample.
 */
#define ALTERA_RXDMABUFFER_SIZE 2048

/* Allow the network stack to resume queueing packets after we've
 * finished transmitting at least 1/4 of the packets in the queue.
 */
#define TSE_TX_THRESH(x)    (x->tx_ring_size / 4)

#define TXQUEUESTOP_THRESHOLD   2

static const struct of_device_id altera_tse_ids[];

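/* Number of free slots in the Tx ring. tx_prod and tx_cons are
 * free-running counters, so unsigned wrap-around keeps the result
 * correct; e.g. (illustrative) with a 64-entry ring, tx_prod = 70 and
 * tx_cons = 10 leave 10 + 64 - 70 - 1 = 3 usable slots.
 */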
static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
    return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}

/* PCS Register read/write functions
 */
static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
{
    return csrrd32(priv->mac_dev,
               tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
}

static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
                u16 value)
{
    csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
}

/* Check PCS scratch memory */
static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
{
    sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
    return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
}

/* MDIO specific functions
 */
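/* The MAC maps each MDIO space as an array of 32-bit registers, so PHY
 * register N lives at byte offset mdio_phy1 + N * 4 and only the low
 * 16 bits of each word carry data (the same scheme the PCS accessors
 * above use for mdio_phy0).
 */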
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
    struct net_device *ndev = bus->priv;
    struct altera_tse_private *priv = netdev_priv(ndev);

    /* set MDIO address */
    csrwr32((mii_id & 0x1f), priv->mac_dev,
        tse_csroffs(mdio_phy1_addr));

    /* get the data */
    return csrrd32(priv->mac_dev,
               tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
}

static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                 u16 value)
{
    struct net_device *ndev = bus->priv;
    struct altera_tse_private *priv = netdev_priv(ndev);

    /* set MDIO address */
    csrwr32((mii_id & 0x1f), priv->mac_dev,
        tse_csroffs(mdio_phy1_addr));

    /* write the data */
    csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
    return 0;
}

static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    int ret;
    struct device_node *mdio_node = NULL;
    struct mii_bus *mdio = NULL;
    struct device_node *child_node = NULL;

    for_each_child_of_node(priv->device->of_node, child_node) {
        if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
            mdio_node = child_node;
            break;
        }
    }

    if (mdio_node) {
        netdev_dbg(dev, "FOUND MDIO subnode\n");
    } else {
        netdev_dbg(dev, "NO MDIO subnode\n");
        return 0;
    }

    mdio = mdiobus_alloc();
    if (mdio == NULL) {
        netdev_err(dev, "Error allocating MDIO bus\n");
        ret = -ENOMEM;
        goto put_node;
    }

    mdio->name = ALTERA_TSE_RESOURCE_NAME;
    mdio->read = &altera_tse_mdio_read;
    mdio->write = &altera_tse_mdio_write;
    snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);

    mdio->priv = dev;
    mdio->parent = priv->device;

    ret = of_mdiobus_register(mdio, mdio_node);
    if (ret != 0) {
        netdev_err(dev, "Cannot register MDIO bus %s\n",
               mdio->id);
        goto out_free_mdio;
    }
    of_node_put(mdio_node);

    if (netif_msg_drv(priv))
        netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

    priv->mdio = mdio;
    return 0;
out_free_mdio:
    mdiobus_free(mdio);
    mdio = NULL;
put_node:
    of_node_put(mdio_node);
    return ret;
}

static void altera_tse_mdio_destroy(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);

    if (priv->mdio == NULL)
        return;

    if (netif_msg_drv(priv))
        netdev_info(dev, "MDIO bus %s: removed\n",
                priv->mdio->id);

    mdiobus_unregister(priv->mdio);
    mdiobus_free(priv->mdio);
    priv->mdio = NULL;
}

static int tse_init_rx_buffer(struct altera_tse_private *priv,
                  struct tse_buffer *rxbuffer, int len)
{
    rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
    if (!rxbuffer->skb)
        return -ENOMEM;

    rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
                        len,
                        DMA_FROM_DEVICE);

    if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
        netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
        dev_kfree_skb_any(rxbuffer->skb);
        return -EINVAL;
    }
    rxbuffer->dma_addr &= (dma_addr_t)~3;
    rxbuffer->len = len;
    return 0;
}

static void tse_free_rx_buffer(struct altera_tse_private *priv,
                   struct tse_buffer *rxbuffer)
{
    struct sk_buff *skb = rxbuffer->skb;
    dma_addr_t dma_addr = rxbuffer->dma_addr;

    if (skb != NULL) {
        if (dma_addr)
            dma_unmap_single(priv->device, dma_addr,
                     rxbuffer->len,
                     DMA_FROM_DEVICE);
        dev_kfree_skb_any(skb);
        rxbuffer->skb = NULL;
        rxbuffer->dma_addr = 0;
    }
}

/* Unmap and free Tx buffer resources
 */
static void tse_free_tx_buffer(struct altera_tse_private *priv,
                   struct tse_buffer *buffer)
{
    if (buffer->dma_addr) {
        if (buffer->mapped_as_page)
            dma_unmap_page(priv->device, buffer->dma_addr,
                       buffer->len, DMA_TO_DEVICE);
        else
            dma_unmap_single(priv->device, buffer->dma_addr,
                     buffer->len, DMA_TO_DEVICE);
        buffer->dma_addr = 0;
    }
    if (buffer->skb) {
        dev_kfree_skb_any(buffer->skb);
        buffer->skb = NULL;
    }
}

static int alloc_init_skbufs(struct altera_tse_private *priv)
{
    unsigned int rx_descs = priv->rx_ring_size;
    unsigned int tx_descs = priv->tx_ring_size;
    int ret = -ENOMEM;
    int i;

    /* Create Rx ring buffer */
    priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
                GFP_KERNEL);
    if (!priv->rx_ring)
        goto err_rx_ring;

    /* Create Tx ring buffer */
    priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
                GFP_KERNEL);
    if (!priv->tx_ring)
        goto err_tx_ring;

    priv->tx_cons = 0;
    priv->tx_prod = 0;

    /* Init Rx ring */
    for (i = 0; i < rx_descs; i++) {
        ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
                     priv->rx_dma_buf_sz);
        if (ret)
            goto err_init_rx_buffers;
    }

    priv->rx_cons = 0;
    priv->rx_prod = 0;

    return 0;
err_init_rx_buffers:
    while (--i >= 0)
        tse_free_rx_buffer(priv, &priv->rx_ring[i]);
    kfree(priv->tx_ring);
err_tx_ring:
    kfree(priv->rx_ring);
err_rx_ring:
    return ret;
}

static void free_skbufs(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    unsigned int rx_descs = priv->rx_ring_size;
    unsigned int tx_descs = priv->tx_ring_size;
    int i;

    /* Release the DMA TX/RX socket buffers */
    for (i = 0; i < rx_descs; i++)
        tse_free_rx_buffer(priv, &priv->rx_ring[i]);
    for (i = 0; i < tx_descs; i++)
        tse_free_tx_buffer(priv, &priv->tx_ring[i]);

    /* Release both ring arrays allocated in alloc_init_skbufs() */
    kfree(priv->tx_ring);
    kfree(priv->rx_ring);
}

/* Reallocate the skb for the reception process
 */
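/* rx_prod and rx_cons are free-running: rx_cons counts buffers consumed
 * by tse_rx() and rx_prod counts buffers re-armed with a fresh skb, so
 * rx_cons - rx_prod is the number of ring slots waiting for a refill.
 */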
static inline void tse_rx_refill(struct altera_tse_private *priv)
{
    unsigned int rxsize = priv->rx_ring_size;
    unsigned int entry;
    int ret;

    for (; priv->rx_cons - priv->rx_prod > 0;
            priv->rx_prod++) {
        entry = priv->rx_prod % rxsize;
        if (likely(priv->rx_ring[entry].skb == NULL)) {
            ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
                priv->rx_dma_buf_sz);
            if (unlikely(ret != 0))
                break;
            priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
        }
    }
}

/* Pull out the VLAN tag and fix up the packet
 */
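/* Illustrative frame layout (DA = destination MAC, SA = source MAC):
 *   before: | DA | SA | 0x8100 | TCI | type | payload |
 *   after:  | DA | SA | type | payload |  (tag carried in skb metadata)
 */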
static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
    struct ethhdr *eth_hdr;
    u16 vid;

    if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
        !__vlan_get_tag(skb, &vid)) {
        eth_hdr = (struct ethhdr *)skb->data;
        memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
        skb_pull(skb, VLAN_HLEN);
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
    }
}

/* Receive a packet: retrieve and pass over to upper levels
 */
static int tse_rx(struct altera_tse_private *priv, int limit)
{
    unsigned int count = 0;
    unsigned int next_entry;
    struct sk_buff *skb;
    unsigned int entry = priv->rx_cons % priv->rx_ring_size;
    u32 rxstatus;
    u16 pktlength;
    u16 pktstatus;

    /* Check count < limit first, because get_rx_status() changes the
     * response fifo: once a pending response has been read we must
     * process that packet. (Reading the last byte of the response pops
     * the value from the fifo.)
     */
    while ((count < limit) &&
           ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
        pktstatus = rxstatus >> 16;
        pktlength = rxstatus & 0xffff;

        if ((pktstatus & 0xFF) || (pktlength == 0))
            netdev_err(priv->dev,
                   "RCV pktstatus %08X pktlength %08X\n",
                   pktstatus, pktlength);

        /* DMA transfer from the TSE starts with 2 additional bytes for
         * IP payload alignment. The status returned by get_rx_status()
         * contains the DMA transfer length, so the packet is 2 bytes
         * shorter.
         */
        pktlength -= 2;

        count++;
        next_entry = (++priv->rx_cons) % priv->rx_ring_size;

        skb = priv->rx_ring[entry].skb;
        if (unlikely(!skb)) {
            netdev_err(priv->dev,
                   "%s: Inconsistent Rx descriptor chain\n",
                   __func__);
            priv->dev->stats.rx_dropped++;
            break;
        }
        priv->rx_ring[entry].skb = NULL;

        skb_put(skb, pktlength);

        dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
                 priv->rx_ring[entry].len, DMA_FROM_DEVICE);

        if (netif_msg_pktdata(priv)) {
            netdev_info(priv->dev, "frame received %d bytes\n",
                    pktlength);
            print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
                       16, 1, skb->data, pktlength, true);
        }

        tse_rx_vlan(priv->dev, skb);

        skb->protocol = eth_type_trans(skb, priv->dev);
        skb_checksum_none_assert(skb);

        napi_gro_receive(&priv->napi, skb);

        priv->dev->stats.rx_packets++;
        priv->dev->stats.rx_bytes += pktlength;

        entry = next_entry;

        tse_rx_refill(priv);
    }

    return count;
}

/* Reclaim resources after transmission completes
 */
static int tse_tx_complete(struct altera_tse_private *priv)
{
    unsigned int txsize = priv->tx_ring_size;
    u32 ready;
    unsigned int entry;
    struct tse_buffer *tx_buff;
    int txcomplete = 0;

    spin_lock(&priv->tx_lock);

    ready = priv->dmaops->tx_completions(priv);

    /* Free sent buffers */
    while (ready && (priv->tx_cons != priv->tx_prod)) {
        entry = priv->tx_cons % txsize;
        tx_buff = &priv->tx_ring[entry];

        if (netif_msg_tx_done(priv))
            netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
                   __func__, priv->tx_prod, priv->tx_cons);

        if (likely(tx_buff->skb))
            priv->dev->stats.tx_packets++;

        tse_free_tx_buffer(priv, tx_buff);
        priv->tx_cons++;

        txcomplete++;
        ready--;
    }

    if (unlikely(netif_queue_stopped(priv->dev) &&
             tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
        if (netif_msg_tx_done(priv))
            netdev_dbg(priv->dev, "%s: restart transmit\n",
                   __func__);
        netif_wake_queue(priv->dev);
    }

    spin_unlock(&priv->tx_lock);
    return txcomplete;
}

/* NAPI polling function
 */
static int tse_poll(struct napi_struct *napi, int budget)
{
    struct altera_tse_private *priv =
            container_of(napi, struct altera_tse_private, napi);
    int rxcomplete = 0;
    unsigned long flags;

    tse_tx_complete(priv);

    rxcomplete = tse_rx(priv, budget);

    if (rxcomplete < budget) {
        napi_complete_done(napi, rxcomplete);

        netdev_dbg(priv->dev,
               "NAPI Complete, did %d packets with budget %d\n",
               rxcomplete, budget);

        spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
        priv->dmaops->enable_rxirq(priv);
        priv->dmaops->enable_txirq(priv);
        spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
    }
    return rxcomplete;
}

/* DMA TX & RX FIFO interrupt routing
 */
static irqreturn_t altera_isr(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct altera_tse_private *priv;

    if (unlikely(!dev)) {
        pr_err("%s: invalid dev pointer\n", __func__);
        return IRQ_NONE;
    }
    priv = netdev_priv(dev);

    spin_lock(&priv->rxdma_irq_lock);
    /* reset IRQs */
    priv->dmaops->clear_rxirq(priv);
    priv->dmaops->clear_txirq(priv);
    spin_unlock(&priv->rxdma_irq_lock);

    if (likely(napi_schedule_prep(&priv->napi))) {
        spin_lock(&priv->rxdma_irq_lock);
        priv->dmaops->disable_rxirq(priv);
        priv->dmaops->disable_txirq(priv);
        spin_unlock(&priv->rxdma_irq_lock);
        __napi_schedule(&priv->napi);
    }

    return IRQ_HANDLED;
}

/* Transmit a packet (called by the kernel). Dispatches to either the
 * SGDMA or the MSGDMA transmit method. Scatter/gather is not
 * supported, so the packet is assumed to consist of a single
 * physically contiguous fragment starting at skb->data with length
 * skb_headlen(skb).
 */
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    unsigned int txsize = priv->tx_ring_size;
    unsigned int entry;
    struct tse_buffer *buffer = NULL;
    int nfrags = skb_shinfo(skb)->nr_frags;
    unsigned int nopaged_len = skb_headlen(skb);
    netdev_tx_t ret = NETDEV_TX_OK;
    dma_addr_t dma_addr;

    spin_lock_bh(&priv->tx_lock);

    if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
        if (!netif_queue_stopped(dev)) {
            netif_stop_queue(dev);
            /* This is a hard error, log it. */
            netdev_err(priv->dev,
                   "%s: Tx list full when queue awake\n",
                   __func__);
        }
        ret = NETDEV_TX_BUSY;
        goto out;
    }

    /* Map the first skb fragment */
    entry = priv->tx_prod % txsize;
    buffer = &priv->tx_ring[entry];

    dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
                  DMA_TO_DEVICE);
    if (dma_mapping_error(priv->device, dma_addr)) {
        netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
        /* Drop the packet; NETDEV_TX_OK means we consumed the skb */
        dev_kfree_skb_any(skb);
        dev->stats.tx_dropped++;
        ret = NETDEV_TX_OK;
        goto out;
    }

    buffer->skb = skb;
    buffer->dma_addr = dma_addr;
    buffer->len = nopaged_len;

    priv->dmaops->tx_buffer(priv, buffer);

    skb_tx_timestamp(skb);

    priv->tx_prod++;
    dev->stats.tx_bytes += skb->len;

    if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHOLD)) {
        if (netif_msg_hw(priv))
            netdev_dbg(priv->dev, "%s: stopping transmit queue\n",
                   __func__);
        netif_stop_queue(dev);
    }

out:
    spin_unlock_bh(&priv->tx_lock);

    return ret;
}

/* Called every time the controller might need to be made aware of a
 * new link state. The PHY code conveys this information through
 * variables in the phydev structure, and this function converts those
 * variables into the appropriate register values, and can bring down
 * the device if needed.
 */
static void altera_tse_adjust_link(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    struct phy_device *phydev = dev->phydev;
    int new_state = 0;

    /* only change config if there is a link */
    spin_lock(&priv->mac_cfg_lock);
    if (phydev->link) {
        /* Read old config */
        u32 cfg_reg = ioread32(&priv->mac_dev->command_config);

        /* Check duplex */
        if (phydev->duplex != priv->oldduplex) {
            new_state = 1;
            if (!(phydev->duplex))
                cfg_reg |= MAC_CMDCFG_HD_ENA;
            else
                cfg_reg &= ~MAC_CMDCFG_HD_ENA;

            netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
                   dev->name, phydev->duplex);

            priv->oldduplex = phydev->duplex;
        }

        /* Check speed */
        if (phydev->speed != priv->oldspeed) {
            new_state = 1;
            switch (phydev->speed) {
            case 1000:
                cfg_reg |= MAC_CMDCFG_ETH_SPEED;
                cfg_reg &= ~MAC_CMDCFG_ENA_10;
                break;
            case 100:
                cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
                cfg_reg &= ~MAC_CMDCFG_ENA_10;
                break;
            case 10:
                cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
                cfg_reg |= MAC_CMDCFG_ENA_10;
                break;
            default:
                if (netif_msg_link(priv))
                    netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
                            phydev->speed);
                break;
            }
            priv->oldspeed = phydev->speed;
        }
        iowrite32(cfg_reg, &priv->mac_dev->command_config);

        if (!priv->oldlink) {
            new_state = 1;
            priv->oldlink = 1;
        }
    } else if (priv->oldlink) {
        new_state = 1;
        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;
    }

    if (new_state && netif_msg_link(priv))
        phy_print_status(phydev);

    spin_unlock(&priv->mac_cfg_lock);
}

static struct phy_device *connect_local_phy(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    struct phy_device *phydev = NULL;
    char phy_id_fmt[MII_BUS_ID_SIZE + 3];

    if (priv->phy_addr != POLL_PHY) {
        snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
             priv->mdio->id, priv->phy_addr);

        netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);

        phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
                     priv->phy_iface);
        if (IS_ERR(phydev)) {
            netdev_err(dev, "Could not attach to PHY\n");
            phydev = NULL;
        }
    } else {
        int ret;

        phydev = phy_find_first(priv->mdio);
        if (phydev == NULL) {
            netdev_err(dev, "No PHY found\n");
            return phydev;
        }

        ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
                priv->phy_iface);
        if (ret != 0) {
            netdev_err(dev, "Could not attach to PHY\n");
            phydev = NULL;
        }
    }
    return phydev;
}

static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    struct device_node *np = priv->device->of_node;
    int ret;

    ret = of_get_phy_mode(np, &priv->phy_iface);

    /* Skip PHY address lookup and MDIO creation if no PHY is present */
    if (ret)
        return 0;

    /* try to get the PHY address from the device tree; use PHY
     * autodetection if no valid address is given
     */
    if (of_property_read_u32(priv->device->of_node, "phy-addr",
             &priv->phy_addr)) {
        priv->phy_addr = POLL_PHY;
    }

    if (!((priv->phy_addr == POLL_PHY) ||
          ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
        netdev_err(dev, "invalid phy-addr specified %d\n",
            priv->phy_addr);
        return -ENODEV;
    }

    /* Create/attach to MDIO bus */
    ret = altera_tse_mdio_create(dev,
                     atomic_add_return(1, &instance_count));
    if (ret)
        return -ENODEV;

    return 0;
}

/* Initialize the driver's PHY state and attach to the PHY
 */
static int init_phy(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    struct phy_device *phydev;
    struct device_node *phynode;
    bool fixed_link = false;
    int rc = 0;

    /* Skip PHY init if no PHY is present */
    if (!priv->phy_iface)
        return 0;

    priv->oldlink = 0;
    priv->oldspeed = 0;
    priv->oldduplex = -1;

    phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);

    if (!phynode) {
        /* check if a fixed-link is defined in the device tree */
        if (of_phy_is_fixed_link(priv->device->of_node)) {
            rc = of_phy_register_fixed_link(priv->device->of_node);
            if (rc < 0) {
                netdev_err(dev, "cannot register fixed PHY\n");
                return rc;
            }

            /* In the case of a fixed PHY, the DT node associated
             * to the PHY is the Ethernet MAC DT node.
             */
            phynode = of_node_get(priv->device->of_node);
            fixed_link = true;

            netdev_dbg(dev, "fixed-link detected\n");
            phydev = of_phy_connect(dev, phynode,
                        &altera_tse_adjust_link,
                        0, priv->phy_iface);
        } else {
            netdev_dbg(dev, "no phy-handle found\n");
            if (!priv->mdio) {
                netdev_err(dev, "No phy-handle nor local mdio specified\n");
                return -ENODEV;
            }
            phydev = connect_local_phy(dev);
        }
    } else {
        netdev_dbg(dev, "phy-handle found\n");
        phydev = of_phy_connect(dev, phynode,
            &altera_tse_adjust_link, 0, priv->phy_iface);
    }
    of_node_put(phynode);

    if (!phydev) {
        netdev_err(dev, "Could not find the PHY\n");
        if (fixed_link)
            of_phy_deregister_fixed_link(priv->device->of_node);
        return -ENODEV;
    }

    /* Stop advertising 1000BASE capability if the interface is not GMII
     */
    if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
        (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
        phy_set_max_speed(phydev, SPEED_100);

    /* Broken HW is sometimes missing the pull-up resistor on the
     * MDIO line, which results in reads to non-existent devices returning
     * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
     * device as well. If a fixed-link is used the phy_id is always 0.
     * Note: phydev->phy_id is the result of reading the UID PHY registers.
     */
    if ((phydev->phy_id == 0) && !fixed_link) {
        netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
        phy_disconnect(phydev);
        return -ENODEV;
    }

    netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
           phydev->mdio.addr, phydev->phy_id, phydev->link);

    return 0;
}

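/* Register layout example (illustrative) for MAC 00:11:22:33:44:55:
 * mac_addr_0 = 0x33221100 (addr[3]..addr[0]) and
 * mac_addr_1 = 0x00005544 (addr[5]..addr[4]).
 */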
static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
    u32 msb;
    u32 lsb;

    msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
    lsb = ((addr[5] << 8) | addr[4]) & 0xffff;

    /* Set primary MAC address */
    csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
    csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}

/* MAC software reset.
 * When reset is triggered, the MAC function completes the current
 * transmission or reception, and subsequently disables the transmit and
 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 * counters.
 */
static int reset_mac(struct altera_tse_private *priv)
{
    int counter;
    u32 dat;

    dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
    dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
    dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
    csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));

    counter = 0;
    while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
        if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
                     MAC_CMDCFG_SW_RESET))
            break;
        udelay(1);
    }

    if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
        dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        dat &= ~MAC_CMDCFG_SW_RESET;
        csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
        return -1;
    }
    return 0;
}

/* Initialize MAC core registers
 */
static int init_mac(struct altera_tse_private *priv)
{
    unsigned int cmd = 0;
    u32 frm_length;

    /* Setup Rx FIFO */
    csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
        priv->mac_dev, tse_csroffs(rx_section_empty));

    csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
        tse_csroffs(rx_section_full));

    csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
        tse_csroffs(rx_almost_empty));

    csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
        tse_csroffs(rx_almost_full));

    /* Setup Tx FIFO */
    csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
        priv->mac_dev, tse_csroffs(tx_section_empty));

    csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
        tse_csroffs(tx_section_full));

    csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
        tse_csroffs(tx_almost_empty));

    csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
        tse_csroffs(tx_almost_full));

    /* MAC Address Configuration */
    tse_update_mac_addr(priv, priv->dev->dev_addr);

    /* MAC Function Configuration */
    frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
    csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
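    /* e.g. with the default 1500-byte MTU: 14 + 1500 + 4 = 1518 bytes */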

    csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
        tse_csroffs(tx_ipg_length));

    /* Enable RX shift 16 so every received frame is aligned on a
     * 16-bit start address
     */
    tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
            ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);

    /* Disable TX shift 16 and CRC omission */
    tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
              ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
              ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);

    /* Set the MAC options */
    cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
    cmd &= ~MAC_CMDCFG_PAD_EN;  /* No padding removal on receive */
    cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC removal */
    cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
                     * with CRC errors
                     */
    cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
    cmd &= ~MAC_CMDCFG_TX_ENA;
    cmd &= ~MAC_CMDCFG_RX_ENA;

    /* Default speed and duplex setting, full/100 */
    cmd &= ~MAC_CMDCFG_HD_ENA;
    cmd &= ~MAC_CMDCFG_ETH_SPEED;
    cmd &= ~MAC_CMDCFG_ENA_10;

    csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));

    csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
        tse_csroffs(pause_quanta));

    if (netif_msg_hw(priv))
        dev_dbg(priv->device,
            "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);

    return 0;
}

/* Start/stop MAC transmission logic
 */
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
    u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));

    if (enable)
        value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
    else
        value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);

    csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}

/* Change the MTU
 */
static int tse_change_mtu(struct net_device *dev, int new_mtu)
{
    if (netif_running(dev)) {
        netdev_err(dev, "must be stopped to change its MTU\n");
        return -EBUSY;
    }

    dev->mtu = new_mtu;
    netdev_update_features(dev);

    return 0;
}

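/* The 64-entry hash table is indexed by a 6-bit hash with one bit per
 * MAC octet: each bit is the parity (XOR of all eight bits) of one
 * octet, with addr[5]'s parity in the MSB and addr[0]'s in the LSB.
 * Illustrative example: 01:00:5e:00:00:01 has octet parities
 * 1,0,1,0,0,1 (addr[0]..addr[5]), selecting entry 0b100101 = 37.
 */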
static void altera_tse_set_mcfilter(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    int i;
    struct netdev_hw_addr *ha;

    /* clear the hash filter */
    for (i = 0; i < 64; i++)
        csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);

    netdev_for_each_mc_addr(ha, dev) {
        unsigned int hash = 0;
        int mac_octet;

        for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
            unsigned char xor_bit = 0;
            unsigned char octet = ha->addr[mac_octet];
            unsigned int bitshift;

            for (bitshift = 0; bitshift < 8; bitshift++)
                xor_bit ^= ((octet >> bitshift) & 0x01);

            hash = (hash << 1) | xor_bit;
        }
        csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
    }
}

static void altera_tse_set_mcfilterall(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    int i;

    /* set the hash filter */
    for (i = 0; i < 64; i++)
        csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}

/* Set or clear the multicast filter for this adapter
 */
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);

    spin_lock(&priv->mac_cfg_lock);

    if (dev->flags & IFF_PROMISC)
        tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
                MAC_CMDCFG_PROMIS_EN);

    if (dev->flags & IFF_ALLMULTI)
        altera_tse_set_mcfilterall(dev);
    else
        altera_tse_set_mcfilter(dev);

    spin_unlock(&priv->mac_cfg_lock);
}

/* Set or clear promiscuous mode for this adapter
 */
static void tse_set_rx_mode(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);

    spin_lock(&priv->mac_cfg_lock);

    if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
        !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
        tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
                MAC_CMDCFG_PROMIS_EN);
    else
        tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
                  MAC_CMDCFG_PROMIS_EN);

    spin_unlock(&priv->mac_cfg_lock);
}

/* Initialise (if necessary) the SGMII PCS component
 */
static int init_sgmii_pcs(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    int n;
    unsigned int tmp_reg = 0;

    if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
        return 0; /* Nothing to do, not in SGMII mode */

    /* The TSE SGMII PCS block looks a little like a PHY: it is
     * mapped into the zeroth MDIO space of the MAC and it has
     * ID registers like a PHY would. Sadly these are often
     * configured to zeroes, so don't be surprised if they do
     * show 0x00000000.
     */
    if (sgmii_pcs_scratch_test(priv, 0x0000) &&
        sgmii_pcs_scratch_test(priv, 0xffff) &&
        sgmii_pcs_scratch_test(priv, 0xa5a5) &&
        sgmii_pcs_scratch_test(priv, 0x5a5a)) {
        netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
                sgmii_pcs_read(priv, MII_PHYSID1),
                sgmii_pcs_read(priv, MII_PHYSID2));
    } else {
        netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
        return -ENOMEM;
    }

    /* Starting on page 5-29 of the MegaCore Function User Guide:
     * set the SGMII Link timer to 1.6 ms
     */
    sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
    sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
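    /* (0x3 << 16) | 0x0d40 = 200000 timer units; assuming the PCS's
     * documented 8 ns granularity, 200000 * 8 ns = 1.6 ms
     */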

    /* Enable SGMII Interface and Enable SGMII Auto Negotiation */
    sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);

    /* Enable Autonegotiation */
    tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
    tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
    sgmii_pcs_write(priv, MII_BMCR, tmp_reg);

    /* Reset PCS block */
    tmp_reg |= BMCR_RESET;
    sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
    for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
        if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
            netdev_info(dev, "SGMII PCS block initialised OK\n");
            return 0;
        }
        udelay(1);
    }

    /* We failed to reset the block, return a timeout */
    netdev_err(dev, "SGMII PCS block reset failed.\n");
    return -ETIMEDOUT;
}

/* Open and initialize the interface
 */
static int tse_open(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    int ret = 0;
    int i;
    unsigned long flags;

    /* Reset and configure TSE MAC and probe associated PHY */
    ret = priv->dmaops->init_dma(priv);
    if (ret != 0) {
        netdev_err(dev, "Cannot initialize DMA\n");
        goto phy_error;
    }

    if (netif_msg_ifup(priv))
        netdev_warn(dev, "device MAC address %pM\n",
                dev->dev_addr);

    if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
        netdev_warn(dev, "TSE revision %x\n", priv->revision);

    spin_lock(&priv->mac_cfg_lock);

    /* no-op if MAC not operating in SGMII mode */
    ret = init_sgmii_pcs(dev);
    if (ret) {
        netdev_err(dev,
               "Cannot init the SGMII PCS (error: %d)\n", ret);
        spin_unlock(&priv->mac_cfg_lock);
        goto phy_error;
    }

    ret = reset_mac(priv);
    /* Note that reset_mac will fail if the clocks are gated by the PHY
     * due to the PHY being put into isolation or power down mode.
     * This is not an error if reset fails due to no clock.
     */
    if (ret)
        netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);

    ret = init_mac(priv);
    spin_unlock(&priv->mac_cfg_lock);
    if (ret) {
        netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
        goto alloc_skbuf_error;
    }

    priv->dmaops->reset_dma(priv);

    /* Create and initialize the TX/RX descriptor chains. */
    priv->rx_ring_size = dma_rx_num;
    priv->tx_ring_size = dma_tx_num;
    ret = alloc_init_skbufs(priv);
    if (ret) {
        netdev_err(dev, "DMA descriptors initialization failed\n");
        goto alloc_skbuf_error;
    }

    /* Register RX interrupt */
    ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
              dev->name, dev);
    if (ret) {
        netdev_err(dev, "Unable to register RX interrupt %d\n",
               priv->rx_irq);
        goto init_error;
    }

    /* Register TX interrupt */
    ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
              dev->name, dev);
    if (ret) {
        netdev_err(dev, "Unable to register TX interrupt %d\n",
               priv->tx_irq);
        goto tx_request_irq_error;
    }

    /* Enable DMA interrupts */
    spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
    priv->dmaops->enable_rxirq(priv);
    priv->dmaops->enable_txirq(priv);

    /* Setup RX descriptor chain */
    for (i = 0; i < priv->rx_ring_size; i++)
        priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);

    spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

    if (dev->phydev)
        phy_start(dev->phydev);

    napi_enable(&priv->napi);
    netif_start_queue(dev);

    priv->dmaops->start_rxdma(priv);

    /* Start MAC Rx/Tx */
    spin_lock(&priv->mac_cfg_lock);
    tse_set_mac(priv, true);
    spin_unlock(&priv->mac_cfg_lock);

    return 0;

tx_request_irq_error:
    free_irq(priv->rx_irq, dev);
init_error:
    free_skbufs(dev);
alloc_skbuf_error:
phy_error:
    return ret;
}

/* Stop TSE MAC interface and put the device in an inactive state
 */
static int tse_shutdown(struct net_device *dev)
{
    struct altera_tse_private *priv = netdev_priv(dev);
    int ret;
    unsigned long flags;

    /* Stop the PHY */
    if (dev->phydev)
        phy_stop(dev->phydev);

    netif_stop_queue(dev);
    napi_disable(&priv->napi);

    /* Disable DMA interrupts */
    spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
    priv->dmaops->disable_rxirq(priv);
    priv->dmaops->disable_txirq(priv);
    spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

    /* Free the IRQ lines */
    free_irq(priv->rx_irq, dev);
    free_irq(priv->tx_irq, dev);

    /* disable and reset the MAC, empties fifo */
    spin_lock(&priv->mac_cfg_lock);
    spin_lock(&priv->tx_lock);

    ret = reset_mac(priv);
    /* Note that reset_mac will fail if the clocks are gated by the PHY
     * due to the PHY being put into isolation or power down mode.
     * This is not an error if reset fails due to no clock.
     */
    if (ret)
        netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
    priv->dmaops->reset_dma(priv);
    free_skbufs(dev);

    spin_unlock(&priv->tx_lock);
    spin_unlock(&priv->mac_cfg_lock);

    priv->dmaops->uninit_dma(priv);

    return 0;
}

static struct net_device_ops altera_tse_netdev_ops = {
    .ndo_open       = tse_open,
    .ndo_stop       = tse_shutdown,
    .ndo_start_xmit     = tse_start_xmit,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_set_rx_mode    = tse_set_rx_mode,
    .ndo_change_mtu     = tse_change_mtu,
    .ndo_validate_addr  = eth_validate_addr,
};

static int request_and_map(struct platform_device *pdev, const char *name,
               struct resource **res, void __iomem **ptr)
{
    struct resource *region;
    struct device *device = &pdev->dev;

    *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
    if (*res == NULL) {
        dev_err(device, "resource %s not defined\n", name);
        return -ENODEV;
    }

    region = devm_request_mem_region(device, (*res)->start,
                     resource_size(*res), dev_name(device));
    if (region == NULL) {
        dev_err(device, "unable to request %s\n", name);
        return -EBUSY;
    }

    *ptr = devm_ioremap(device, region->start,
                    resource_size(region));
    if (*ptr == NULL) {
        dev_err(device, "ioremap of %s failed!\n", name);
        return -ENOMEM;
    }

    return 0;
}

/* Probe Altera TSE MAC device
 */
static int altera_tse_probe(struct platform_device *pdev)
{
    struct net_device *ndev;
    int ret = -ENODEV;
    struct resource *control_port;
    struct resource *dma_res;
    struct altera_tse_private *priv;
    void __iomem *descmap;
    const struct of_device_id *of_id = NULL;

    ndev = alloc_etherdev(sizeof(struct altera_tse_private));
    if (!ndev) {
        dev_err(&pdev->dev, "Could not allocate network device\n");
        return -ENODEV;
    }

    SET_NETDEV_DEV(ndev, &pdev->dev);

    priv = netdev_priv(ndev);
    priv->device = &pdev->dev;
    priv->dev = ndev;
    priv->msg_enable = netif_msg_init(debug, default_msg_level);

    of_id = of_match_device(altera_tse_ids, &pdev->dev);

    if (of_id)
        priv->dmaops = (struct altera_dmaops *)of_id->data;

    if (priv->dmaops &&
        priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
        /* Get the mapped address to the SGDMA descriptor memory */
        ret = request_and_map(pdev, "s1", &dma_res, &descmap);
        if (ret)
            goto err_free_netdev;

        /* Start of that memory is for transmit descriptors */
        priv->tx_dma_desc = descmap;

        /* First half is for tx descriptors, other half for rx */
        priv->txdescmem = resource_size(dma_res)/2;

        priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;

        priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
                             priv->txdescmem));
        priv->rxdescmem = resource_size(dma_res)/2;
        priv->rxdescmem_busaddr = dma_res->start;
        priv->rxdescmem_busaddr += priv->txdescmem;

        if (upper_32_bits(priv->rxdescmem_busaddr)) {
            dev_dbg(priv->device,
                "SGDMA bus addresses greater than 32-bits\n");
            ret = -EINVAL;
            goto err_free_netdev;
        }
        if (upper_32_bits(priv->txdescmem_busaddr)) {
            dev_dbg(priv->device,
                "SGDMA bus addresses greater than 32-bits\n");
            ret = -EINVAL;
            goto err_free_netdev;
        }
    } else if (priv->dmaops &&
           priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
        ret = request_and_map(pdev, "rx_resp", &dma_res,
                      &priv->rx_dma_resp);
        if (ret)
            goto err_free_netdev;

        ret = request_and_map(pdev, "tx_desc", &dma_res,
                      &priv->tx_dma_desc);
        if (ret)
            goto err_free_netdev;

        priv->txdescmem = resource_size(dma_res);
        priv->txdescmem_busaddr = dma_res->start;

        ret = request_and_map(pdev, "rx_desc", &dma_res,
                      &priv->rx_dma_desc);
        if (ret)
            goto err_free_netdev;

        priv->rxdescmem = resource_size(dma_res);
        priv->rxdescmem_busaddr = dma_res->start;

    } else {
        ret = -ENODEV;
        goto err_free_netdev;
    }

    if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
        dma_set_coherent_mask(priv->device,
                      DMA_BIT_MASK(priv->dmaops->dmamask));
    } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
        dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
    } else {
        ret = -EIO;
        goto err_free_netdev;
    }

    /* MAC address space */
    ret = request_and_map(pdev, "control_port", &control_port,
                  (void __iomem **)&priv->mac_dev);
    if (ret)
        goto err_free_netdev;

    /* xSGDMA Rx Dispatcher address space */
    ret = request_and_map(pdev, "rx_csr", &dma_res,
                  &priv->rx_dma_csr);
    if (ret)
        goto err_free_netdev;

    /* xSGDMA Tx Dispatcher address space */
    ret = request_and_map(pdev, "tx_csr", &dma_res,
                  &priv->tx_dma_csr);
    if (ret)
        goto err_free_netdev;

    /* Rx IRQ */
    priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
    if (priv->rx_irq == -ENXIO) {
        dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
        ret = -ENXIO;
        goto err_free_netdev;
    }

    /* Tx IRQ */
    priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
    if (priv->tx_irq == -ENXIO) {
        dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
        ret = -ENXIO;
        goto err_free_netdev;
    }

    /* get FIFO depths from device tree */
    if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
                 &priv->rx_fifo_depth)) {
        dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
        ret = -ENXIO;
        goto err_free_netdev;
    }

    if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
                 &priv->tx_fifo_depth)) {
        dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
        ret = -ENXIO;
        goto err_free_netdev;
    }

    /* get hash filter settings for this instance */
    priv->hash_filter =
        of_property_read_bool(pdev->dev.of_node,
                      "altr,has-hash-multicast-filter");

    /* Disable the hash filter for now, until the multicast filter
     * receive issue is debugged
     */
    priv->hash_filter = 0;

    /* get supplemental address settings for this instance */
    priv->added_unicast =
        of_property_read_bool(pdev->dev.of_node,
                      "altr,has-supplementary-unicast");

    priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
    /* Max MTU is 1500, ETH_DATA_LEN */
    priv->dev->max_mtu = ETH_DATA_LEN;

    /* Get the max mtu from the device tree. Note that the
     * "max-frame-size" parameter is actually max mtu. Definition
     * in the ePAPR v1.1 spec and usage differ, so go with usage.
     */
    of_property_read_u32(pdev->dev.of_node, "max-frame-size",
                 &priv->dev->max_mtu);

    /* The DMA buffer size already accounts for an alignment bias
     * to avoid unaligned access exceptions for the NIOS processor.
     */
    priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;

    /* get default MAC address from device tree */
    ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
    if (ret)
        eth_hw_addr_random(ndev);

    /* get phy addr and create mdio */
    ret = altera_tse_phy_get_addr_mdio_create(ndev);
    if (ret)
        goto err_free_netdev;

    /* initialize netdev */
    ndev->mem_start = control_port->start;
    ndev->mem_end = control_port->end;
    ndev->netdev_ops = &altera_tse_netdev_ops;
    altera_tse_set_ethtool_ops(ndev);

    altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;

    if (priv->hash_filter)
        altera_tse_netdev_ops.ndo_set_rx_mode =
            tse_set_rx_mode_hashfilter;

    /* Scatter/gather IO is not supported,
     * so it is turned off
     */
    ndev->hw_features &= ~NETIF_F_SG;
    ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;

    /* VLAN offloading of tagging, stripping and filtering is not
     * supported by hardware, but the driver will accommodate the
     * extra 4-byte VLAN tag for processing by upper layers
     */
    ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

    /* setup NAPI interface */
    netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);

    spin_lock_init(&priv->mac_cfg_lock);
    spin_lock_init(&priv->tx_lock);
    spin_lock_init(&priv->rxdma_irq_lock);

    netif_carrier_off(ndev);
    ret = register_netdev(ndev);
    if (ret) {
        dev_err(&pdev->dev, "failed to register TSE net device\n");
        goto err_register_netdev;
    }

    platform_set_drvdata(pdev, ndev);

    priv->revision = ioread32(&priv->mac_dev->megacore_revision);

    if (netif_msg_probe(priv))
        dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
             (priv->revision >> 8) & 0xff,
             priv->revision & 0xff,
             (unsigned long) control_port->start, priv->rx_irq,
             priv->tx_irq);

    ret = init_phy(ndev);
    if (ret != 0) {
        netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
        goto err_init_phy;
    }
    return 0;

err_init_phy:
    unregister_netdev(ndev);
err_register_netdev:
    netif_napi_del(&priv->napi);
    altera_tse_mdio_destroy(ndev);
err_free_netdev:
    free_netdev(ndev);
    return ret;
}

/* Remove Altera TSE MAC device
 */
static int altera_tse_remove(struct platform_device *pdev)
{
    struct net_device *ndev = platform_get_drvdata(pdev);
    struct altera_tse_private *priv = netdev_priv(ndev);

    if (ndev->phydev) {
        phy_disconnect(ndev->phydev);

        if (of_phy_is_fixed_link(priv->device->of_node))
            of_phy_deregister_fixed_link(priv->device->of_node);
    }

    platform_set_drvdata(pdev, NULL);
    altera_tse_mdio_destroy(ndev);
    unregister_netdev(ndev);
    free_netdev(ndev);

    return 0;
}

static const struct altera_dmaops altera_dtype_sgdma = {
    .altera_dtype = ALTERA_DTYPE_SGDMA,
    .dmamask = 32,
    .reset_dma = sgdma_reset,
    .enable_txirq = sgdma_enable_txirq,
    .enable_rxirq = sgdma_enable_rxirq,
    .disable_txirq = sgdma_disable_txirq,
    .disable_rxirq = sgdma_disable_rxirq,
    .clear_txirq = sgdma_clear_txirq,
    .clear_rxirq = sgdma_clear_rxirq,
    .tx_buffer = sgdma_tx_buffer,
    .tx_completions = sgdma_tx_completions,
    .add_rx_desc = sgdma_add_rx_desc,
    .get_rx_status = sgdma_rx_status,
    .init_dma = sgdma_initialize,
    .uninit_dma = sgdma_uninitialize,
    .start_rxdma = sgdma_start_rxdma,
};

static const struct altera_dmaops altera_dtype_msgdma = {
    .altera_dtype = ALTERA_DTYPE_MSGDMA,
    .dmamask = 64,
    .reset_dma = msgdma_reset,
    .enable_txirq = msgdma_enable_txirq,
    .enable_rxirq = msgdma_enable_rxirq,
    .disable_txirq = msgdma_disable_txirq,
    .disable_rxirq = msgdma_disable_rxirq,
    .clear_txirq = msgdma_clear_txirq,
    .clear_rxirq = msgdma_clear_rxirq,
    .tx_buffer = msgdma_tx_buffer,
    .tx_completions = msgdma_tx_completions,
    .add_rx_desc = msgdma_add_rx_desc,
    .get_rx_status = msgdma_rx_status,
    .init_dma = msgdma_initialize,
    .uninit_dma = msgdma_uninitialize,
    .start_rxdma = msgdma_start_rxdma,
};

static const struct of_device_id altera_tse_ids[] = {
    { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
    { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
    { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
    {},
};
MODULE_DEVICE_TABLE(of, altera_tse_ids);
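
/* Illustrative device-tree node for an mSGDMA-based instance; the
 * resource and interrupt names match what altera_tse_probe() requests,
 * while the node name, addresses and interrupt specifiers are
 * placeholders for a real system's values:
 *
 *   tse0: ethernet@ff200000 {
 *       compatible = "altr,tse-msgdma-1.0";
 *       reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp",
 *                   "tx_csr", "tx_desc";
 *       reg = <...>;
 *       interrupt-names = "rx_irq", "tx_irq";
 *       interrupts = <...>;
 *       rx-fifo-depth = <2048>;
 *       tx-fifo-depth = <2048>;
 *       phy-mode = "sgmii";
 *       phy-handle = <&phy0>;
 *   };
 */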

static struct platform_driver altera_tse_driver = {
    .probe      = altera_tse_probe,
    .remove     = altera_tse_remove,
    .suspend    = NULL,
    .resume     = NULL,
    .driver     = {
        .name   = ALTERA_TSE_RESOURCE_NAME,
        .of_match_table = altera_tse_ids,
    },
};

module_platform_driver(altera_tse_driver);

MODULE_AUTHOR("Altera Corporation");
MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
MODULE_LICENSE("GPL v2");