0001 /*
0002  * This file is subject to the terms and conditions of the GNU General Public
0003  * License.  See the file "COPYING" in the main directory of this archive
0004  * for more details.
0005  *
0006  * Copyright (C) 2009-2012 Cavium, Inc
0007  */
0008 
0009 #include <linux/platform_device.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/etherdevice.h>
0012 #include <linux/capability.h>
0013 #include <linux/net_tstamp.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/netdevice.h>
0016 #include <linux/spinlock.h>
0017 #include <linux/if_vlan.h>
0018 #include <linux/of_mdio.h>
0019 #include <linux/module.h>
0020 #include <linux/of_net.h>
0021 #include <linux/init.h>
0022 #include <linux/slab.h>
0023 #include <linux/phy.h>
0024 #include <linux/io.h>
0025 
0026 #include <asm/octeon/octeon.h>
0027 #include <asm/octeon/cvmx-mixx-defs.h>
0028 #include <asm/octeon/cvmx-agl-defs.h>
0029 
0030 #define DRV_NAME "octeon_mgmt"
0031 #define DRV_DESCRIPTION \
0032     "Cavium Networks Octeon MII (management) port Network Driver"
0033 
0034 #define OCTEON_MGMT_NAPI_WEIGHT 16
0035 
0036 /* Ring sizes that are powers of two allow for more efficient modulo
0037  * operations.
0038  */
0039 #define OCTEON_MGMT_RX_RING_SIZE 512
0040 #define OCTEON_MGMT_TX_RING_SIZE 128
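/* Illustrative note (not itself driver code): because both ring sizes are
 * powers of two, the wrap-around arithmetic used throughout this file, e.g.
 *
 *     rx_next = (rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
 *
 * can be compiled down to a simple mask,
 *
 *     rx_next = (rx_next + 1) & (OCTEON_MGMT_RX_RING_SIZE - 1);
 *
 * avoiding an integer division.
 */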
0041 
0042 /* Allow room for the Ethernet header plus 8 bytes of VLAN tag and FCS. */
0043 #define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
0044 
0045 union mgmt_port_ring_entry {
0046     u64 d64;
0047     struct {
0048 #define RING_ENTRY_CODE_DONE 0xf
0049 #define RING_ENTRY_CODE_MORE 0x10
0050 #ifdef __BIG_ENDIAN_BITFIELD
0051         u64 reserved_62_63:2;
0052         /* Length of the buffer/packet in bytes */
0053         u64 len:14;
0054         /* For TX, signals that the packet should be timestamped */
0055         u64 tstamp:1;
0056         /* The RX error code */
0057         u64 code:7;
0058         /* Physical address of the buffer */
0059         u64 addr:40;
0060 #else
0061         u64 addr:40;
0062         u64 code:7;
0063         u64 tstamp:1;
0064         u64 len:14;
0065         u64 reserved_62_63:2;
0066 #endif
0067     } s;
0068 };
0069 
0070 #define MIX_ORING1  0x0
0071 #define MIX_ORING2  0x8
0072 #define MIX_IRING1  0x10
0073 #define MIX_IRING2  0x18
0074 #define MIX_CTL     0x20
0075 #define MIX_IRHWM   0x28
0076 #define MIX_IRCNT   0x30
0077 #define MIX_ORHWM   0x38
0078 #define MIX_ORCNT   0x40
0079 #define MIX_ISR     0x48
0080 #define MIX_INTENA  0x50
0081 #define MIX_REMCNT  0x58
0082 #define MIX_BIST    0x78
0083 
0084 #define AGL_GMX_PRT_CFG         0x10
0085 #define AGL_GMX_RX_FRM_CTL      0x18
0086 #define AGL_GMX_RX_FRM_MAX      0x30
0087 #define AGL_GMX_RX_JABBER       0x38
0088 #define AGL_GMX_RX_STATS_CTL        0x50
0089 
0090 #define AGL_GMX_RX_STATS_PKTS_DRP   0xb0
0091 #define AGL_GMX_RX_STATS_OCTS_DRP   0xb8
0092 #define AGL_GMX_RX_STATS_PKTS_BAD   0xc0
0093 
0094 #define AGL_GMX_RX_ADR_CTL      0x100
0095 #define AGL_GMX_RX_ADR_CAM_EN       0x108
0096 #define AGL_GMX_RX_ADR_CAM0     0x180
0097 #define AGL_GMX_RX_ADR_CAM1     0x188
0098 #define AGL_GMX_RX_ADR_CAM2     0x190
0099 #define AGL_GMX_RX_ADR_CAM3     0x198
0100 #define AGL_GMX_RX_ADR_CAM4     0x1a0
0101 #define AGL_GMX_RX_ADR_CAM5     0x1a8
0102 
0103 #define AGL_GMX_TX_CLK          0x208
0104 #define AGL_GMX_TX_STATS_CTL        0x268
0105 #define AGL_GMX_TX_CTL          0x270
0106 #define AGL_GMX_TX_STAT0        0x280
0107 #define AGL_GMX_TX_STAT1        0x288
0108 #define AGL_GMX_TX_STAT2        0x290
0109 #define AGL_GMX_TX_STAT3        0x298
0110 #define AGL_GMX_TX_STAT4        0x2a0
0111 #define AGL_GMX_TX_STAT5        0x2a8
0112 #define AGL_GMX_TX_STAT6        0x2b0
0113 #define AGL_GMX_TX_STAT7        0x2b8
0114 #define AGL_GMX_TX_STAT8        0x2c0
0115 #define AGL_GMX_TX_STAT9        0x2c8
0116 
0117 struct octeon_mgmt {
0118     struct net_device *netdev;
0119     u64 mix;
0120     u64 agl;
0121     u64 agl_prt_ctl;
0122     int port;
0123     int irq;
0124     bool has_rx_tstamp;
0125     u64 *tx_ring;
0126     dma_addr_t tx_ring_handle;
0127     unsigned int tx_next;
0128     unsigned int tx_next_clean;
0129     unsigned int tx_current_fill;
0130     /* The tx_list lock also protects the ring related variables */
0131     struct sk_buff_head tx_list;
0132 
0133     /* RX variables only touched in napi_poll.  No locking necessary. */
0134     u64 *rx_ring;
0135     dma_addr_t rx_ring_handle;
0136     unsigned int rx_next;
0137     unsigned int rx_next_fill;
0138     unsigned int rx_current_fill;
0139     struct sk_buff_head rx_list;
0140 
0141     spinlock_t lock;
0142     unsigned int last_duplex;
0143     unsigned int last_link;
0144     unsigned int last_speed;
0145     struct device *dev;
0146     struct napi_struct napi;
0147     struct tasklet_struct tx_clean_tasklet;
0148     struct device_node *phy_np;
0149     resource_size_t mix_phys;
0150     resource_size_t mix_size;
0151     resource_size_t agl_phys;
0152     resource_size_t agl_size;
0153     resource_size_t agl_prt_ctl_phys;
0154     resource_size_t agl_prt_ctl_size;
0155 };
0156 
0157 static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
0158 {
0159     union cvmx_mixx_intena mix_intena;
0160     unsigned long flags;
0161 
0162     spin_lock_irqsave(&p->lock, flags);
0163     mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
0164     mix_intena.s.ithena = enable ? 1 : 0;
0165     cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
0166     spin_unlock_irqrestore(&p->lock, flags);
0167 }
0168 
0169 static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
0170 {
0171     union cvmx_mixx_intena mix_intena;
0172     unsigned long flags;
0173 
0174     spin_lock_irqsave(&p->lock, flags);
0175     mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
0176     mix_intena.s.othena = enable ? 1 : 0;
0177     cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
0178     spin_unlock_irqrestore(&p->lock, flags);
0179 }
0180 
0181 static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
0182 {
0183     octeon_mgmt_set_rx_irq(p, 1);
0184 }
0185 
0186 static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
0187 {
0188     octeon_mgmt_set_rx_irq(p, 0);
0189 }
0190 
0191 static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
0192 {
0193     octeon_mgmt_set_tx_irq(p, 1);
0194 }
0195 
0196 static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
0197 {
0198     octeon_mgmt_set_tx_irq(p, 0);
0199 }
0200 
0201 static unsigned int ring_max_fill(unsigned int ring_size)
0202 {
0203     return ring_size - 8;
0204 }
0205 
0206 static unsigned int ring_size_to_bytes(unsigned int ring_size)
0207 {
0208     return ring_size * sizeof(union mgmt_port_ring_entry);
0209 }
0210 
0211 static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
0212 {
0213     struct octeon_mgmt *p = netdev_priv(netdev);
0214 
0215     while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
0216         unsigned int size;
0217         union mgmt_port_ring_entry re;
0218         struct sk_buff *skb;
0219 
0220         /* CN56XX pass 1 needs 8 bytes of padding.  */
0221         size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
0222 
0223         skb = netdev_alloc_skb(netdev, size);
0224         if (!skb)
0225             break;
0226         skb_reserve(skb, NET_IP_ALIGN);
0227         __skb_queue_tail(&p->rx_list, skb);
0228 
0229         re.d64 = 0;
0230         re.s.len = size;
0231         re.s.addr = dma_map_single(p->dev, skb->data,
0232                        size,
0233                        DMA_FROM_DEVICE);
0234 
0235         /* Put it in the ring.  */
0236         p->rx_ring[p->rx_next_fill] = re.d64;
0237         /* Make sure the write that fills the ring entry is not
0238          * reordered after ringing the bell.
0239          */
0240         wmb();
0241 
0242         dma_sync_single_for_device(p->dev, p->rx_ring_handle,
0243                        ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
0244                        DMA_BIDIRECTIONAL);
0245         p->rx_next_fill =
0246             (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
0247         p->rx_current_fill++;
0248         /* Ring the bell.  */
0249         cvmx_write_csr(p->mix + MIX_IRING2, 1);
0250     }
0251 }
0252 
0253 static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
0254 {
0255     union cvmx_mixx_orcnt mix_orcnt;
0256     union mgmt_port_ring_entry re;
0257     struct sk_buff *skb;
0258     int cleaned = 0;
0259     unsigned long flags;
0260 
0261     mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
0262     while (mix_orcnt.s.orcnt) {
0263         spin_lock_irqsave(&p->tx_list.lock, flags);
0264 
0265         mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
0266 
0267         if (mix_orcnt.s.orcnt == 0) {
0268             spin_unlock_irqrestore(&p->tx_list.lock, flags);
0269             break;
0270         }
0271 
0272         dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
0273                     ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
0274                     DMA_BIDIRECTIONAL);
0275 
0276         re.d64 = p->tx_ring[p->tx_next_clean];
0277         p->tx_next_clean =
0278             (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
0279         skb = __skb_dequeue(&p->tx_list);
0280 
0281         mix_orcnt.u64 = 0;
0282         mix_orcnt.s.orcnt = 1;
0283 
0284         /* Acknowledge to hardware that we have the buffer.  */
0285         cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
0286         p->tx_current_fill--;
0287 
0288         spin_unlock_irqrestore(&p->tx_list.lock, flags);
0289 
0290         dma_unmap_single(p->dev, re.s.addr, re.s.len,
0291                  DMA_TO_DEVICE);
0292 
0293         /* Read the hardware TX timestamp if one was recorded */
0294         if (unlikely(re.s.tstamp)) {
0295             struct skb_shared_hwtstamps ts;
0296             u64 ns;
0297 
0298             memset(&ts, 0, sizeof(ts));
0299             /* Read the timestamp */
0300             ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
0301             /* Remove the timestamp from the FIFO */
0302             cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
0303             /* Tell the kernel about the timestamp */
0304             ts.hwtstamp = ns_to_ktime(ns);
0305             skb_tstamp_tx(skb, &ts);
0306         }
0307 
0308         dev_kfree_skb_any(skb);
0309         cleaned++;
0310 
0311         mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
0312     }
0313 
0314     if (cleaned && netif_queue_stopped(p->netdev))
0315         netif_wake_queue(p->netdev);
0316 }
0317 
0318 static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
0319 {
0320     struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);
0321     octeon_mgmt_clean_tx_buffers(p);
0322     octeon_mgmt_enable_tx_irq(p);
0323 }
0324 
0325 static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
0326 {
0327     struct octeon_mgmt *p = netdev_priv(netdev);
0328     unsigned long flags;
0329     u64 drop, bad;
0330 
0331     /* These reads also clear the count registers.  */
0332     drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
0333     bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
0334 
0335     if (drop || bad) {
0336         /* Do an atomic update. */
0337         spin_lock_irqsave(&p->lock, flags);
0338         netdev->stats.rx_errors += bad;
0339         netdev->stats.rx_dropped += drop;
0340         spin_unlock_irqrestore(&p->lock, flags);
0341     }
0342 }
0343 
0344 static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
0345 {
0346     struct octeon_mgmt *p = netdev_priv(netdev);
0347     unsigned long flags;
0348 
0349     union cvmx_agl_gmx_txx_stat0 s0;
0350     union cvmx_agl_gmx_txx_stat1 s1;
0351 
0352     /* These reads also clear the count registers.  */
0353     s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
0354     s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
0355 
0356     if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
0357         /* Do an atomic update. */
0358         spin_lock_irqsave(&p->lock, flags);
0359         netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
0360         netdev->stats.collisions += s1.s.scol + s1.s.mcol;
0361         spin_unlock_irqrestore(&p->lock, flags);
0362     }
0363 }
0364 
0365 /*
0366  * Dequeue a receive skb and its corresponding ring entry.  The ring
0367  * entry is returned, *pskb is updated to point to the skb.
0368  */
0369 static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
0370                      struct sk_buff **pskb)
0371 {
0372     union mgmt_port_ring_entry re;
0373 
0374     dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
0375                 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
0376                 DMA_BIDIRECTIONAL);
0377 
0378     re.d64 = p->rx_ring[p->rx_next];
0379     p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
0380     p->rx_current_fill--;
0381     *pskb = __skb_dequeue(&p->rx_list);
0382 
0383     dma_unmap_single(p->dev, re.s.addr,
0384              ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
0385              DMA_FROM_DEVICE);
0386 
0387     return re.d64;
0388 }
0389 
0390 
0391 static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
0392 {
0393     struct net_device *netdev = p->netdev;
0394     union cvmx_mixx_ircnt mix_ircnt;
0395     union mgmt_port_ring_entry re;
0396     struct sk_buff *skb;
0397     struct sk_buff *skb2;
0398     struct sk_buff *skb_new;
0399     union mgmt_port_ring_entry re2;
0400     int rc = 1;
0401 
0402 
0403     re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
0404     if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
0405         /* A good packet, send it up. */
0406         skb_put(skb, re.s.len);
0407 good:
0408         /* Process the RX timestamp if it was recorded */
0409         if (p->has_rx_tstamp) {
0410             /* The first 8 bytes are the timestamp */
0411             u64 ns = *(u64 *)skb->data;
0412             struct skb_shared_hwtstamps *ts;
0413             ts = skb_hwtstamps(skb);
0414             ts->hwtstamp = ns_to_ktime(ns);
0415             __skb_pull(skb, 8);
0416         }
0417         skb->protocol = eth_type_trans(skb, netdev);
0418         netdev->stats.rx_packets++;
0419         netdev->stats.rx_bytes += skb->len;
0420         netif_receive_skb(skb);
0421         rc = 0;
0422     } else if (re.s.code == RING_ENTRY_CODE_MORE) {
0423         /* Packet split across skbs.  This can happen if we
0424          * increase the MTU.  Buffers that are already in the
0425          * rx ring can then end up being too small.  As the rx
0426          * ring is refilled, buffers sized for the new MTU
0427          * will be used and we should go back to the normal
0428          * non-split case.
0429          */
0430         skb_put(skb, re.s.len);
0431         do {
0432             re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
0433             if (re2.s.code != RING_ENTRY_CODE_MORE
0434                 && re2.s.code != RING_ENTRY_CODE_DONE)
0435                 goto split_error;
0436             skb_put(skb2,  re2.s.len);
0437             skb_new = skb_copy_expand(skb, 0, skb2->len,
0438                           GFP_ATOMIC);
0439             if (!skb_new)
0440                 goto split_error;
0441             if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
0442                       skb2->len))
0443                 goto split_error;
0444             skb_put(skb_new, skb2->len);
0445             dev_kfree_skb_any(skb);
0446             dev_kfree_skb_any(skb2);
0447             skb = skb_new;
0448         } while (re2.s.code == RING_ENTRY_CODE_MORE);
0449         goto good;
0450     } else {
0451         /* Some other error, discard it. */
0452         dev_kfree_skb_any(skb);
0453         /* Error statistics are accumulated in
0454          * octeon_mgmt_update_rx_stats.
0455          */
0456     }
0457     goto done;
0458 split_error:
0459     /* Discard the whole mess. */
0460     dev_kfree_skb_any(skb);
0461     dev_kfree_skb_any(skb2);
0462     while (re2.s.code == RING_ENTRY_CODE_MORE) {
0463         re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
0464         dev_kfree_skb_any(skb2);
0465     }
0466     netdev->stats.rx_errors++;
0467 
0468 done:
0469     /* Tell the hardware we processed a packet.  */
0470     mix_ircnt.u64 = 0;
0471     mix_ircnt.s.ircnt = 1;
0472     cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
0473     return rc;
0474 }
0475 
0476 static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
0477 {
0478     unsigned int work_done = 0;
0479     union cvmx_mixx_ircnt mix_ircnt;
0480     int rc;
0481 
0482     mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
0483     while (work_done < budget && mix_ircnt.s.ircnt) {
0484 
0485         rc = octeon_mgmt_receive_one(p);
0486         if (!rc)
0487             work_done++;
0488 
0489         /* Check for more packets. */
0490         mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
0491     }
0492 
0493     octeon_mgmt_rx_fill_ring(p->netdev);
0494 
0495     return work_done;
0496 }
0497 
0498 static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
0499 {
0500     struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
0501     struct net_device *netdev = p->netdev;
0502     unsigned int work_done = 0;
0503 
0504     work_done = octeon_mgmt_receive_packets(p, budget);
0505 
0506     if (work_done < budget) {
0507         /* We stopped because no more packets were available. */
0508         napi_complete_done(napi, work_done);
0509         octeon_mgmt_enable_rx_irq(p);
0510     }
0511     octeon_mgmt_update_rx_stats(netdev);
0512 
0513     return work_done;
0514 }
0515 
0516 /* Reset the hardware to clean state.  */
0517 static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
0518 {
0519     union cvmx_mixx_ctl mix_ctl;
0520     union cvmx_mixx_bist mix_bist;
0521     union cvmx_agl_gmx_bist agl_gmx_bist;
0522 
0523     mix_ctl.u64 = 0;
0524     cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
0525     do {
0526         mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
0527     } while (mix_ctl.s.busy);
0528     mix_ctl.s.reset = 1;
0529     cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
0530     cvmx_read_csr(p->mix + MIX_CTL);
0531     octeon_io_clk_delay(64);
0532 
0533     mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
0534     if (mix_bist.u64)
0535         dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
0536             (unsigned long long)mix_bist.u64);
0537 
0538     agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
0539     if (agl_gmx_bist.u64)
0540         dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
0541              (unsigned long long)agl_gmx_bist.u64);
0542 }
0543 
0544 struct octeon_mgmt_cam_state {
0545     u64 cam[6];
0546     u64 cam_mask;
0547     int cam_index;
0548 };
0549 
0550 static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
0551                       const unsigned char *addr)
0552 {
0553     int i;
0554 
0555     for (i = 0; i < 6; i++)
0556         cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
0557     cs->cam_mask |= (1ULL << cs->cam_index);
0558     cs->cam_index++;
0559 }
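/* Layout note, derived from the code above and the CAM register writes in
 * octeon_mgmt_set_rx_filtering(): byte i of every added address is OR-ed
 * into cs->cam[i] at bit offset 8 * cam_index, so each of the
 * AGL_GMX_RX_ADR_CAM0..CAM5 registers ends up holding one octet from each
 * of up to eight CAM entries, and cam_mask carries one enable bit per entry
 * for AGL_GMX_RX_ADR_CAM_EN.
 */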
0560 
0561 static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
0562 {
0563     struct octeon_mgmt *p = netdev_priv(netdev);
0564     union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
0565     union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
0566     unsigned long flags;
0567     unsigned int prev_packet_enable;
0568     unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
0569     unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
0570     struct octeon_mgmt_cam_state cam_state;
0571     struct netdev_hw_addr *ha;
0572     int available_cam_entries;
0573 
0574     memset(&cam_state, 0, sizeof(cam_state));
0575 
0576     if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
0577         cam_mode = 0;
0578         available_cam_entries = 8;
0579     } else {
0580         /* One CAM entry for the primary address, leaves seven
0581          * for the secondary addresses.
0582          */
0583         available_cam_entries = 7 - netdev->uc.count;
0584     }
0585 
0586     if (netdev->flags & IFF_MULTICAST) {
0587         if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
0588             netdev_mc_count(netdev) > available_cam_entries)
0589             multicast_mode = 2; /* 2 - Accept all multicast.  */
0590         else
0591             multicast_mode = 0; /* 0 - Use CAM.  */
0592     }
0593 
0594     if (cam_mode == 1) {
0595         /* Add primary address. */
0596         octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
0597         netdev_for_each_uc_addr(ha, netdev)
0598             octeon_mgmt_cam_state_add(&cam_state, ha->addr);
0599     }
0600     if (multicast_mode == 0) {
0601         netdev_for_each_mc_addr(ha, netdev)
0602             octeon_mgmt_cam_state_add(&cam_state, ha->addr);
0603     }
0604 
0605     spin_lock_irqsave(&p->lock, flags);
0606 
0607     /* Disable packet I/O. */
0608     agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
0609     prev_packet_enable = agl_gmx_prtx.s.en;
0610     agl_gmx_prtx.s.en = 0;
0611     cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
0612 
0613     adr_ctl.u64 = 0;
0614     adr_ctl.s.cam_mode = cam_mode;
0615     adr_ctl.s.mcst = multicast_mode;
0616     adr_ctl.s.bcst = 1;     /* Allow broadcast */
0617 
0618     cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
0619 
0620     cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
0621     cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
0622     cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
0623     cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
0624     cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
0625     cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
0626     cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
0627 
0628     /* Restore packet I/O. */
0629     agl_gmx_prtx.s.en = prev_packet_enable;
0630     cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
0631 
0632     spin_unlock_irqrestore(&p->lock, flags);
0633 }
0634 
0635 static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
0636 {
0637     int r = eth_mac_addr(netdev, addr);
0638 
0639     if (r)
0640         return r;
0641 
0642     octeon_mgmt_set_rx_filtering(netdev);
0643 
0644     return 0;
0645 }
0646 
0647 static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
0648 {
0649     struct octeon_mgmt *p = netdev_priv(netdev);
0650     int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
0651 
0652     netdev->mtu = new_mtu;
0653 
0654     /* HW lifts the limit if the frame is VLAN tagged
0655      * (+4 bytes per each tag, up to two tags)
0656      */
0657     cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
0658     /* Set the hardware to truncate packets larger than the MTU. The jabber
0659      * register must be set to a multiple of 8 bytes, so round up. JABBER is
0660      * an unconditional limit, so we need to account for two possible VLAN
0661      * tags.
0662      */
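    /* Worked example (illustrative arithmetic only): with a 1500-byte MTU,
     * max_packet is 1500 + 14 + 4 = 1518, and the jabber limit written
     * below becomes (1518 + 7 + 8) & ~7 = 1528, i.e. rounded up to a
     * multiple of 8 with room for two VLAN tags.
     */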
0663     cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
0664                (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
0665 
0666     return 0;
0667 }
0668 
0669 static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
0670 {
0671     struct net_device *netdev = dev_id;
0672     struct octeon_mgmt *p = netdev_priv(netdev);
0673     union cvmx_mixx_isr mixx_isr;
0674 
0675     mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
0676 
0677     /* Clear any pending interrupts */
0678     cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
0679     cvmx_read_csr(p->mix + MIX_ISR);
0680 
0681     if (mixx_isr.s.irthresh) {
0682         octeon_mgmt_disable_rx_irq(p);
0683         napi_schedule(&p->napi);
0684     }
0685     if (mixx_isr.s.orthresh) {
0686         octeon_mgmt_disable_tx_irq(p);
0687         tasklet_schedule(&p->tx_clean_tasklet);
0688     }
0689 
0690     return IRQ_HANDLED;
0691 }
0692 
0693 static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
0694                       struct ifreq *rq, int cmd)
0695 {
0696     struct octeon_mgmt *p = netdev_priv(netdev);
0697     struct hwtstamp_config config;
0698     union cvmx_mio_ptp_clock_cfg ptp;
0699     union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
0700     bool have_hw_timestamps = false;
0701 
0702     if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
0703         return -EFAULT;
0704 
0705     /* Check whether the hardware supports timestamps */
0706     if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
0707         /* Get the current state of the PTP clock */
0708         ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
0709         if (!ptp.s.ext_clk_en) {
0710             /* The clock has not been configured to use an
0711              * external source.  Program it to use the main clock
0712              * reference.
0713              */
0714             u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
0715             if (!ptp.s.ptp_en)
0716                 cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
0717             netdev_info(netdev,
0718                     "PTP Clock using sclk reference @ %lldHz\n",
0719                     (NSEC_PER_SEC << 32) / clock_comp);
0720         } else {
0721             /* The clock is already programmed to use a GPIO */
0722             u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
0723             netdev_info(netdev,
0724                     "PTP Clock using GPIO%d @ %lld Hz\n",
0725                     ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
0726         }
0727 
0728         /* Enable the clock if it wasn't done already */
0729         if (!ptp.s.ptp_en) {
0730             ptp.s.ptp_en = 1;
0731             cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
0732         }
0733         have_hw_timestamps = true;
0734     }
0735 
0736     if (!have_hw_timestamps)
0737         return -EINVAL;
0738 
0739     switch (config.tx_type) {
0740     case HWTSTAMP_TX_OFF:
0741     case HWTSTAMP_TX_ON:
0742         break;
0743     default:
0744         return -ERANGE;
0745     }
0746 
0747     switch (config.rx_filter) {
0748     case HWTSTAMP_FILTER_NONE:
0749         p->has_rx_tstamp = false;
0750         rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
0751         rxx_frm_ctl.s.ptp_mode = 0;
0752         cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
0753         break;
0754     case HWTSTAMP_FILTER_ALL:
0755     case HWTSTAMP_FILTER_SOME:
0756     case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
0757     case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
0758     case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
0759     case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
0760     case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
0761     case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
0762     case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
0763     case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
0764     case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
0765     case HWTSTAMP_FILTER_PTP_V2_EVENT:
0766     case HWTSTAMP_FILTER_PTP_V2_SYNC:
0767     case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
0768     case HWTSTAMP_FILTER_NTP_ALL:
0769         p->has_rx_tstamp = have_hw_timestamps;
0770         config.rx_filter = HWTSTAMP_FILTER_ALL;
0771         if (p->has_rx_tstamp) {
0772             rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
0773             rxx_frm_ctl.s.ptp_mode = 1;
0774             cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
0775         }
0776         break;
0777     default:
0778         return -ERANGE;
0779     }
0780 
0781     if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
0782         return -EFAULT;
0783 
0784     return 0;
0785 }
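/* Usage sketch (userspace side, not part of this driver): the handler above
 * is reached through the SIOCSHWTSTAMP ioctl.  The interface name "mgmt0"
 * and the socket fd are illustrative assumptions:
 *
 *     struct hwtstamp_config cfg = {
 *         .tx_type   = HWTSTAMP_TX_ON,
 *         .rx_filter = HWTSTAMP_FILTER_ALL,
 *     };
 *     struct ifreq ifr = { 0 };
 *
 *     strncpy(ifr.ifr_name, "mgmt0", IFNAMSIZ - 1);
 *     ifr.ifr_data = (void *)&cfg;
 *     ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On success the accepted configuration is copied back through ifr_data;
 * on hardware without PTP support (non-CN6XXX) the request fails with EINVAL.
 */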
0786 
0787 static int octeon_mgmt_ioctl(struct net_device *netdev,
0788                  struct ifreq *rq, int cmd)
0789 {
0790     switch (cmd) {
0791     case SIOCSHWTSTAMP:
0792         return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
0793     default:
0794         return phy_do_ioctl(netdev, rq, cmd);
0795     }
0796 }
0797 
0798 static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
0799 {
0800     union cvmx_agl_gmx_prtx_cfg prtx_cfg;
0801 
0802     /* Disable GMX before we make any changes. */
0803     prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
0804     prtx_cfg.s.en = 0;
0805     prtx_cfg.s.tx_en = 0;
0806     prtx_cfg.s.rx_en = 0;
0807     cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
0808 
0809     if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
0810         int i;
0811         for (i = 0; i < 10; i++) {
0812             prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
0813             if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
0814                 break;
0815             mdelay(1);
0816             i++;
0817         }
0818     }
0819 }
0820 
0821 static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
0822 {
0823     union cvmx_agl_gmx_prtx_cfg prtx_cfg;
0824 
0825     /* Restore the GMX enable state only if link is set */
0826     prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
0827     prtx_cfg.s.tx_en = 1;
0828     prtx_cfg.s.rx_en = 1;
0829     prtx_cfg.s.en = 1;
0830     cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
0831 }
0832 
0833 static void octeon_mgmt_update_link(struct octeon_mgmt *p)
0834 {
0835     struct net_device *ndev = p->netdev;
0836     struct phy_device *phydev = ndev->phydev;
0837     union cvmx_agl_gmx_prtx_cfg prtx_cfg;
0838 
0839     prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
0840 
0841     if (!phydev->link)
0842         prtx_cfg.s.duplex = 1;
0843     else
0844         prtx_cfg.s.duplex = phydev->duplex;
0845 
0846     switch (phydev->speed) {
0847     case 10:
0848         prtx_cfg.s.speed = 0;
0849         prtx_cfg.s.slottime = 0;
0850 
0851         if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
0852             prtx_cfg.s.burst = 1;
0853             prtx_cfg.s.speed_msb = 1;
0854         }
0855         break;
0856     case 100:
0857         prtx_cfg.s.speed = 0;
0858         prtx_cfg.s.slottime = 0;
0859 
0860         if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
0861             prtx_cfg.s.burst = 1;
0862             prtx_cfg.s.speed_msb = 0;
0863         }
0864         break;
0865     case 1000:
0866         /* 1000 Mbit/s is only supported on 6XXX chips */
0867         if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
0868             prtx_cfg.s.speed = 1;
0869             prtx_cfg.s.speed_msb = 0;
0870             /* Only matters for half-duplex */
0871             prtx_cfg.s.slottime = 1;
0872             prtx_cfg.s.burst = phydev->duplex;
0873         }
0874         break;
0875     case 0:  /* No link */
0876     default:
0877         break;
0878     }
0879 
0880     /* Write the new GMX setting with the port still disabled. */
0881     cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
0882 
0883     /* Read GMX CFG again to make sure the config is completed. */
0884     prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
0885 
0886     if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
0887         union cvmx_agl_gmx_txx_clk agl_clk;
0888         union cvmx_agl_prtx_ctl prtx_ctl;
0889 
0890         prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
0891         agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
0892         /* MII (both speeds) and RGMII 1000 speed. */
0893         agl_clk.s.clk_cnt = 1;
0894         if (prtx_ctl.s.mode == 0) { /* RGMII mode */
0895             if (phydev->speed == 10)
0896                 agl_clk.s.clk_cnt = 50;
0897             else if (phydev->speed == 100)
0898                 agl_clk.s.clk_cnt = 5;
0899         }
0900         cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
0901     }
0902 }
0903 
0904 static void octeon_mgmt_adjust_link(struct net_device *netdev)
0905 {
0906     struct octeon_mgmt *p = netdev_priv(netdev);
0907     struct phy_device *phydev = netdev->phydev;
0908     unsigned long flags;
0909     int link_changed = 0;
0910 
0911     if (!phydev)
0912         return;
0913 
0914     spin_lock_irqsave(&p->lock, flags);
0915 
0916 
0917     if (!phydev->link && p->last_link)
0918         link_changed = -1;
0919 
0920     if (phydev->link &&
0921         (p->last_duplex != phydev->duplex ||
0922          p->last_link != phydev->link ||
0923          p->last_speed != phydev->speed)) {
0924         octeon_mgmt_disable_link(p);
0925         link_changed = 1;
0926         octeon_mgmt_update_link(p);
0927         octeon_mgmt_enable_link(p);
0928     }
0929 
0930     p->last_link = phydev->link;
0931     p->last_speed = phydev->speed;
0932     p->last_duplex = phydev->duplex;
0933 
0934     spin_unlock_irqrestore(&p->lock, flags);
0935 
0936     if (link_changed != 0) {
0937         if (link_changed > 0)
0938             netdev_info(netdev, "Link is up - %d/%s\n",
0939                     phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
0940         else
0941             netdev_info(netdev, "Link is down\n");
0942     }
0943 }
0944 
0945 static int octeon_mgmt_init_phy(struct net_device *netdev)
0946 {
0947     struct octeon_mgmt *p = netdev_priv(netdev);
0948     struct phy_device *phydev = NULL;
0949 
0950     if (octeon_is_simulation() || p->phy_np == NULL) {
0951         /* No PHYs in the simulator. */
0952         netif_carrier_on(netdev);
0953         return 0;
0954     }
0955 
0956     phydev = of_phy_connect(netdev, p->phy_np,
0957                 octeon_mgmt_adjust_link, 0,
0958                 PHY_INTERFACE_MODE_MII);
0959 
0960     if (!phydev)
0961         return -EPROBE_DEFER;
0962 
0963     return 0;
0964 }
0965 
0966 static int octeon_mgmt_open(struct net_device *netdev)
0967 {
0968     struct octeon_mgmt *p = netdev_priv(netdev);
0969     union cvmx_mixx_ctl mix_ctl;
0970     union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
0971     union cvmx_mixx_oring1 oring1;
0972     union cvmx_mixx_iring1 iring1;
0973     union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
0974     union cvmx_mixx_irhwm mix_irhwm;
0975     union cvmx_mixx_orhwm mix_orhwm;
0976     union cvmx_mixx_intena mix_intena;
0977     struct sockaddr sa;
0978 
0979     /* Allocate ring buffers.  */
0980     p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
0981                  GFP_KERNEL);
0982     if (!p->tx_ring)
0983         return -ENOMEM;
0984     p->tx_ring_handle =
0985         dma_map_single(p->dev, p->tx_ring,
0986                    ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
0987                    DMA_BIDIRECTIONAL);
0988     p->tx_next = 0;
0989     p->tx_next_clean = 0;
0990     p->tx_current_fill = 0;
0991 
0992 
0993     p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
0994                  GFP_KERNEL);
0995     if (!p->rx_ring)
0996         goto err_nomem;
0997     p->rx_ring_handle =
0998         dma_map_single(p->dev, p->rx_ring,
0999                    ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1000                    DMA_BIDIRECTIONAL);
1001 
1002     p->rx_next = 0;
1003     p->rx_next_fill = 0;
1004     p->rx_current_fill = 0;
1005 
1006     octeon_mgmt_reset_hw(p);
1007 
1008     mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
1009 
1010     /* Bring it out of reset if needed. */
1011     if (mix_ctl.s.reset) {
1012         mix_ctl.s.reset = 0;
1013         cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
1014         do {
1015             mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
1016         } while (mix_ctl.s.reset);
1017     }
1018 
1019     if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
1020         agl_gmx_inf_mode.u64 = 0;
1021         agl_gmx_inf_mode.s.en = 1;
1022         cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
1023     }
1024     if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
1025         || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
1026         /* Force compensation values, as they are not
1027          * determined properly by HW
1028          */
1029         union cvmx_agl_gmx_drv_ctl drv_ctl;
1030 
1031         drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
1032         if (p->port) {
1033             drv_ctl.s.byp_en1 = 1;
1034             drv_ctl.s.nctl1 = 6;
1035             drv_ctl.s.pctl1 = 6;
1036         } else {
1037             drv_ctl.s.byp_en = 1;
1038             drv_ctl.s.nctl = 6;
1039             drv_ctl.s.pctl = 6;
1040         }
1041         cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
1042     }
1043 
1044     oring1.u64 = 0;
1045     oring1.s.obase = p->tx_ring_handle >> 3;
1046     oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
1047     cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
1048 
1049     iring1.u64 = 0;
1050     iring1.s.ibase = p->rx_ring_handle >> 3;
1051     iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
1052     cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
1053 
1054     memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
1055     octeon_mgmt_set_mac_address(netdev, &sa);
1056 
1057     octeon_mgmt_change_mtu(netdev, netdev->mtu);
1058 
1059     /* Enable the port HW. Packets are not allowed until
1060      * cvmx_mgmt_port_enable() is called.
1061      */
1062     mix_ctl.u64 = 0;
1063     mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
1064     mix_ctl.s.en = 1;           /* Enable the port */
1065     mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
1066     /* MII CB-request FIFO programmable high watermark */
1067     mix_ctl.s.mrq_hwm = 1;
1068 #ifdef __LITTLE_ENDIAN
1069     mix_ctl.s.lendian = 1;
1070 #endif
1071     cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
1072 
1073     /* Read the PHY to find the mode of the interface. */
1074     if (octeon_mgmt_init_phy(netdev)) {
1075         dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
1076         goto err_noirq;
1077     }
1078 
1079     /* Set the mode of the interface, RGMII/MII. */
1080     if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
1081         union cvmx_agl_prtx_ctl agl_prtx_ctl;
1082         int rgmii_mode =
1083             (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
1084                        netdev->phydev->supported) |
1085              linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1086                        netdev->phydev->supported)) != 0;
1087 
1088         agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1089         agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
1090         cvmx_write_csr(p->agl_prt_ctl,  agl_prtx_ctl.u64);
1091 
1092         /* MII clock counts are based on the 125 MHz
1093          * reference, which has an 8 ns period, so our delays
1094          * need to be multiplied by this factor.
1095          */
1096 #define NS_PER_PHY_CLK 8
1097 
1098         /* Take the DLL and clock tree out of reset */
1099         agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1100         agl_prtx_ctl.s.clkrst = 0;
1101         if (rgmii_mode) {
1102             agl_prtx_ctl.s.dllrst = 0;
1103             agl_prtx_ctl.s.clktx_byp = 0;
1104         }
1105         cvmx_write_csr(p->agl_prt_ctl,  agl_prtx_ctl.u64);
1106         cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
1107 
1108         /* Wait for the DLL to lock. External 125 MHz
1109          * reference clock must be stable at this point.
1110          */
1111         ndelay(256 * NS_PER_PHY_CLK);
1112 
1113         /* Enable the interface */
1114         agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1115         agl_prtx_ctl.s.enable = 1;
1116         cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1117 
1118         /* Read the value back to force the previous write */
1119         agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1120 
1121         /* Enable the compensation controller */
1122         agl_prtx_ctl.s.comp = 1;
1123         agl_prtx_ctl.s.drv_byp = 0;
1124         cvmx_write_csr(p->agl_prt_ctl,  agl_prtx_ctl.u64);
1125         /* Force write out before wait. */
1126         cvmx_read_csr(p->agl_prt_ctl);
1127 
1128         /* Wait for the compensation state to lock. */
1129         ndelay(1040 * NS_PER_PHY_CLK);
1130 
1131         /* Default Interframe Gaps are too small.  Recommended
1132          * workaround is:
1133          *
1134          * AGL_GMX_TX_IFG[IFG1]=14
1135          * AGL_GMX_TX_IFG[IFG2]=10
1136          */
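        /* The 0xae value written below presumably packs the two fields as
         * nibbles, IFG2 = 0xa (10) in bits <7:4> and IFG1 = 0xe (14) in
         * bits <3:0>; the authoritative field layout is the AGL_GMX_TX_IFG
         * definition in the Octeon register headers.
         */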
1137         cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
1138     }
1139 
1140     octeon_mgmt_rx_fill_ring(netdev);
1141 
1142     /* Clear statistics. */
1143     /* Clear on read. */
1144     cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
1145     cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
1146     cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
1147 
1148     cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
1149     cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
1150     cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
1151 
1152     /* Clear any pending interrupts */
1153     cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
1154 
1155     if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
1156             netdev)) {
1157         dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
1158         goto err_noirq;
1159     }
1160 
1161     /* Interrupt every single RX packet */
1162     mix_irhwm.u64 = 0;
1163     mix_irhwm.s.irhwm = 0;
1164     cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
1165 
1166     /* Interrupt when we have 1 or more packets to clean.  */
1167     mix_orhwm.u64 = 0;
1168     mix_orhwm.s.orhwm = 0;
1169     cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
1170 
1171     /* Enable receive and transmit interrupts */
1172     mix_intena.u64 = 0;
1173     mix_intena.s.ithena = 1;
1174     mix_intena.s.othena = 1;
1175     cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
1176 
1177     /* Enable packet I/O. */
1178 
1179     rxx_frm_ctl.u64 = 0;
1180     rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
1181     rxx_frm_ctl.s.pre_align = 1;
1182     /* When set, disables the length check for non-min sized pkts
1183      * with padding in the client data.
1184      */
1185     rxx_frm_ctl.s.pad_len = 1;
1186     /* When set, disables the length check for VLAN pkts */
1187     rxx_frm_ctl.s.vlan_len = 1;
1189     /* When set, PREAMBLE checking is less strict */
1189     rxx_frm_ctl.s.pre_free = 1;
1190     /* Control Pause Frames can match station SMAC */
1191     rxx_frm_ctl.s.ctl_smac = 0;
1192     /* Control Pause Frames can match the globally assigned multicast address */
1193     rxx_frm_ctl.s.ctl_mcst = 1;
1194     /* Forward pause information to TX block */
1195     rxx_frm_ctl.s.ctl_bck = 1;
1196     /* Drop Control Pause Frames */
1197     rxx_frm_ctl.s.ctl_drp = 1;
1198     /* Strip off the preamble */
1199     rxx_frm_ctl.s.pre_strp = 1;
1200     /* This port is configured to send PREAMBLE+SFD to begin every
1201      * frame.  GMX checks that the PREAMBLE is sent correctly.
1202      */
1203     rxx_frm_ctl.s.pre_chk = 1;
1204     cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
1205 
1206     /* Configure the port duplex, speed and enables */
1207     octeon_mgmt_disable_link(p);
1208     if (netdev->phydev)
1209         octeon_mgmt_update_link(p);
1210     octeon_mgmt_enable_link(p);
1211 
1212     p->last_link = 0;
1213     p->last_speed = 0;
1214     /* No PHY is present in the simulator. The carrier was enabled
1215      * while initializing the PHY in that case, so leave it enabled.
1216      */
1217     if (netdev->phydev) {
1218         netif_carrier_off(netdev);
1219         phy_start(netdev->phydev);
1220     }
1221 
1222     netif_wake_queue(netdev);
1223     napi_enable(&p->napi);
1224 
1225     return 0;
1226 err_noirq:
1227     octeon_mgmt_reset_hw(p);
1228     dma_unmap_single(p->dev, p->rx_ring_handle,
1229              ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1230              DMA_BIDIRECTIONAL);
1231     kfree(p->rx_ring);
1232 err_nomem:
1233     dma_unmap_single(p->dev, p->tx_ring_handle,
1234              ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1235              DMA_BIDIRECTIONAL);
1236     kfree(p->tx_ring);
1237     return -ENOMEM;
1238 }
1239 
1240 static int octeon_mgmt_stop(struct net_device *netdev)
1241 {
1242     struct octeon_mgmt *p = netdev_priv(netdev);
1243 
1244     napi_disable(&p->napi);
1245     netif_stop_queue(netdev);
1246 
1247     if (netdev->phydev) {
1248         phy_stop(netdev->phydev);
1249         phy_disconnect(netdev->phydev);
1250     }
1251 
1252     netif_carrier_off(netdev);
1253 
1254     octeon_mgmt_reset_hw(p);
1255 
1256     free_irq(p->irq, netdev);
1257 
1258     /* dma_unmap is a nop on Octeon, so just free everything.  */
1259     skb_queue_purge(&p->tx_list);
1260     skb_queue_purge(&p->rx_list);
1261 
1262     dma_unmap_single(p->dev, p->rx_ring_handle,
1263              ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1264              DMA_BIDIRECTIONAL);
1265     kfree(p->rx_ring);
1266 
1267     dma_unmap_single(p->dev, p->tx_ring_handle,
1268              ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1269              DMA_BIDIRECTIONAL);
1270     kfree(p->tx_ring);
1271 
1272     return 0;
1273 }
1274 
1275 static netdev_tx_t
1276 octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
1277 {
1278     struct octeon_mgmt *p = netdev_priv(netdev);
1279     union mgmt_port_ring_entry re;
1280     unsigned long flags;
1281     netdev_tx_t rv = NETDEV_TX_BUSY;
1282 
1283     re.d64 = 0;
1284     re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
1285     re.s.len = skb->len;
1286     re.s.addr = dma_map_single(p->dev, skb->data,
1287                    skb->len,
1288                    DMA_TO_DEVICE);
1289 
1290     spin_lock_irqsave(&p->tx_list.lock, flags);
1291 
1292     if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
1293         spin_unlock_irqrestore(&p->tx_list.lock, flags);
1294         netif_stop_queue(netdev);
1295         spin_lock_irqsave(&p->tx_list.lock, flags);
1296     }
1297 
1298     if (unlikely(p->tx_current_fill >=
1299              ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
1300         spin_unlock_irqrestore(&p->tx_list.lock, flags);
1301         dma_unmap_single(p->dev, re.s.addr, re.s.len,
1302                  DMA_TO_DEVICE);
1303         goto out;
1304     }
1305 
1306     __skb_queue_tail(&p->tx_list, skb);
1307 
1308     /* Put it in the ring.  */
1309     p->tx_ring[p->tx_next] = re.d64;
1310     p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
1311     p->tx_current_fill++;
1312 
1313     spin_unlock_irqrestore(&p->tx_list.lock, flags);
1314 
1315     dma_sync_single_for_device(p->dev, p->tx_ring_handle,
1316                    ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1317                    DMA_BIDIRECTIONAL);
1318 
1319     netdev->stats.tx_packets++;
1320     netdev->stats.tx_bytes += skb->len;
1321 
1322     /* Ring the bell.  */
1323     cvmx_write_csr(p->mix + MIX_ORING2, 1);
1324 
1325     netif_trans_update(netdev);
1326     rv = NETDEV_TX_OK;
1327 out:
1328     octeon_mgmt_update_tx_stats(netdev);
1329     return rv;
1330 }
1331 
1332 #ifdef CONFIG_NET_POLL_CONTROLLER
1333 static void octeon_mgmt_poll_controller(struct net_device *netdev)
1334 {
1335     struct octeon_mgmt *p = netdev_priv(netdev);
1336 
1337     octeon_mgmt_receive_packets(p, 16);
1338     octeon_mgmt_update_rx_stats(netdev);
1339 }
1340 #endif
1341 
1342 static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1343                     struct ethtool_drvinfo *info)
1344 {
1345     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1346 }
1347 
1348 static int octeon_mgmt_nway_reset(struct net_device *dev)
1349 {
1350     if (!capable(CAP_NET_ADMIN))
1351         return -EPERM;
1352 
1353     if (dev->phydev)
1354         return phy_start_aneg(dev->phydev);
1355 
1356     return -EOPNOTSUPP;
1357 }
1358 
1359 static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1360     .get_drvinfo = octeon_mgmt_get_drvinfo,
1361     .nway_reset = octeon_mgmt_nway_reset,
1362     .get_link = ethtool_op_get_link,
1363     .get_link_ksettings = phy_ethtool_get_link_ksettings,
1364     .set_link_ksettings = phy_ethtool_set_link_ksettings,
1365 };
1366 
1367 static const struct net_device_ops octeon_mgmt_ops = {
1368     .ndo_open =         octeon_mgmt_open,
1369     .ndo_stop =         octeon_mgmt_stop,
1370     .ndo_start_xmit =       octeon_mgmt_xmit,
1371     .ndo_set_rx_mode =      octeon_mgmt_set_rx_filtering,
1372     .ndo_set_mac_address =      octeon_mgmt_set_mac_address,
1373     .ndo_eth_ioctl =            octeon_mgmt_ioctl,
1374     .ndo_change_mtu =       octeon_mgmt_change_mtu,
1375 #ifdef CONFIG_NET_POLL_CONTROLLER
1376     .ndo_poll_controller =      octeon_mgmt_poll_controller,
1377 #endif
1378 };
1379 
1380 static int octeon_mgmt_probe(struct platform_device *pdev)
1381 {
1382     struct net_device *netdev;
1383     struct octeon_mgmt *p;
1384     const __be32 *data;
1385     struct resource *res_mix;
1386     struct resource *res_agl;
1387     struct resource *res_agl_prt_ctl;
1388     int len;
1389     int result;
1390 
1391     netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1392     if (netdev == NULL)
1393         return -ENOMEM;
1394 
1395     SET_NETDEV_DEV(netdev, &pdev->dev);
1396 
1397     platform_set_drvdata(pdev, netdev);
1398     p = netdev_priv(netdev);
1399     netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1400                OCTEON_MGMT_NAPI_WEIGHT);
1401 
1402     p->netdev = netdev;
1403     p->dev = &pdev->dev;
1404     p->has_rx_tstamp = false;
1405 
1406     data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1407     if (data && len == sizeof(*data)) {
1408         p->port = be32_to_cpup(data);
1409     } else {
1410         dev_err(&pdev->dev, "no 'cell-index' property\n");
1411         result = -ENXIO;
1412         goto err;
1413     }
1414 
1415     snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1416 
1417     result = platform_get_irq(pdev, 0);
1418     if (result < 0)
1419         goto err;
1420 
1421     p->irq = result;
1422 
1423     res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1424     if (res_mix == NULL) {
1425         dev_err(&pdev->dev, "no 'reg' resource\n");
1426         result = -ENXIO;
1427         goto err;
1428     }
1429 
1430     res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1431     if (res_agl == NULL) {
1432         dev_err(&pdev->dev, "no 'reg' resource\n");
1433         result = -ENXIO;
1434         goto err;
1435     }
1436 
1437     res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1438     if (res_agl_prt_ctl == NULL) {
1439         dev_err(&pdev->dev, "no 'reg' resource\n");
1440         result = -ENXIO;
1441         goto err;
1442     }
1443 
1444     p->mix_phys = res_mix->start;
1445     p->mix_size = resource_size(res_mix);
1446     p->agl_phys = res_agl->start;
1447     p->agl_size = resource_size(res_agl);
1448     p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
1449     p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
1450 
1451 
1452     if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
1453                      res_mix->name)) {
1454         dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1455             res_mix->name);
1456         result = -ENXIO;
1457         goto err;
1458     }
1459 
1460     if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
1461                      res_agl->name)) {
1462         result = -ENXIO;
1463         dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1464             res_agl->name);
1465         goto err;
1466     }
1467 
1468     if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
1469                      p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
1470         result = -ENXIO;
1471         dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1472             res_agl_prt_ctl->name);
1473         goto err;
1474     }
1475 
1476     p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1477     p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
1478     p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
1479                        p->agl_prt_ctl_size);
1480     if (!p->mix || !p->agl || !p->agl_prt_ctl) {
1481         dev_err(&pdev->dev, "failed to map I/O memory\n");
1482         result = -ENOMEM;
1483         goto err;
1484     }
1485 
1486     spin_lock_init(&p->lock);
1487 
1488     skb_queue_head_init(&p->tx_list);
1489     skb_queue_head_init(&p->rx_list);
1490     tasklet_setup(&p->tx_clean_tasklet,
1491               octeon_mgmt_clean_tx_tasklet);
1492 
1493     netdev->priv_flags |= IFF_UNICAST_FLT;
1494 
1495     netdev->netdev_ops = &octeon_mgmt_ops;
1496     netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1497 
1498     netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
1499     netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
1500 
1501     result = of_get_ethdev_address(pdev->dev.of_node, netdev);
1502     if (result)
1503         eth_hw_addr_random(netdev);
1504 
1505     p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1506 
1507     result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1508     if (result)
1509         goto err;
1510 
1511     netif_carrier_off(netdev);
1512     result = register_netdev(netdev);
1513     if (result)
1514         goto err;
1515 
1516     return 0;
1517 
1518 err:
1519     of_node_put(p->phy_np);
1520     free_netdev(netdev);
1521     return result;
1522 }
1523 
1524 static int octeon_mgmt_remove(struct platform_device *pdev)
1525 {
1526     struct net_device *netdev = platform_get_drvdata(pdev);
1527     struct octeon_mgmt *p = netdev_priv(netdev);
1528 
1529     unregister_netdev(netdev);
1530     of_node_put(p->phy_np);
1531     free_netdev(netdev);
1532     return 0;
1533 }
1534 
1535 static const struct of_device_id octeon_mgmt_match[] = {
1536     {
1537         .compatible = "cavium,octeon-5750-mix",
1538     },
1539     {},
1540 };
1541 MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
1542 
1543 static struct platform_driver octeon_mgmt_driver = {
1544     .driver = {
1545         .name       = "octeon_mgmt",
1546         .of_match_table = octeon_mgmt_match,
1547     },
1548     .probe      = octeon_mgmt_probe,
1549     .remove     = octeon_mgmt_remove,
1550 };
1551 
1552 module_platform_driver(octeon_mgmt_driver);
1553 
1554 MODULE_SOFTDEP("pre: mdio-cavium");
1555 MODULE_DESCRIPTION(DRV_DESCRIPTION);
1556 MODULE_AUTHOR("David Daney");
1557 MODULE_LICENSE("GPL");