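// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the ARC EMAC
 *
 * Copyright (C) Synopsys, Inc. (www.synopsys.com)
 */
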
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include "emac.h"

static void arc_emac_restart(struct net_device *ndev);

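/**
 * arc_emac_tx_avail - Return the number of available slots in the Tx ring.
 * @priv:	Pointer to ARC EMAC private data structure.
 *
 * returns: the number of slots available for transmission in the Tx ring.
 */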
static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
{
	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
}

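/**
 * arc_emac_adjust_link - Adjust the PHY link duplex.
 * @ndev:	Pointer to the net_device structure.
 *
 * This function is called to change the duplex setting after auto negotiation
 * is done by the PHY.
 */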
static void arc_emac_adjust_link(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	unsigned int reg, state_changed = 0;

	if (priv->link != phy_dev->link) {
		priv->link = phy_dev->link;
		state_changed = 1;
	}

	if (priv->speed != phy_dev->speed) {
		priv->speed = phy_dev->speed;
		state_changed = 1;
		if (priv->set_mac_speed)
			priv->set_mac_speed(priv, priv->speed);
	}

	if (priv->duplex != phy_dev->duplex) {
		reg = arc_reg_get(priv, R_CTRL);

		if (phy_dev->duplex == DUPLEX_FULL)
			reg |= ENFL_MASK;
		else
			reg &= ~ENFL_MASK;

		arc_reg_set(priv, R_CTRL, reg);
		priv->duplex = phy_dev->duplex;
		state_changed = 1;
	}

	if (state_changed)
		phy_print_status(phy_dev);
}

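/**
 * arc_emac_get_drvinfo - Get EMAC driver information.
 * @ndev:	Pointer to net_device structure.
 * @info:	Pointer to ethtool_drvinfo structure.
 *
 * This implements the ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under a Linux prompt to execute this function.
 */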
static void arc_emac_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
}

static const struct ethtool_ops arc_emac_ethtool_ops = {
	.get_drvinfo	= arc_emac_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

#define FIRST_OR_LAST_MASK	(FIRST_MASK | LAST_MASK)

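/**
 * arc_emac_tx_clean - reclaims Tx BDs already processed by the EMAC.
 * @ndev:	Pointer to the network device.
 */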
static void arc_emac_tx_clean(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		unsigned int *txbd_dirty = &priv->txbd_dirty;
		struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
		struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
		struct sk_buff *skb = tx_buff->skb;
		unsigned int info = le32_to_cpu(txbd->info);

		if ((info & FOR_EMAC) || !txbd->data || !skb)
			break;

		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
			stats->tx_errors++;
			stats->tx_dropped++;

			if (info & DEFR)
				stats->tx_carrier_errors++;

			if (info & LTCL)
				stats->collisions++;

			if (info & UFLO)
				stats->tx_fifo_errors++;
		} else if (likely(info & FIRST_OR_LAST_MASK)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
		}

		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);

		/* Return the sk_buff to the system */
		dev_consume_skb_irq(skb);

		txbd->data = 0;
		txbd->info = 0;
		tx_buff->skb = NULL;

		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
	}

	/* Ensure that txbd_dirty is visible to tx() before checking
	 * for queue stopped.
	 */
	smp_mb();

	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
		netif_wake_queue(ndev);
}

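/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev:	Pointer to the network device.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to the upper layer.
 */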
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD,
		 * so next time the driver starts from this + 1.
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for the next cycle. netif_receive_skb()
		 * only if a new skb was allocated and mapped, to avoid holes
		 * in the Rx FIFO.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");
			dev_kfree_skb(skb);

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* Unmap the previously mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}

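/**
 * arc_emac_rx_miss_handle - handle the R_MISS register
 * @ndev:	Pointer to the net_device structure.
 */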
static void arc_emac_rx_miss_handle(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int miss;

	miss = arc_reg_get(priv, R_MISS);
	if (miss) {
		stats->rx_errors += miss;
		stats->rx_missed_errors += miss;
		priv->rx_missed_errors += miss;
	}
}

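/**
 * arc_emac_rx_stall_check - check for an Rx stall
 * @ndev:	Pointer to the net_device structure.
 * @budget:	How many BDs were requested to process on 1 call.
 * @work_done:	How many BDs were processed.
 *
 * Under certain conditions the EMAC stops reception of incoming packets and
 * continuously increments the R_MISS register instead of saving data into
 * the provided buffer. This function detects that condition and restarts
 * the EMAC.
 */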
static void arc_emac_rx_stall_check(struct net_device *ndev,
				    int budget, unsigned int work_done)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct arc_emac_bd *rxbd;

	if (work_done)
		priv->rx_missed_errors = 0;

	if (priv->rx_missed_errors && budget) {
		rxbd = &priv->rxbd[priv->last_rx_bd];
		if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
			arc_emac_restart(ndev);
			priv->rx_missed_errors = 0;
		}
	}
}

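/**
 * arc_emac_poll - NAPI poll handler.
 * @napi:	Pointer to napi_struct structure.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 */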
static int arc_emac_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	arc_emac_tx_clean(ndev);
	arc_emac_rx_miss_handle(ndev);

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
	}

	arc_emac_rx_stall_check(ndev, budget, work_done);

	return work_done;
}

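/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq:		irq number.
 * @dev_instance:	device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * The ARC EMAC has only 1 interrupt line; the bits raised in the STATUS
 * register tell the reason the interrupt fired.
 */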
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupts fire on the corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
			priv->rx_missed_errors += 0x100;
			napi_schedule(&priv->napi);
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void arc_emac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	arc_emac_intr(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

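/**
 * arc_emac_open - Open the network device.
 * @ndev:	Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function allocates and maps the Rx buffers, initializes the BD rings,
 * enables interrupts and the EMAC, starts the PHY and the Tx queue.
 */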
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	linkmode_and(phy_dev->advertising, phy_dev->advertising,
		     phy_dev->supported);

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	priv->txbd_curr = 0;
	priv->txbd_dirty = 0;

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		    (RX_BD_NUM << 24) |	/* RX BD table length */
		    (TX_BD_NUM << 16) |	/* TX BD table length */
		    TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	return 0;
}

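/**
 * arc_emac_set_rx_mode - Change the receive filtering mode.
 * @ndev:	Pointer to the network device.
 *
 * This function enables/disables promiscuous or all-multicast mode
 * and updates the multicast filtering list of the network device.
 */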
static void arc_emac_set_rx_mode(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	if (ndev->flags & IFF_PROMISC) {
		arc_reg_or(priv, R_CTRL, PROM_MASK);
	} else {
		arc_reg_clr(priv, R_CTRL, PROM_MASK);

		if (ndev->flags & IFF_ALLMULTI) {
			arc_reg_set(priv, R_LAFL, ~0);
			arc_reg_set(priv, R_LAFH, ~0);
		} else if (ndev->flags & IFF_MULTICAST) {
			struct netdev_hw_addr *ha;
			unsigned int filter[2] = { 0, 0 };
			int bit;

			netdev_for_each_mc_addr(ha, ndev) {
				bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
				filter[bit >> 5] |= 1 << (bit & 31);
			}

			arc_reg_set(priv, R_LAFL, filter[0]);
			arc_reg_set(priv, R_LAFH, filter[1]);
		} else {
			arc_reg_set(priv, R_LAFL, 0);
			arc_reg_set(priv, R_LAFH, 0);
		}
	}
}

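/**
 * arc_free_tx_queue - free skbs from the Tx queue
 * @ndev:	Pointer to the network device.
 *
 * This function must be called while the EMAC is disabled.
 */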
static void arc_free_tx_queue(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		struct arc_emac_bd *txbd = &priv->txbd[i];
		struct buffer_state *tx_buff = &priv->tx_buff[i];

		if (tx_buff->skb) {
			dma_unmap_single(&ndev->dev,
					 dma_unmap_addr(tx_buff, addr),
					 dma_unmap_len(tx_buff, len),
					 DMA_TO_DEVICE);

			/* Return the sk_buff to the system */
			dev_kfree_skb_irq(tx_buff->skb);
		}

		txbd->info = 0;
		txbd->data = 0;
		tx_buff->skb = NULL;
	}
}

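/**
 * arc_free_rx_queue - free skbs from the Rx queue
 * @ndev:	Pointer to the network device.
 *
 * This function must be called while the EMAC is disabled.
 */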
static void arc_free_rx_queue(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < RX_BD_NUM; i++) {
		struct arc_emac_bd *rxbd = &priv->rxbd[i];
		struct buffer_state *rx_buff = &priv->rx_buff[i];

		if (rx_buff->skb) {
			dma_unmap_single(&ndev->dev,
					 dma_unmap_addr(rx_buff, addr),
					 dma_unmap_len(rx_buff, len),
					 DMA_FROM_DEVICE);

			/* Return the sk_buff to the system */
			dev_kfree_skb_irq(rx_buff->skb);
		}

		rxbd->info = 0;
		rxbd->data = 0;
		rx_buff->skb = NULL;
	}
}

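/**
 * arc_emac_stop - Close the network device.
 * @ndev:	Pointer to the network device.
 *
 * This function stops the Tx queue and the PHY, disables interrupts and
 * the EMAC itself, and frees all queued Rx and Tx buffers.
 */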
static int arc_emac_stop(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);

	phy_stop(ndev->phydev);

	/* Disable interrupts */
	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Disable EMAC */
	arc_reg_clr(priv, R_CTRL, EN_MASK);

	/* Return the sk_buffs to the system */
	arc_free_tx_queue(ndev);
	arc_free_rx_queue(ndev);

	return 0;
}

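/**
 * arc_emac_stats - Get system network statistics.
 * @ndev:	Pointer to net_device structure.
 *
 * Returns the address of the device statistics structure.
 * Statistics are updated in the interrupt handler.
 */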
static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long miss, rxerr;
	u8 rxcrc, rxfram, rxoflow;

	rxerr = arc_reg_get(priv, R_RXERR);
	miss = arc_reg_get(priv, R_MISS);

	/* R_RXERR packs three 8-bit error counters:
	 * CRC errors [7:0], frame errors [15:8], overflow [23:16].
	 */
	rxcrc = rxerr;
	rxfram = rxerr >> 8;
	rxoflow = rxerr >> 16;

	stats->rx_errors += miss;
	stats->rx_errors += rxcrc + rxfram + rxoflow;

	stats->rx_over_errors += rxoflow;
	stats->rx_frame_errors += rxfram;
	stats->rx_crc_errors += rxcrc;
	stats->rx_missed_errors += miss;

	return stats;
}

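/**
 * arc_emac_tx - Starts the data transmission.
 * @skb:	sk_buff pointer that contains data to be transmitted.
 * @ndev:	Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *		NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */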
static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int len, *txbd_curr = &priv->txbd_curr;
	struct net_device_stats *stats = &ndev->stats;
	__le32 *info = &priv->txbd[*txbd_curr].info;
	dma_addr_t addr;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, ETH_ZLEN, skb->len);

	if (unlikely(!arc_emac_tx_avail(priv))) {
		netif_stop_queue(ndev);
		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
			      DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
		stats->tx_dropped++;
		stats->tx_errors++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);

	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);

	/* Make sure pointer to data buffer is set */
	wmb();

	skb_tx_timestamp(skb);

	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);

	/* Make sure info word is set */
	wmb();

	priv->tx_buff[*txbd_curr].skb = skb;

	/* Increment index to point to the next BD */
	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

	/* Ensure that tx_clean() sees the new txbd_curr before
	 * checking the queue status. This prevents an unneeded wake
	 * of the queue in tx_clean().
	 */
	smp_mb();

	if (!arc_emac_tx_avail(priv)) {
		netif_stop_queue(ndev);

		/* Refresh txbd_dirty after stopping the queue */
		smp_mb();

		if (arc_emac_tx_avail(priv))
			netif_start_queue(ndev);
	}

	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	return NETDEV_TX_OK;
}

static void arc_emac_set_address_internal(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int addr_low, addr_hi;

	addr_low = le32_to_cpu(*(__le32 *)&ndev->dev_addr[0]);
	addr_hi = le16_to_cpu(*(__le16 *)&ndev->dev_addr[4]);

	arc_reg_set(priv, R_ADDRL, addr_low);
	arc_reg_set(priv, R_ADDRH, addr_hi);
}

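/**
 * arc_emac_set_address - Set the MAC address for this device.
 * @ndev:	Pointer to net_device structure.
 * @p:		6 byte address to be written as the MAC address.
 *
 * This function copies the HW address from the sockaddr structure to the
 * net_device structure and updates the address in HW.
 *
 * returns:	-EBUSY if the net device is busy or 0 if the address is set
 *		successfully.
 */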
static int arc_emac_set_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(ndev, addr->sa_data);

	arc_emac_set_address_internal(ndev);

	return 0;
}

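/**
 * arc_emac_restart - Restart EMAC
 * @ndev:	Pointer to net_device structure.
 *
 * This function resets the EMAC and its BD rings in order to restore
 * reception of network packets.
 */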
static void arc_emac_restart(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	int i;

	if (net_ratelimit())
		netdev_warn(ndev, "restarting stalled EMAC\n");

	netif_stop_queue(ndev);

	/* Disable interrupts */
	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Disable EMAC */
	arc_reg_clr(priv, R_CTRL, EN_MASK);

	/* Return the sk_buffs to the system */
	arc_free_tx_queue(ndev);

	/* Clean Tx BD's */
	priv->txbd_curr = 0;
	priv->txbd_dirty = 0;
	memset(priv->txbd, 0, TX_RING_SZ);

	for (i = 0; i < RX_BD_NUM; i++) {
		struct arc_emac_bd *rxbd = &priv->rxbd[i];
		unsigned int info = le32_to_cpu(rxbd->info);

		if (!(info & FOR_EMAC)) {
			stats->rx_errors++;
			stats->rx_dropped++;
		}

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}
	priv->last_rx_bd = 0;

	/* Make sure info is visible to EMAC before enable */
	wmb();

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	netif_start_queue(ndev);
}

static const struct net_device_ops arc_emac_netdev_ops = {
	.ndo_open		= arc_emac_open,
	.ndo_stop		= arc_emac_stop,
	.ndo_start_xmit		= arc_emac_tx,
	.ndo_set_mac_address	= arc_emac_set_address,
	.ndo_get_stats		= arc_emac_stats,
	.ndo_set_rx_mode	= arc_emac_set_rx_mode,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= arc_emac_poll_controller,
#endif
};

int arc_emac_probe(struct net_device *ndev, int interface)
{
	struct device *dev = ndev->dev.parent;
	struct resource res_regs;
	struct device_node *phy_node;
	struct phy_device *phydev = NULL;
	struct arc_emac_priv *priv;
	unsigned int id, clock_frequency, irq;
	int err;

	/* Get PHY from device tree */
	phy_node = of_parse_phandle(dev->of_node, "phy", 0);
	if (!phy_node) {
		dev_err(dev, "failed to retrieve phy description from device tree\n");
		return -ENODEV;
	}

	/* Get EMAC registers base address from device tree */
	err = of_address_to_resource(dev->of_node, 0, &res_regs);
	if (err) {
		dev_err(dev, "failed to retrieve registers base from device tree\n");
		err = -ENODEV;
		goto out_put_node;
	}

	/* Get IRQ from device tree */
	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		dev_err(dev, "failed to retrieve <irq> value from device tree\n");
		err = -ENODEV;
		goto out_put_node;
	}

	ndev->netdev_ops = &arc_emac_netdev_ops;
	ndev->ethtool_ops = &arc_emac_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	priv = netdev_priv(ndev);
	priv->dev = dev;

	priv->regs = devm_ioremap_resource(dev, &res_regs);
	if (IS_ERR(priv->regs)) {
		err = PTR_ERR(priv->regs);
		goto out_put_node;
	}

	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);

	if (priv->clk) {
		err = clk_prepare_enable(priv->clk);
		if (err) {
			dev_err(dev, "failed to enable clock\n");
			goto out_put_node;
		}

		clock_frequency = clk_get_rate(priv->clk);
	} else {
		/* Get CPU clock frequency from device tree */
		if (of_property_read_u32(dev->of_node, "clock-frequency",
					 &clock_frequency)) {
			dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
			err = -EINVAL;
			goto out_put_node;
		}
	}

	id = arc_reg_get(priv, R_ID);

	/* Check for EMAC revision 5 or 7, magic number */
	if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
		dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id);
		err = -ENODEV;
		goto out_clken;
	}
	dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id);

	/* Set poll rate so that it polls every 1 ms */
	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);

	ndev->irq = irq;
	dev_info(dev, "IRQ is %d\n", ndev->irq);

	/* Register interrupt handler for device */
	err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0,
			       ndev->name, ndev);
	if (err) {
		dev_err(dev, "could not allocate IRQ\n");
		goto out_clken;
	}

	/* Get MAC address from device tree */
	err = of_get_ethdev_address(dev->of_node, ndev);
	if (err)
		eth_hw_addr_random(ndev);

	arc_emac_set_address_internal(ndev);
	dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);

	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
	priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ,
					 &priv->rxbd_dma, GFP_KERNEL);

	if (!priv->rxbd) {
		dev_err(dev, "failed to allocate data buffers\n");
		err = -ENOMEM;
		goto out_clken;
	}

	priv->txbd = priv->rxbd + RX_BD_NUM;

	priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
	dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring [0x%x]\n",
		(unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);

	err = arc_mdio_probe(priv);
	if (err) {
		dev_err(dev, "failed to probe MII bus\n");
		goto out_clken;
	}

	phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
				interface);
	if (!phydev) {
		dev_err(dev, "of_phy_connect() failed\n");
		err = -ENODEV;
		goto out_mdio;
	}

	dev_info(dev, "connected to %s phy with id 0x%x\n",
		 phydev->drv->name, phydev->phy_id);

	netif_napi_add_weight(ndev, &priv->napi, arc_emac_poll,
			      ARC_EMAC_NAPI_WEIGHT);

	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "failed to register network device\n");
		goto out_netif_api;
	}

	of_node_put(phy_node);
	return 0;

out_netif_api:
	netif_napi_del(&priv->napi);
	phy_disconnect(phydev);
out_mdio:
	arc_mdio_remove(priv);
out_clken:
	if (priv->clk)
		clk_disable_unprepare(priv->clk);
out_put_node:
	of_node_put(phy_node);

	return err;
}
EXPORT_SYMBOL_GPL(arc_emac_probe);

int arc_emac_remove(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	phy_disconnect(ndev->phydev);
	arc_mdio_remove(priv);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}
EXPORT_SYMBOL_GPL(arc_emac_remove);

MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
MODULE_DESCRIPTION("ARC EMAC driver");
MODULE_LICENSE("GPL");