0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #include "tsnep.h"
0019 #include "tsnep_hw.h"
0020
0021 #include <linux/module.h>
0022 #include <linux/of.h>
0023 #include <linux/of_net.h>
0024 #include <linux/of_mdio.h>
0025 #include <linux/interrupt.h>
0026 #include <linux/etherdevice.h>
0027 #include <linux/phy.h>
0028 #include <linux/iopoll.h>
0029
0030 #define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
0031 TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
0032 #define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
0033 #define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
0034
0035 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
0036 #define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
0037 #else
0038 #define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
0039 #endif
0040 #define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
0041
/* unmask the interrupt sources given in 'mask' */
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}
0046
/* mask the interrupt sources given in 'mask'; ECM_INT_DISABLE selects
 * "disable" semantics for the write to the shared enable register
 */
static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}
0052
/* interrupt handler: acknowledge all active sources, forward link changes to
 * phylib and defer queue 0 TX/RX work to NAPI
 */
static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge every pending source in a single write */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* link state change is signaled by the MAC, not by a PHY interrupt */
	if ((active & ECM_INT_LINK) != 0) {
		if (adapter->netdev->phydev)
			phy_mac_interrupt(adapter->netdev->phydev);
	}

	/* mask the queue interrupt until NAPI is done; re-enabled in
	 * tsnep_poll() via napi_complete_done()
	 */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		if (adapter->netdev) {
			tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
			napi_schedule(&adapter->queue[0].napi);
		}
	}

	return IRQ_HANDLED;
}
0078
/* MDIO bus read, clause 22 only; returns the 16 bit register value or a
 * negative error code (-ETIMEDOUT if the transfer does not complete)
 */
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	/* wait for the transfer to finish: poll busy flag every 16 us,
	 * give up after 1 ms
	 */
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	/* read data is delivered in the status register */
	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}
0101
0102 static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
0103 u16 val)
0104 {
0105 struct tsnep_adapter *adapter = bus->priv;
0106 u32 md;
0107 int retval;
0108
0109 if (regnum & MII_ADDR_C45)
0110 return -EOPNOTSUPP;
0111
0112 md = ECM_MD_WRITE;
0113 if (!adapter->suppress_preamble)
0114 md |= ECM_MD_PREAMBLE;
0115 md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
0116 md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
0117 md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
0118 iowrite32(md, adapter->addr + ECM_MD_CONTROL);
0119 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
0120 !(md & ECM_MD_BUSY), 16, 1000);
0121 if (retval != 0)
0122 return retval;
0123
0124 return 0;
0125 }
0126
/* phylib link change callback: program the MAC link mode register to match
 * the negotiated speed; speeds other than 100/1000 disable the MAC link
 */
static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	u32 mode;

	if (phydev->link) {
		switch (phydev->speed) {
		case SPEED_100:
			mode = ECM_LINK_MODE_100;
			break;
		case SPEED_1000:
			mode = ECM_LINK_MODE_1000;
			break;
		default:
			mode = ECM_LINK_MODE_OFF;
			break;
		}
		iowrite32(mode, adapter->addr + ECM_STATUS);
	}

	phy_print_status(netdev->phydev);
}
0150
/* connect and start the PHY; returns 0 or a negative error code from
 * phy_connect_direct()
 */
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* only 100 Mbit/s and 1000 Mbit/s full duplex are kept advertised;
	 * 10 Mbit/s and all half duplex modes are removed
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE by writing an all-zero EEE configuration */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	/* link interrupts are delivered through the MAC interrupt (see
	 * tsnep_irq()), not through a dedicated PHY interrupt line
	 */
	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}
0181
/* stop and disconnect the PHY and clear the netdev's PHY reference */
static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}
0188
/* free the TX descriptor ring pages; entry state is cleared first so stale
 * descriptor pointers cannot be used afterwards
 */
static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}
0205
/* allocate the TX descriptor ring: TSNEP_RING_PAGE_COUNT DMA-coherent pages,
 * each carved into TSNEP_RING_ENTRIES_PER_PAGE descriptors, linked into a
 * circular list; returns 0 or -ENOMEM (ring is cleaned up on failure)
 */
static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			/* each slot starts with the write-back area, the
			 * descriptor proper lies TSNEP_DESC_OFFSET behind it
			 */
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	/* chain the descriptors into a ring via their next pointers */
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}
0243
/* finalize the descriptor at 'index' and hand it to the hardware; 'last'
 * marks the final fragment of a frame
 */
static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	/* only the first fragment of a frame carries the skb */
	if (entry->skb) {
		entry->properties =
			skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle the user flag on every reuse of this ring slot;
		 * together with the owner counter this lets tsnep_tx_poll()
		 * tell a fresh hardware write-back apart from a stale one of
		 * the previous use of the slot (NOTE(review): inferred from
		 * the owner comparison in tsnep_tx_poll() - confirm against
		 * the hardware specification)
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
	/* the owner counter cycles through 1..3 and is incremented once per
	 * full traversal of the ring
	 */
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);

	/* all other descriptor fields must be observable by the device before
	 * the properties word that transfers ownership; dma_wmb() enforces
	 * that ordering
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}
0301
0302 static int tsnep_tx_desc_available(struct tsnep_tx *tx)
0303 {
0304 if (tx->read <= tx->write)
0305 return TSNEP_RING_SIZE - tx->write + tx->read - 1;
0306 else
0307 return tx->read - tx->write - 1;
0308 }
0309
/* DMA-map the skb head (descriptor 0) and all page fragments (descriptors
 * 1..count-1) and store the bus addresses in the descriptors; returns 0 or
 * -ENOMEM on a mapping error (partially mapped fragments are released by the
 * caller via tsnep_tx_unmap(), see tsnep_xmit_frame_ring())
 */
static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	dma_addr_t dma;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (i == 0) {
			/* linear part of the skb */
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);
		} else {
			/* page fragment i - 1 */
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);
	}

	return 0;
}
0342
/* undo the DMA mappings of 'count' descriptors starting at 'index'; entries
 * with len == 0 (never mapped) are skipped; descriptor 0 of a frame was
 * mapped with dma_map_single(), the rest with skb_frag_dma_map()
 */
static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];

		if (entry->len) {
			if (i == 0)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			entry->len = 0;
		}
	}
}
0367
/* queue one skb on the given TX ring; serializes against tsnep_tx_poll()
 * via tx->lock; returns NETDEV_TX_OK (also for dropped frames) or
 * NETDEV_TX_BUSY when the ring is unexpectedly full
 */
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	unsigned long flags;
	int count = 1;
	struct tsnep_tx_entry *entry;
	int i;
	int retval;

	/* one descriptor for the linear part plus one per page fragment */
	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx->lock, flags);

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring full; should not normally happen because the queue is
		 * stopped below as soon as fewer than MAX_SKB_FRAGS + 1
		 * descriptors remain
		 */
		netif_stop_queue(tx->adapter->netdev);

		spin_unlock_irqrestore(&tx->lock, flags);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval != 0) {
		/* mapping failed: release partial mappings and drop frame */
		tsnep_tx_unmap(tx, tx->write, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		spin_unlock_irqrestore(&tx->lock, flags);

		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");

		return NETDEV_TX_OK;
	}

	/* mark the skb so tsnep_tx_poll() delivers the HW TX timestamp */
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
				  i == (count - 1));
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	skb_tx_timestamp(skb);

	/* descriptors must be valid in memory before the device is told to
	 * start fetching them
	 */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* not enough room for a maximally fragmented skb; stop the
		 * queue until tsnep_tx_poll() frees descriptors
		 */
		netif_stop_queue(tx->adapter->netdev);
	}

	tx->packets++;
	tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;

	spin_unlock_irqrestore(&tx->lock, flags);

	return NETDEV_TX_OK;
}
0438
/* reclaim completed TX descriptors: unmap DMA, deliver hardware TX
 * timestamps and free skbs; returns true when the local reclaim budget was
 * not exhausted, i.e. TX work is complete
 */
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	unsigned long flags;
	int budget = 128;
	struct tsnep_tx_entry *entry;
	int count;

	spin_lock_irqsave(&tx->lock, flags);

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		/* the descriptor is done when the hardware wrote back owner
		 * bits (counter and user flag) matching the ones we set in
		 * tsnep_tx_activate()
		 */
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* the owner check above must complete before any other
		 * write-back field is read; dma_rmb() enforces that ordering
		 */
		dma_rmb();

		/* a frame occupies one descriptor per fragment plus one */
		count = 1;
		if (skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;

		tsnep_tx_unmap(tx, tx->read, count);

		/* deliver the hardware TX timestamp if one was requested and
		 * the extended write-back carries it
		 */
		if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp;

			/* either free running counter or PTP time, depending
			 * on what user space asked for
			 */
			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HW_TSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		napi_consume_skb(entry->skb, budget);
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		budget--;
	} while (likely(budget));

	/* wake the queue once enough descriptors are free for a maximally
	 * fragmented skb with headroom
	 */
	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_queue_stopped(tx->adapter->netdev)) {
		netif_wake_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return (budget != 0);
}
0506
/* initialize one TX queue: allocate the ring, point the hardware at the
 * first descriptor and reset the owner counter state; 'addr' is the queue's
 * register window
 */
static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 struct tsnep_tx *tx)
{
	dma_addr_t dma;
	int retval;

	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;

	/* TSNEP_RESET_OWNER_COUNTER is encoded in the low bits of the
	 * descriptor address written to the hardware
	 */
	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	spin_lock_init(&tx->lock);

	return 0;
}
0531
/* wait (polling every 10 ms, up to 1 s) until the hardware has finished any
 * running TX activity, then free the descriptor ring
 */
static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_tx_ring_cleanup(tx);
}
0542
/* release all RX buffers (unmap + free skb) and the descriptor ring pages */
static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (dma_unmap_addr(entry, dma))
			dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
					 dma_unmap_len(entry, len),
					 DMA_FROM_DEVICE);
		if (entry->skb)
			dev_kfree_skb(entry->skb);
	}

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}
0570
/* allocate one RX buffer skb and DMA-map it; on success the entry and its
 * descriptor are updated, on failure (-ENOMEM) the entry is left untouched
 */
static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
				      struct tsnep_rx_entry *entry)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
				 GFP_ATOMIC | GFP_DMA);
	if (!skb)
		return -ENOMEM;

	/* align the buffer for the inline metadata, see RX_SKB_RESERVE */
	skb_reserve(skb, RX_SKB_RESERVE);

	dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dmadev, dma)) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	entry->skb = skb;
	entry->len = RX_SKB_LENGTH;
	dma_unmap_addr_set(entry, dma, dma);
	entry->desc->rx = __cpu_to_le64(dma);

	return 0;
}
0599
/* allocate the RX descriptor ring (same page/descriptor layout as TX, see
 * tsnep_tx_ring_init()) and attach a mapped buffer to every descriptor;
 * returns 0 or -ENOMEM (ring is cleaned up on failure)
 */
static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			/* write-back area first, descriptor proper at
			 * TSNEP_DESC_OFFSET behind it
			 */
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		/* chain descriptors into a ring and populate each with a
		 * receive buffer
		 */
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);

		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
		if (retval)
			goto failed;
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}
0641
/* hand the RX descriptor at 'index' (back) to the hardware; mirrors the
 * owner counter scheme of tsnep_tx_activate()
 */
static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* buffer length plus interrupt request on completion */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	/* owner counter cycles 1..3, bumped once per ring traversal */
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* all other descriptor fields must be observable by the device before
	 * the properties word that transfers ownership; dma_wmb() enforces
	 * that ordering
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}
0668
/* NAPI RX handler: process up to 'budget' received frames; every consumed
 * buffer is immediately replaced so its descriptor can be handed back to the
 * hardware; returns the number of frames passed up the stack
 */
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	int done = 0;
	struct tsnep_rx_entry *entry;
	struct sk_buff *skb;
	size_t len;
	dma_addr_t dma;
	int length;
	bool enable = false;
	int retval;

	while (likely(done < budget)) {
		entry = &rx->entry[rx->read];
		/* an owner counter mismatch means the hardware has not
		 * written this descriptor back yet
		 */
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;

		/* the owner check above must complete before any other
		 * write-back field is read; dma_rmb() enforces that ordering
		 */
		dma_rmb();

		skb = entry->skb;
		len = dma_unmap_len(entry, len);
		dma = dma_unmap_addr(entry, dma);

		/* allocate the replacement buffer before consuming the
		 * current one; on failure the frame is dropped and the old
		 * buffer is recycled into the ring
		 */
		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
		if (!retval) {
			dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);

			/* frame length including metadata, excluding FCS */
			length = __le32_to_cpu(entry->desc_wb->properties) &
				 TSNEP_DESC_LENGTH_MASK;
			skb_put(skb, length - ETH_FCS_LEN);
			if (rx->adapter->hwtstamp_config.rx_filter ==
			    HWTSTAMP_FILTER_ALL) {
				struct skb_shared_hwtstamps *hwtstamps =
					skb_hwtstamps(skb);
				struct tsnep_rx_inline *rx_inline =
					(struct tsnep_rx_inline *)skb->data;

				/* the RX timestamp is read lazily from the
				 * inline metadata via ndo_get_tstamp()
				 */
				skb_shinfo(skb)->tx_flags |=
					SKBTX_HW_TSTAMP_NETDEV;
				memset(hwtstamps, 0, sizeof(*hwtstamps));
				hwtstamps->netdev_data = rx_inline;
			}
			skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
			skb->protocol = eth_type_trans(skb,
						       rx->adapter->netdev);

			rx->packets++;
			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
			if (skb->pkt_type == PACKET_MULTICAST)
				rx->multicast++;

			napi_gro_receive(napi, skb);
			done++;
		} else {
			rx->dropped++;
		}

		/* refill: hand the descriptor back to the hardware */
		tsnep_rx_activate(rx, rx->read);

		enable = true;

		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
	}

	if (enable) {
		/* refilled descriptors must be valid in memory before the
		 * device is told to continue
		 */
		dma_wmb();

		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return done;
}
0753
/* initialize one RX queue: allocate the ring, point the hardware at the
 * first descriptor, activate all descriptors and start reception
 */
static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 struct tsnep_rx *rx)
{
	dma_addr_t dma;
	int i;
	int retval;

	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	/* TSNEP_RESET_OWNER_COUNTER is encoded in the low bits of the
	 * descriptor address written to the hardware
	 */
	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	for (i = 0; i < TSNEP_RING_SIZE; i++)
		tsnep_rx_activate(rx, i);

	/* descriptors must be valid in memory before reception is enabled */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);

	return 0;
}
0785
/* request RX disable and wait (polling every 10 ms, up to 1 s) until the
 * hardware confirms, then free buffers and the descriptor ring
 */
static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_rx_ring_cleanup(rx);
}
0797
/* NAPI poll: reclaim TX completions, process RX and re-enable the queue
 * interrupt once all work is done
 */
static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx) {
		done = tsnep_rx_poll(queue->rx, napi, budget);
		if (done >= budget)
			complete = false;
	}

	/* work remains: return the full budget to stay in polling mode */
	if (!complete)
		return budget;

	/* only re-enable the interrupt if NAPI really completed */
	if (likely(napi_complete_done(napi, done)))
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

	/* NAPI requires that less than the full budget is returned when the
	 * poll is complete
	 */
	return min(done, budget - 1);
}
0823
/* ndo_open: connect the PHY, set up all TX/RX queues, register and enable
 * NAPI and unmask the queue interrupts
 */
static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;
	void __iomem *addr;
	int tx_queue_index = 0;
	int rx_queue_index = 0;
	int retval;

	retval = tsnep_phy_open(adapter);
	if (retval)
		return retval;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->queue[i].adapter = adapter;
		if (adapter->queue[i].tx) {
			/* each hardware queue has its own register window */
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr,
					       adapter->queue[i].tx);
			if (retval)
				goto failed;
			tx_queue_index++;
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr,
					       adapter->queue[i].rx);
			if (retval)
				goto failed;
			rx_queue_index++;
		}
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	for (i = 0; i < adapter->num_queues; i++) {
		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
			       tsnep_poll, 64);
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;

failed:
	/* NOTE(review): this loop also closes queues that were never opened
	 * (e.g. when the first tsnep_tx_open() fails); tsnep_rx_close()/
	 * tsnep_tx_close() then operate on zeroed structures - verify this
	 * is safe or track how many queues were actually opened
	 */
	for (i = 0; i < adapter->num_queues; i++) {
		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	tsnep_phy_close(adapter);
	return retval;
}
0886
/* ndo_stop: mask queue interrupts, tear down NAPI and all queues, then
 * detach the PHY; always returns 0
 */
static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);
		netif_napi_del(&adapter->queue[i].napi);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	tsnep_phy_close(adapter);

	return 0;
}
0908
0909 static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
0910 struct net_device *netdev)
0911 {
0912 struct tsnep_adapter *adapter = netdev_priv(netdev);
0913 u16 queue_mapping = skb_get_queue_mapping(skb);
0914
0915 if (queue_mapping >= adapter->num_tx_queues)
0916 queue_mapping = 0;
0917
0918 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
0919 }
0920
0921 static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
0922 int cmd)
0923 {
0924 if (!netif_running(netdev))
0925 return -EINVAL;
0926 if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
0927 return tsnep_ptp_ioctl(netdev, ifr, cmd);
0928 return phy_mii_ioctl(netdev->phydev, ifr, cmd);
0929 }
0930
0931 static void tsnep_netdev_set_multicast(struct net_device *netdev)
0932 {
0933 struct tsnep_adapter *adapter = netdev_priv(netdev);
0934
0935 u16 rx_filter = 0;
0936
0937
0938 if (netdev->flags & IFF_PROMISC) {
0939 rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
0940 rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
0941 } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
0942 rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
0943 }
0944 iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
0945 }
0946
/* ndo_get_stats64: sum the software per-queue counters and add the hardware
 * error counters from the per-queue RX statistic registers and the global
 * ECM status register
 */
static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		/* frames dropped for lack of descriptors */
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		/* frames larger than the provided buffer */
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		/* RX FIFO overflows */
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		/* invalid frames */
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	/* global MAC error counters */
	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}
0993
/* program the MAC address into the hardware and keep a software copy;
 * NOTE(review): the u32/u16 loads assume 'addr' is at least 2-byte aligned,
 * which Ethernet addresses embedded in kernel structures usually are -
 * confirm for all callers
 */
static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   addr);
}
1004
1005 static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
1006 {
1007 struct tsnep_adapter *adapter = netdev_priv(netdev);
1008 struct sockaddr *sock_addr = addr;
1009 int retval;
1010
1011 retval = eth_prepare_mac_addr_change(netdev, sock_addr);
1012 if (retval)
1013 return retval;
1014 eth_hw_addr_set(netdev, sock_addr->sa_data);
1015 tsnep_mac_set_address(adapter, sock_addr->sa_data);
1016
1017 return 0;
1018 }
1019
1020 static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
1021 const struct skb_shared_hwtstamps *hwtstamps,
1022 bool cycles)
1023 {
1024 struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
1025 u64 timestamp;
1026
1027 if (cycles)
1028 timestamp = __le64_to_cpu(rx_inline->counter);
1029 else
1030 timestamp = __le64_to_cpu(rx_inline->timestamp);
1031
1032 return ns_to_ktime(timestamp);
1033 }
1034
/* net_device operations of the TSN endpoint MAC */
static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,

	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_get_tstamp = tsnep_netdev_get_tstamp,
	.ndo_setup_tc = tsnep_tc_setup,
};
1047
/* determine the MAC address (device tree first, then current hardware
 * register content, random address as last resort), program it and reset
 * the RX filter; returns 0 or -EPROBE_DEFER
 */
static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* clear all RX filter flags */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try the device tree; -EPROBE_DEFER (e.g. NVMEM provider not ready
	 * yet) must be propagated so probing is retried
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		/* fall back to whatever the hardware registers contain,
		 * then to a random address
		 */
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}
1081
/* optional MDIO bus setup; when the device tree node has no "mdio" child,
 * no bus is registered and 0 is returned
 */
static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* mask out PHY address 0 from scanning (NOTE(review): presumably
	 * because address 0 acts as broadcast address - confirm)
	 */
	adapter->mdiobus->phy_mask = 0x0000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);

out:
	/* of_node_put(NULL) is a no-op, so this is safe without a DT node */
	of_node_put(np);

	return retval;
}
1121
/* determine the PHY interface mode (GMII default) and locate the PHY
 * device: "phy-handle" from the device tree first, otherwise the first PHY
 * on our own MDIO bus; returns 0 or -EIO when no PHY is found
 */
static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
	struct device_node *phy_node;
	int retval;

	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;

	return 0;
}
1143
/* platform driver probe: allocate the net device, map the register window,
 * detect hardware version/features, set up interrupt, MAC, MDIO, PHY, PTP
 * and TC support, then register the net device
 */
static int tsnep_probe(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter;
	struct net_device *netdev;
	struct resource *io;
	u32 type;
	int revision;
	int version;
	int retval;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	platform_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	adapter->irq = platform_get_irq(pdev, 0);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;
	netdev->irq = adapter->irq;

	/* hardware type register: version, revision and feature flags */
	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;

	/* only queue 0 is set up here, combining TX ring 0 and RX ring 0 */
	adapter->num_tx_queues = TSNEP_QUEUES;
	adapter->num_rx_queues = TSNEP_QUEUES;
	adapter->num_queues = TSNEP_QUEUES;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;

	/* mask all interrupts before the handler is installed */
	tsnep_disable_irq(adapter, ECM_INT_ALL);
	retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
				  0, TSNEP, adapter);
	if (retval != 0) {
		dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
			adapter->irq);
		return retval;
	}
	tsnep_enable_irq(adapter, ECM_INT_LINK);

	retval = tsnep_mac_init(adapter);
	if (retval)
		goto mac_init_failed;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features;

	/* carrier off reporting is important to ethtool even before open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

	/* unwind in reverse order of initialization */
register_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
mac_init_failed:
	tsnep_disable_irq(adapter, ECM_INT_ALL);
	return retval;
}
1257
/* platform driver remove: tear down in reverse order of tsnep_probe();
 * devm-managed resources (netdev, register mapping, IRQ) are released
 * automatically afterwards
 */
static int tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	return 0;
}
1275
/* device tree match table */
static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{ },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = tsnep_of_match,
	},
	.probe = tsnep_probe,
	.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");