0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
/* Driver identification string, reported to userspace via ethtool. */
static const char version[] =
	"smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>";
0049
0050
/*
 * Debug verbosity: 0 = quiet; higher values enable progressively more
 * DBG() output (see the DBG() macro below).  May be overridden from the
 * build system or platform header.
 */
#ifndef SMC_DEBUG
#define SMC_DEBUG 0
#endif
0054
0055
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/io.h>

#include "smc91x.h"
0081
0082 #if defined(CONFIG_ASSABET_NEPONSET)
0083 #include <mach/assabet.h>
0084 #include <mach/neponset.h>
0085 #endif
0086
/*
 * Module parameters.
 * nowait:   set to 1 to program the chip for no wait states on the bus
 *           (see CONFIG_NO_WAIT handling in smc_reset()).
 * watchdog: transmit watchdog timeout in milliseconds.
 */
#ifndef SMC_NOWAIT
# define SMC_NOWAIT 0
#endif
static int nowait = SMC_NOWAIT;
module_param(nowait, int, 0400);
MODULE_PARM_DESC(nowait, "set to 1 for no wait state");

static int watchdog = 1000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc91x");
0103
0104
0105
0106
0107
0108
/* Name used for resource/IRQ registration and log messages. */
#define CARDNAME "smc91x"

/* When defined, smc_shutdown() powers down the EPH block on close. */
#define POWER_DOWN 1

/*
 * Number of times smc_hard_start_xmit() polls for an IM_ALLOC_INT
 * completion before deferring the transmit to interrupt context.
 */
#define MEMORY_WAIT_TIME 16

/* Maximum number of events handled in one pass of smc_interrupt(). */
#define MAX_IRQ_LOOPS 8

/*
 * When non-zero, only one packet may be in flight at a time: the queue
 * is stopped after each transmit and the chip's auto-release feature is
 * disabled (see smc_reset() and smc_tx()).
 */
#define THROTTLE_TX_PKTS 0

/* Half-period, in microseconds, of the bit-banged MII management clock. */
#define MII_DELAY 1
0143
/* Emit a debug message when the compile-time verbosity is at least n. */
#define DBG(n, dev, fmt, ...)					\
	do {							\
		if (SMC_DEBUG >= (n))				\
			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
	} while (0)

/*
 * Unconditional message: printed at info level in debug builds,
 * otherwise routed through the dynamic-debug facility.
 */
#define PRINTK(dev, fmt, ...)					\
	do {							\
		if (SMC_DEBUG > 0)				\
			netdev_info(dev, fmt, ##__VA_ARGS__);	\
		else						\
			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
	} while (0)
0157
0158 #if SMC_DEBUG > 3
0159 static void PRINT_PKT(u_char *buf, int length)
0160 {
0161 int i;
0162 int remainder;
0163 int lines;
0164
0165 lines = length / 16;
0166 remainder = length % 16;
0167
0168 for (i = 0; i < lines ; i ++) {
0169 int cur;
0170 printk(KERN_DEBUG);
0171 for (cur = 0; cur < 8; cur++) {
0172 u_char a, b;
0173 a = *buf++;
0174 b = *buf++;
0175 pr_cont("%02x%02x ", a, b);
0176 }
0177 pr_cont("\n");
0178 }
0179 printk(KERN_DEBUG);
0180 for (i = 0; i < remainder/2 ; i++) {
0181 u_char a, b;
0182 a = *buf++;
0183 b = *buf++;
0184 pr_cont("%02x%02x ", a, b);
0185 }
0186 pr_cont("\n");
0187 }
0188 #else
0189 static inline void PRINT_PKT(u_char *buf, int length) { }
0190 #endif
0191
0192
0193
/*
 * Atomically set the interrupt-enable bits in (x); the chip's mask
 * register is read-modify-written under lp->lock.
 */
#define SMC_ENABLE_INT(lp, x) do {				\
	unsigned char mask;					\
	unsigned long smc_enable_flags;				\
	spin_lock_irqsave(&lp->lock, smc_enable_flags);		\
	mask = SMC_GET_INT_MASK(lp);				\
	mask |= (x);						\
	SMC_SET_INT_MASK(lp, mask);				\
	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);	\
} while (0)

/* Atomically clear the interrupt-enable bits in (x). */
#define SMC_DISABLE_INT(lp, x) do {				\
	unsigned char mask;					\
	unsigned long smc_disable_flags;			\
	spin_lock_irqsave(&lp->lock, smc_disable_flags);	\
	mask = SMC_GET_INT_MASK(lp);				\
	mask &= ~(x);						\
	SMC_SET_INT_MASK(lp, mask);				\
	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);	\
} while (0)
0214
0215
0216
0217
0218
0219
/*
 * Spin until the chip's MMU is no longer busy, giving up (with a debug
 * message) after ~2 jiffies.  NOTE: relies on a variable named 'dev'
 * being in scope at the expansion site.
 */
#define SMC_WAIT_MMU_BUSY(lp) do {					\
	if (unlikely(SMC_GET_MMU_CMD(lp) & MC_BUSY)) {			\
		unsigned long timeout = jiffies + 2;			\
		while (SMC_GET_MMU_CMD(lp) & MC_BUSY) {			\
			if (time_after(jiffies, timeout)) {		\
				netdev_dbg(dev, "timeout %s line %d\n",	\
					   __FILE__, __LINE__);		\
				break;					\
			}						\
			cpu_relax();					\
		}							\
	}								\
} while (0)
0233
0234
0235
0236
0237
/*
 * Soft-reset the chip and leave it quiescent: interrupts masked, TX/RX
 * disabled, MMU reset.  Takes lp->lock only around the interrupt-mask /
 * pending-skb handover; the remaining register accesses assume the
 * device is otherwise idle.
 */
static void smc_reset(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int ctl, cfg;
	struct sk_buff *pending_skb;

	DBG(2, dev, "%s\n", __func__);

	/* Mask all interrupts and steal any packet still queued for TX. */
	spin_lock_irq(&lp->lock);
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_INT_MASK(lp, 0);
	pending_skb = lp->pending_tx_skb;
	lp->pending_tx_skb = NULL;
	spin_unlock_irq(&lp->lock);

	/* The reset aborts that packet: free it and account a TX error. */
	if (pending_skb) {
		dev_kfree_skb(pending_skb);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}

	/* Issue a software reset through the receive control register. */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RCR(lp, RCR_SOFTRST);

	SMC_SELECT_BANK(lp, 1);

	cfg = CONFIG_DEFAULT;

	/* Honour the platform's request for no bus wait states. */
	if (lp->cfg.flags & SMC91X_NOWAIT)
		cfg |= CONFIG_NO_WAIT;

	/* Keep the EPH block powered up. */
	cfg |= CONFIG_EPH_POWER_EN;

	SMC_SET_CONFIG(lp, cfg);

	/* Give the soft reset a moment to settle before further access. */
	udelay(1);

	/* Disable transmit and receive. */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RCR(lp, RCR_CLEAR);
	SMC_SET_TCR(lp, TCR_CLEAR);

	SMC_SELECT_BANK(lp, 1);
	ctl = SMC_GET_CTL(lp) | CTL_LE_ENABLE;

	/*
	 * With auto-release the chip frees TX packet memory by itself;
	 * without it (TX throttling) smc_tx() must free each packet.
	 */
	if(!THROTTLE_TX_PKTS)
		ctl |= CTL_AUTO_RELEASE;
	else
		ctl &= ~CTL_AUTO_RELEASE;
	SMC_SET_CTL(lp, ctl);

	/* Reset the MMU and wait for the reset to complete. */
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_MMU_CMD(lp, MC_RESET);
	SMC_WAIT_MMU_BUSY(lp);
}
0328
0329
0330
0331
/*
 * Enable TX/RX with the currently configured modes, program the MAC
 * address and unmask the interrupts the driver handles.  Expected to be
 * called after smc_reset().
 */
static void smc_enable(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int mask;

	DBG(2, dev, "%s\n", __func__);

	/* Start the transmitter and receiver. */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_TCR(lp, lp->tcr_cur_mode);
	SMC_SET_RCR(lp, lp->rcr_cur_mode);

	SMC_SELECT_BANK(lp, 1);
	SMC_SET_MAC_ADDR(lp, dev->dev_addr);

	/* MII management interrupt only exists from the 91C100 onward. */
	mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT;
	if (lp->version >= (CHIP_91100 << 4))
		mask |= IM_MDINT;
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_INT_MASK(lp, mask);
}
0362
0363
0364
0365
/*
 * Quiesce the chip on interface close: mask interrupts, drop any packet
 * awaiting transmission, disable TX/RX and (optionally) power down the
 * EPH block.
 */
static void smc_shutdown(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	struct sk_buff *pending_skb;

	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);

	/* Mask all interrupts and take ownership of the pending skb. */
	spin_lock_irq(&lp->lock);
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_INT_MASK(lp, 0);
	pending_skb = lp->pending_tx_skb;
	lp->pending_tx_skb = NULL;
	spin_unlock_irq(&lp->lock);
	dev_kfree_skb(pending_skb);	/* dev_kfree_skb(NULL) is a no-op */

	/* Disable transmit and receive. */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RCR(lp, RCR_CLEAR);
	SMC_SET_TCR(lp, TCR_CLEAR);

#ifdef POWER_DOWN
	/* Power down the EPH block while the interface is closed. */
	SMC_SELECT_BANK(lp, 1);
	SMC_SET_CONFIG(lp, SMC_GET_CONFIG(lp) & ~CONFIG_EPH_POWER_EN);
#endif
}
0394
0395
0396
0397
/*
 * Pull one received packet out of the chip's RX FIFO and hand it to the
 * network stack.  Called from interrupt context with lp->lock held (via
 * smc_interrupt()).
 */
static inline void  smc_rcv(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int packet_number, status, packet_len;

	DBG(3, dev, "%s\n", __func__);

	packet_number = SMC_GET_RXFIFO(lp);
	if (unlikely(packet_number & RXFIFO_REMPTY)) {
		PRINTK(dev, "smc_rcv with nothing on FIFO.\n");
		return;
	}

	/* Point the chip's data pointer at the start of the RX packet. */
	SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC);

	/* First two words are status and packet length. */
	SMC_GET_PKT_HDR(lp, status, packet_len);
	packet_len &= 0x07ff;	/* mask off top bits */
	DBG(2, dev, "RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
	    packet_number, status, packet_len, packet_len);

back:
	if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
		/*
		 * Accept slightly oversized frames (e.g. VLAN-tagged) by
		 * clearing the too-long flag and re-evaluating the status.
		 */
		if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) {
			status &= ~RS_TOOLONG;
			goto back;
		}
		if (packet_len < 6) {
			/* A length below the header size should never happen. */
			netdev_err(dev, "fubar (rxlen %u status %x\n",
				   packet_len, status);
			status |= RS_TOOSHORT;
		}
		/* Release the packet memory back to the chip's MMU. */
		SMC_WAIT_MMU_BUSY(lp);
		SMC_SET_MMU_CMD(lp, MC_RELEASE);
		dev->stats.rx_errors++;
		if (status & RS_ALGNERR)
			dev->stats.rx_frame_errors++;
		if (status & (RS_TOOSHORT | RS_TOOLONG))
			dev->stats.rx_length_errors++;
		if (status & RS_BADCRC)
			dev->stats.rx_crc_errors++;
	} else {
		struct sk_buff *skb;
		unsigned char *data;
		unsigned int data_len;

		if (status & RS_MULTICAST)
			dev->stats.multicast++;

		skb = netdev_alloc_skb(dev, packet_len);
		if (unlikely(skb == NULL)) {
			/* No memory: drop the packet but free the chip buffer. */
			SMC_WAIT_MMU_BUSY(lp);
			SMC_SET_MMU_CMD(lp, MC_RELEASE);
			dev->stats.rx_dropped++;
			return;
		}

		/* Align the IP header on a 32-bit boundary. */
		skb_reserve(skb, 2);

		/* Rev A chips never clear the odd-frame indication. */
		if (lp->version == 0x90)
			status |= RS_ODDFRAME;

		/*
		 * Strip header (4) + CRC (2) overhead; odd frames carry
		 * one extra valid data byte in the control word.
		 */
		data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6);
		data = skb_put(skb, data_len);
		SMC_PULL_DATA(lp, data, packet_len - 4);

		/* Free the chip's packet memory. */
		SMC_WAIT_MMU_BUSY(lp);
		SMC_SET_MMU_CMD(lp, MC_RELEASE);

		PRINT_PKT(data, packet_len - 4);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += data_len;
	}
}
0495
#ifdef CONFIG_SMP
/*
 * On SMP the TX path (smc_hard_start_xmit) and the TX tasklet
 * (smc_hardware_send_pkt) can race for lp->lock from different CPUs.
 * The tasklet therefore only *tries* to take the lock, and reschedules
 * itself if it fails, instead of spinning with interrupts off.
 */
#define smc_special_trylock(lock, flags)	\
({						\
	int __ret;				\
	local_irq_save(flags);			\
	__ret = spin_trylock(lock);		\
	if (!__ret)				\
		local_irq_restore(flags);	\
	__ret;					\
})
#define smc_special_lock(lock, flags)	spin_lock_irqsave(lock, flags)
#define smc_special_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
#else
/* On UP no cross-CPU race exists; the lock degenerates to a no-op. */
#define smc_special_trylock(lock, flags)	((void)flags, true)
#define smc_special_lock(lock, flags)	do { flags = 0; } while (0)
#define smc_special_unlock(lock, flags)	do { flags = 0; } while (0)
#endif
0534
0535
0536
0537
/*
 * Tasklet body: copy lp->pending_tx_skb into the chip packet memory
 * allocated earlier (see smc_hard_start_xmit()) and enqueue it for
 * transmission.
 */
static void smc_hardware_send_pkt(struct tasklet_struct *t)
{
	struct smc_local *lp = from_tasklet(lp, t, tx_task);
	struct net_device *dev = lp->dev;
	void __iomem *ioaddr = lp->base;
	struct sk_buff *skb;
	unsigned int packet_no, len;
	unsigned char *buf;
	unsigned long flags;

	DBG(3, dev, "%s\n", __func__);

	/* Lock is contended: stop the queue and try again later. */
	if (!smc_special_trylock(&lp->lock, flags)) {
		netif_stop_queue(dev);
		tasklet_schedule(&lp->tx_task);
		return;
	}

	skb = lp->pending_tx_skb;
	if (unlikely(!skb)) {
		smc_special_unlock(&lp->lock, flags);
		return;
	}
	lp->pending_tx_skb = NULL;

	packet_no = SMC_GET_AR(lp);
	if (unlikely(packet_no & AR_FAILED)) {
		netdev_err(dev, "Memory allocation failed.\n");
		dev->stats.tx_errors++;
		dev->stats.tx_fifo_errors++;
		smc_special_unlock(&lp->lock, flags);
		goto done;
	}

	/* Point to the beginning of the allocated TX packet slot. */
	SMC_SET_PN(lp, packet_no);
	SMC_SET_PTR(lp, PTR_AUTOINC);

	buf = skb->data;
	len = skb->len;
	DBG(2, dev, "TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
	    packet_no, len, len, buf);
	PRINT_PKT(buf, len);

	/* Header: status word 0, total length = data + 6 overhead bytes. */
	SMC_PUT_PKT_HDR(lp, 0, len + 6);

	/* Copy the even-sized part of the payload. */
	SMC_PUSH_DATA(lp, buf, len & ~1);

	/* Final control word: 0x2000 flags an odd trailing byte. */
	SMC_outw(lp, ((len & 1) ? (0x2000 | buf[len - 1]) : 0), ioaddr,
		 DATA_REG(lp));

	/* With TX throttling, allow only one in-flight packet. */
	if (THROTTLE_TX_PKTS)
		netif_stop_queue(dev);

	/* Queue the packet for transmission. */
	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
	smc_special_unlock(&lp->lock, flags);

	netif_trans_update(dev);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	SMC_ENABLE_INT(lp, IM_TX_INT | IM_TX_EMPTY_INT);

done:	if (!THROTTLE_TX_PKTS)
		netif_wake_queue(dev);

	dev_consume_skb_any(skb);
}
0621
0622
0623
0624
0625
0626
0627
/*
 * .ndo_start_xmit: request chip packet memory for the skb.  If the MMU
 * grants it within MEMORY_WAIT_TIME polls the packet is pushed to the
 * hardware immediately; otherwise the queue is stopped and completion
 * is deferred to the IM_ALLOC_INT interrupt (which schedules the TX
 * tasklet).  Always returns NETDEV_TX_OK.
 */
static netdev_tx_t
smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int numPages, poll_count, status;
	unsigned long flags;

	DBG(3, dev, "%s\n", __func__);

	BUG_ON(lp->pending_tx_skb != NULL);

	/*
	 * Chip memory is allocated in 256-byte pages; account for the
	 * 6 bytes of header/control overhead.  More than 7 pages cannot
	 * be requested from the MMU.
	 */
	numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
	if (unlikely(numPages > 7)) {
		netdev_warn(dev, "Far too big packet error.\n");
		dev->stats.tx_errors++;
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	smc_special_lock(&lp->lock, flags);

	/* Ask the MMU for numPages pages of packet memory. */
	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);

	/* Poll briefly for the allocation to complete. */
	poll_count = MEMORY_WAIT_TIME;
	do {
		status = SMC_GET_INT(lp);
		if (status & IM_ALLOC_INT) {
			SMC_ACK_INT(lp, IM_ALLOC_INT);
			break;
		}
	} while (--poll_count);

	smc_special_unlock(&lp->lock, flags);

	lp->pending_tx_skb = skb;
	if (!poll_count) {
		/* Not granted yet: finish from the allocation interrupt. */
		netif_stop_queue(dev);
		DBG(2, dev, "TX memory allocation deferred.\n");
		SMC_ENABLE_INT(lp, IM_ALLOC_INT);
	} else {
		/* Memory granted: push the packet to the chip now. */
		smc_hardware_send_pkt(&lp->tx_task);
	}

	return NETDEV_TX_OK;
}
0696
0697
0698
0699
0700
0701
/*
 * Handle a TX-error completion (IM_TX_INT): read the failed packet's
 * status word, update error statistics, free the packet memory and
 * re-enable the transmitter (a TX error stops it).  Called from
 * smc_interrupt() with lp->lock held.
 */
static void smc_tx(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int saved_packet, packet_no, tx_status;
	unsigned int pkt_len __always_unused;

	DBG(3, dev, "%s\n", __func__);

	packet_no = SMC_GET_TXFIFO(lp);
	if (unlikely(packet_no & TXFIFO_TEMPTY)) {
		PRINTK(dev, "smc_tx with nothing on FIFO.\n");
		return;
	}

	/* Select the completed packet, remembering the current one. */
	saved_packet = SMC_GET_PN(lp);
	SMC_SET_PN(lp, packet_no);

	/* Read its status word from the packet header. */
	SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
	SMC_GET_PKT_HDR(lp, tx_status, pkt_len);
	DBG(2, dev, "TX STATUS 0x%04x PNR 0x%02x\n",
	    tx_status, packet_no);

	if (!(tx_status & ES_TX_SUC))
		dev->stats.tx_errors++;

	if (tx_status & ES_LOSTCARR)
		dev->stats.tx_carrier_errors++;

	if (tx_status & (ES_LATCOL | ES_16COL)) {
		PRINTK(dev, "%s occurred on last xmit\n",
		       (tx_status & ES_LATCOL) ?
			"late collision" : "too many collisions");
		dev->stats.tx_window_errors++;
		if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) {
			netdev_info(dev, "unexpectedly large number of bad collisions. Please check duplex setting.\n");
		}
	}

	/* Free the failed packet's memory. */
	SMC_WAIT_MMU_BUSY(lp);
	SMC_SET_MMU_CMD(lp, MC_FREEPKT);

	/* Restore the previously selected packet number. */
	SMC_WAIT_MMU_BUSY(lp);
	SMC_SET_PN(lp, saved_packet);

	/* Re-enable transmit: a TX error clears the TCR enable bit. */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_TCR(lp, lp->tcr_cur_mode);
	SMC_SELECT_BANK(lp, 2);
}
0757
0758
0759
0760
/*
 * Bit-bang @bits bits of @val (MSB first) out on the MII management
 * interface, clocking MDC with MII_DELAY half-periods.  Caller must
 * have selected bank 3.
 */
static void smc_mii_out(struct net_device *dev, unsigned int val, int bits)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int mii_reg, mask;

	/* Drive MDO (output enable on), clock low. */
	mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
	mii_reg |= MII_MDOE;

	for (mask = 1 << (bits - 1); mask; mask >>= 1) {
		if (val & mask)
			mii_reg |= MII_MDO;
		else
			mii_reg &= ~MII_MDO;

		/* Set up data, then raise the clock to latch it. */
		SMC_SET_MII(lp, mii_reg);
		udelay(MII_DELAY);
		SMC_SET_MII(lp, mii_reg | MII_MCLK);
		udelay(MII_DELAY);
	}
}
0782
/*
 * Bit-bang @bits bits in from the MII management interface (MSB first)
 * and return them.  Caller must have selected bank 3.
 */
static unsigned int smc_mii_in(struct net_device *dev, int bits)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int mii_reg, mask, val;

	/* Tristate MDO so the PHY can drive the data line. */
	mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
	SMC_SET_MII(lp, mii_reg);

	for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) {
		/* Sample MDI, then pulse the clock. */
		if (SMC_GET_MII(lp) & MII_MDI)
			val |= mask;

		SMC_SET_MII(lp, mii_reg);
		udelay(MII_DELAY);
		SMC_SET_MII(lp, mii_reg | MII_MCLK);
		udelay(MII_DELAY);
	}

	return val;
}
0804
0805
0806
0807
/*
 * Read PHY register @phyreg of PHY @phyaddr over the bit-banged MII
 * management interface and return its 16-bit value.  Leaves bank 2
 * selected on return.
 */
static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int phydata;

	SMC_SELECT_BANK(lp, 3);

	/* 32-bit preamble of all ones. */
	smc_mii_out(dev, 0xffffffff, 32);

	/* Start (01) + read opcode (10) + PHY address + register. */
	smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14);

	/* Turnaround (2 bits) plus 16 data bits. */
	phydata = smc_mii_in(dev, 18);

	/* Return the bus to idle state. */
	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));

	DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
	    __func__, phyaddr, phyreg, phydata);

	SMC_SELECT_BANK(lp, 2);
	return phydata;
}
0834
0835
0836
0837
/*
 * Write @phydata to PHY register @phyreg of PHY @phyaddr over the
 * bit-banged MII management interface.  Leaves bank 2 selected.
 */
static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
			  int phydata)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	SMC_SELECT_BANK(lp, 3);

	/* 32-bit preamble of all ones. */
	smc_mii_out(dev, 0xffffffff, 32);

	/* Start + write opcode + address + register + turnaround + data. */
	smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32);

	/* Return the bus to idle state. */
	SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));

	DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
	    __func__, phyaddr, phyreg, phydata);

	SMC_SELECT_BANK(lp, 2);
}
0860
0861
0862
0863
/*
 * Scan the MII bus for an attached PHY, recording its address and
 * combined ID in lp->mii.phy_id / lp->phy_type.  lp->phy_type stays 0
 * if none is found.
 */
static void smc_phy_detect(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	int phyaddr;

	DBG(2, dev, "%s\n", __func__);

	lp->phy_type = 0;

	/*
	 * Scan addresses 1..31 first and address 0 last (phyaddr 32
	 * wraps to 0 via the & 31 mask below).
	 */
	for (phyaddr = 1; phyaddr < 33; ++phyaddr) {
		unsigned int id1, id2;

		id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
		id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);

		DBG(3, dev, "phy_id1=0x%x, phy_id2=0x%x\n",
		    id1, id2);

		/* All-zeros / all-ones / 0x8000 read back means no PHY. */
		if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
		    id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) {
			lp->mii.phy_id = phyaddr & 31;
			lp->phy_type = id1 << 16 | id2;
			break;
		}
	}
}
0897
0898
0899
0900
/*
 * Force the PHY to a fixed speed/duplex (no autonegotiation) according
 * to lp->ctl_rspeed and lp->ctl_rfduplx.  Always returns 1.
 */
static int smc_phy_fixed(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int phyaddr = lp->mii.phy_id;
	int bmcr, cfg1;

	DBG(3, dev, "%s\n", __func__);

	/* Disable the PHY's link-error based auto speed detection. */
	cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
	cfg1 |= PHY_CFG1_LNKDIS;
	smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1);

	/* Build a BMCR value with autonegotiation off. */
	bmcr = 0;

	if (lp->ctl_rfduplx)
		bmcr |= BMCR_FULLDPLX;

	if (lp->ctl_rspeed == 100)
		bmcr |= BMCR_SPEED100;

	smc_phy_write(dev, phyaddr, MII_BMCR, bmcr);

	/* Re-configure the receive/PHY control register. */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RPC(lp, lp->rpc_cur_mode);
	SMC_SELECT_BANK(lp, 2);

	return 1;
}
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950
/*
 * Reset PHY @phy and wait (up to two 50 ms sleeps) for the reset bit to
 * self-clear.  Returns 0 on success, non-zero if the reset timed out.
 * Must be called with lp->lock held; the lock is dropped around each
 * msleep() and re-taken afterwards.
 */
static int smc_phy_reset(struct net_device *dev, int phy)
{
	struct smc_local *lp = netdev_priv(dev);
	unsigned int bmcr;
	int timeout;

	smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET);

	for (timeout = 2; timeout; timeout--) {
		/* Sleeping is not allowed with the spinlock held. */
		spin_unlock_irq(&lp->lock);
		msleep(50);
		spin_lock_irq(&lp->lock);

		bmcr = smc_phy_read(dev, phy, MII_BMCR);
		if (!(bmcr & BMCR_RESET))
			break;
	}

	/* Non-zero means the PHY never cleared its reset bit. */
	return bmcr & BMCR_RESET;
}
0971
0972
0973
0974
0975
0976
0977
/*
 * Put the PHY into power-down mode.  Any in-flight PHY configuration
 * work is cancelled first so it cannot race with (and undo) the
 * power-down.
 */
static void smc_phy_powerdown(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	unsigned int bmcr;
	int phy = lp->mii.phy_id;

	if (lp->phy_type == 0)
		return;

	/* Wait for any pending smc_phy_configure() work to finish. */
	cancel_work_sync(&lp->phy_configure);

	bmcr = smc_phy_read(dev, phy, MII_BMCR);
	smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
}
0995
0996
0997
0998
0999
1000
1001
1002
1003
/*
 * Re-check link/duplex via the generic MII helper and, on a change,
 * update the chip's software full-duplex bit in the TCR.  @init forces
 * the initial carrier state to be reported.
 */
static void smc_phy_check_media(struct net_device *dev, int init)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
		/* Duplex state changed: mirror it into the TCR. */
		if (lp->mii.full_duplex) {
			lp->tcr_cur_mode |= TCR_SWFDUP;
		} else {
			lp->tcr_cur_mode &= ~TCR_SWFDUP;
		}

		SMC_SELECT_BANK(lp, 0);
		SMC_SET_TCR(lp, lp->tcr_cur_mode);
	}
}
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
/*
 * Workqueue handler: reset and configure the PHY.  Either forces a
 * fixed speed/duplex (when lp->mii.force_media is set or the PHY cannot
 * autonegotiate) or programs the advertisement register and restarts
 * autonegotiation.  Runs with lp->lock held for the duration (dropped
 * only inside smc_phy_reset() around its sleeps).
 */
static void smc_phy_configure(struct work_struct *work)
{
	struct smc_local *lp =
		container_of(work, struct smc_local, phy_configure);
	struct net_device *dev = lp->dev;
	void __iomem *ioaddr = lp->base;
	int phyaddr = lp->mii.phy_id;
	int my_phy_caps;
	int my_ad_caps;

	DBG(3, dev, "smc_program_phy()\n");

	spin_lock_irq(&lp->lock);

	/* Nothing to do without a detected PHY. */
	if (lp->phy_type == 0)
		goto smc_phy_configure_exit;

	if (smc_phy_reset(dev, phyaddr)) {
		netdev_info(dev, "PHY reset timed out\n");
		goto smc_phy_configure_exit;
	}

	/* Enable all PHY interrupt sources in the mask register. */
	smc_phy_write(dev, phyaddr, PHY_MASK_REG,
		      PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD |
		      PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB |
		      PHY_INT_SPDDET | PHY_INT_DPLXDET);

	/* Configure the receive/PHY control register. */
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RPC(lp, lp->rpc_cur_mode);

	/* User asked for a fixed setting: skip autonegotiation. */
	if (lp->mii.force_media) {
		smc_phy_fixed(dev);
		goto smc_phy_configure_exit;
	}

	my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);

	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
		netdev_info(dev, "Auto negotiation NOT supported\n");
		smc_phy_fixed(dev);
		goto smc_phy_configure_exit;
	}

	/* Advertise exactly what the PHY reports it can do... */
	my_ad_caps = ADVERTISE_CSMA;

	if (my_phy_caps & BMSR_100BASE4)
		my_ad_caps |= ADVERTISE_100BASE4;
	if (my_phy_caps & BMSR_100FULL)
		my_ad_caps |= ADVERTISE_100FULL;
	if (my_phy_caps & BMSR_100HALF)
		my_ad_caps |= ADVERTISE_100HALF;
	if (my_phy_caps & BMSR_10FULL)
		my_ad_caps |= ADVERTISE_10FULL;
	if (my_phy_caps & BMSR_10HALF)
		my_ad_caps |= ADVERTISE_10HALF;

	/* ...restricted by the user's speed/duplex limits. */
	if (lp->ctl_rspeed != 100)
		my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);

	if (!lp->ctl_rfduplx)
		my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);

	smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps);
	lp->mii.advertising = my_ad_caps;

	/* Read back to let the advertisement register settle. */
	smc_phy_read(dev, phyaddr, MII_ADVERTISE);

	DBG(2, dev, "phy caps=%x\n", my_phy_caps);
	DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps);

	/* Restart autonegotiation with the new advertisement. */
	smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);

	smc_phy_check_media(dev, 1);

smc_phy_configure_exit:
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
}
1127
1128
1129
1130
1131
1132
1133
/*
 * Handle a PHY (MD) interrupt: re-check the media state and keep
 * reading the PHY interrupt register until its interrupt flag clears
 * (reading PHY_INT_REG acknowledges the interrupt).
 */
static void smc_phy_interrupt(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	int phy18;

	DBG(2, dev, "%s\n", __func__);

	if (lp->phy_type == 0)
		return;

	for(;;) {
		smc_phy_check_media(dev, 0);

		/* Reading the register clears the pending interrupt. */
		phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG);
		if ((phy18 & PHY_INT_INT) == 0)
			break;
	}
}
1154
1155
1156
1157 static void smc_10bt_check_media(struct net_device *dev, int init)
1158 {
1159 struct smc_local *lp = netdev_priv(dev);
1160 void __iomem *ioaddr = lp->base;
1161 unsigned int old_carrier, new_carrier;
1162
1163 old_carrier = netif_carrier_ok(dev) ? 1 : 0;
1164
1165 SMC_SELECT_BANK(lp, 0);
1166 new_carrier = (SMC_GET_EPH_STATUS(lp) & ES_LINK_OK) ? 1 : 0;
1167 SMC_SELECT_BANK(lp, 2);
1168
1169 if (init || (old_carrier != new_carrier)) {
1170 if (!new_carrier) {
1171 netif_carrier_off(dev);
1172 } else {
1173 netif_carrier_on(dev);
1174 }
1175 if (netif_msg_link(lp))
1176 netdev_info(dev, "link %s\n",
1177 new_carrier ? "up" : "down");
1178 }
1179 }
1180
/*
 * Handle an EPH interrupt: re-check media state and re-arm the link
 * error interrupt by toggling CTL_LE_ENABLE off and back on.
 */
static void smc_eph_interrupt(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned int ctl;

	smc_10bt_check_media(dev, 0);

	SMC_SELECT_BANK(lp, 1);
	ctl = SMC_GET_CTL(lp);
	SMC_SET_CTL(lp, ctl & ~CTL_LE_ENABLE);
	SMC_SET_CTL(lp, ctl);
	SMC_SELECT_BANK(lp, 2);
}
1195
1196
1197
1198
1199
/*
 * Main interrupt handler.  Masks chip interrupts, services pending
 * events for at most MAX_IRQ_LOOPS iterations (TX error, RX, TX memory
 * allocation, TX-empty/statistics, RX overrun, EPH, PHY, early RX),
 * then restores the pointer register and the (possibly adjusted)
 * interrupt mask.
 */
static irqreturn_t smc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int status, mask, timeout, card_stats;
	int saved_pointer;

	DBG(3, dev, "%s\n", __func__);

	spin_lock(&lp->lock);

	/* Platform-specific hook (e.g. Neponset) before touching the chip. */
	SMC_INTERRUPT_PREAMBLE;

	/* Save state the handlers may clobber; mask chip interrupts. */
	saved_pointer = SMC_GET_PTR(lp);
	mask = SMC_GET_INT_MASK(lp);
	SMC_SET_INT_MASK(lp, 0);

	/* Bound the amount of work done in one invocation. */
	timeout = MAX_IRQ_LOOPS;

	do {
		status = SMC_GET_INT(lp);

		DBG(2, dev, "INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
		    status, mask,
		    ({ int meminfo; SMC_SELECT_BANK(lp, 0);
		       meminfo = SMC_GET_MIR(lp);
		       SMC_SELECT_BANK(lp, 2); meminfo; }),
		    SMC_GET_FIFO(lp));

		status &= mask;
		if (!status)
			break;

		if (status & IM_TX_INT) {
			/* TX error completion. */
			DBG(3, dev, "TX int\n");
			smc_tx(dev);
			SMC_ACK_INT(lp, IM_TX_INT);
			if (THROTTLE_TX_PKTS)
				netif_wake_queue(dev);
		} else if (status & IM_RCV_INT) {
			DBG(3, dev, "RX irq\n");
			smc_rcv(dev);
		} else if (status & IM_ALLOC_INT) {
			/* Deferred TX memory allocation completed. */
			DBG(3, dev, "Allocation irq\n");
			tasklet_hi_schedule(&lp->tx_task);
			mask &= ~IM_ALLOC_INT;
		} else if (status & IM_TX_EMPTY_INT) {
			DBG(3, dev, "TX empty\n");
			mask &= ~IM_TX_EMPTY_INT;

			/* Harvest the collision counters (cleared on read). */
			SMC_SELECT_BANK(lp, 0);
			card_stats = SMC_GET_COUNTER(lp);
			SMC_SELECT_BANK(lp, 2);

			/* Single-collision count... */
			dev->stats.collisions += card_stats & 0xF;
			card_stats >>= 4;

			/* ...then multiple-collision count. */
			dev->stats.collisions += card_stats & 0xF;
		} else if (status & IM_RX_OVRN_INT) {
			DBG(1, dev, "RX overrun (EPH_ST 0x%04x)\n",
			    ({ int eph_st; SMC_SELECT_BANK(lp, 0);
			       eph_st = SMC_GET_EPH_STATUS(lp);
			       SMC_SELECT_BANK(lp, 2); eph_st; }));
			SMC_ACK_INT(lp, IM_RX_OVRN_INT);
			dev->stats.rx_errors++;
			dev->stats.rx_fifo_errors++;
		} else if (status & IM_EPH_INT) {
			smc_eph_interrupt(dev);
		} else if (status & IM_MDINT) {
			SMC_ACK_INT(lp, IM_MDINT);
			smc_phy_interrupt(dev);
		} else if (status & IM_ERCV_INT) {
			SMC_ACK_INT(lp, IM_ERCV_INT);
			PRINTK(dev, "UNSUPPORTED: ERCV INTERRUPT\n");
		}
	} while (--timeout);

	/* Restore saved pointer and re-enable interrupts. */
	SMC_SET_PTR(lp, saved_pointer);
	SMC_SET_INT_MASK(lp, mask);
	spin_unlock(&lp->lock);

#ifndef CONFIG_NET_POLL_CONTROLLER
	/* With netpoll, being called with nothing pending is normal. */
	if (timeout == MAX_IRQ_LOOPS)
		PRINTK(dev, "spurious interrupt (mask = 0x%02x)\n",
		       mask);
#endif
	DBG(3, dev, "Interrupt done (%d loops)\n",
	    MAX_IRQ_LOOPS - timeout);

	return IRQ_HANDLED;
}
1309
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: run the interrupt handler with the device IRQ
 * disabled (used by netconsole and similar polling consumers).
 */
static void smc_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	smc_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
1322
1323
/*
 * .ndo_tx_timeout: dump diagnostic register state, then fully reset and
 * re-enable the chip, kick off a PHY reconfiguration (which sleeps, so
 * it is deferred to a workqueue) and restart the transmit queue.
 */
static void smc_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int status, mask, eph_st, meminfo, fifo;

	DBG(2, dev, "%s\n", __func__);

	/* Snapshot chip state for the diagnostic message. */
	spin_lock_irq(&lp->lock);
	status = SMC_GET_INT(lp);
	mask = SMC_GET_INT_MASK(lp);
	fifo = SMC_GET_FIFO(lp);
	SMC_SELECT_BANK(lp, 0);
	eph_st = SMC_GET_EPH_STATUS(lp);
	meminfo = SMC_GET_MIR(lp);
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
	PRINTK(dev, "TX timeout (INT 0x%02x INTMASK 0x%02x MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
	       status, mask, meminfo, fifo, eph_st);

	smc_reset(dev);
	smc_enable(dev);

	/* PHY reconfiguration sleeps: do it from process context. */
	if (lp->phy_type != 0)
		schedule_work(&lp->phy_configure);

	/* Avoid an immediate re-trigger of the watchdog. */
	netif_trans_update(dev);
	netif_wake_queue(dev);
}
1359
1360
1361
1362
1363
1364
1365
/*
 * .ndo_set_rx_mode: program the receive filter.  Promiscuous and
 * all-multicast modes use RCR bits; otherwise the chip's 64-bit
 * multicast hash table is filled from the device's multicast list
 * (or cleared when the list is empty).
 */
static void smc_set_multicast_list(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned char multicast_table[8];
	int update_multicast = 0;

	DBG(2, dev, "%s\n", __func__);

	if (dev->flags & IFF_PROMISC) {
		DBG(2, dev, "RCR_PRMS\n");
		lp->rcr_cur_mode |= RCR_PRMS;
	}

	/*
	 * All-multicast requested, or more groups than the 64-entry
	 * hash table can usefully discriminate: accept all multicast.
	 */
	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
		DBG(2, dev, "RCR_ALMUL\n");
		lp->rcr_cur_mode |= RCR_ALMUL;
	}

	/*
	 * Build the multicast hash table: the top 6 bits of the CRC of
	 * each address select one of the 64 filter bits.
	 */
	else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* Table for reversing a 3-bit value (bit-order fixup). */
		static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};

		memset(multicast_table, 0, sizeof(multicast_table));

		netdev_for_each_mc_addr(ha, dev) {
			int position;

			/* Only the low 6 CRC bits index the filter. */
			position = crc32_le(~0, ha->addr, 6) & 0x3f;

			/* The chip numbers bits in the opposite order. */
			multicast_table[invert3[position&7]] |=
				(1<<invert3[(position>>3)&7]);
		}

		/* Plain multicast filtering: clear PRMS/ALMUL. */
		lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);

		update_multicast = 1;
	} else  {
		DBG(2, dev, "~(RCR_PRMS|RCR_ALMUL)\n");
		lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);

		/* No multicast addresses: clear the hash table. */
		memset(multicast_table, 0, sizeof(multicast_table));
		update_multicast = 1;
	}

	spin_lock_irq(&lp->lock);
	SMC_SELECT_BANK(lp, 0);
	SMC_SET_RCR(lp, lp->rcr_cur_mode);
	if (update_multicast) {
		SMC_SELECT_BANK(lp, 3);
		SMC_SET_MCAST(lp, multicast_table);
	}
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
}
1454
1455
1456
1457
1458
1459
1460
/*
 * .ndo_open: set default TX/RX/LED modes, reset and enable the chip,
 * configure the PHY (or report link state for PHY-less chips) and
 * start the transmit queue.  Returns 0.
 */
static int
smc_open(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);

	DBG(2, dev, "%s\n", __func__);

	/* Default register modes, with platform-chosen LED functions. */
	lp->tcr_cur_mode = TCR_DEFAULT;
	lp->rcr_cur_mode = RCR_DEFAULT;
	lp->rpc_cur_mode = RPC_DEFAULT |
				lp->cfg.leda << RPC_LSXA_SHFT |
				lp->cfg.ledb << RPC_LSXB_SHFT;

	/* Without a PHY, have the chip monitor carrier itself. */
	if (lp->phy_type == 0)
		lp->tcr_cur_mode |= TCR_MON_CSN;

	smc_reset(dev);
	smc_enable(dev);

	/* Configure the PHY, or check 10BT link status directly. */
	if (lp->phy_type != 0)
		smc_phy_configure(&lp->phy_configure);
	else {
		spin_lock_irq(&lp->lock);
		smc_10bt_check_media(dev, 1);
		spin_unlock_irq(&lp->lock);
	}

	netif_start_queue(dev);
	return 0;
}
1498
1499
1500
1501
1502
1503
1504
1505
/*
 * .ndo_stop: stop the queue, quiesce the chip, kill the TX tasklet and
 * power down the PHY.  Returns 0.
 */
static int smc_close(struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);

	DBG(2, dev, "%s\n", __func__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	/* Quiesce the hardware before stopping software machinery. */
	smc_shutdown(dev);
	tasklet_kill(&lp->tx_task);
	smc_phy_powerdown(dev);
	return 0;
}
1521
1522
1523
1524
/*
 * ethtool get_link_ksettings: delegate to the generic MII helper when a
 * PHY is present; otherwise report the fixed 10BT capabilities derived
 * from the driver's configured speed and current duplex mode.
 */
static int
smc_ethtool_get_link_ksettings(struct net_device *dev,
			       struct ethtool_link_ksettings *cmd)
{
	struct smc_local *lp = netdev_priv(dev);

	if (lp->phy_type != 0) {
		spin_lock_irq(&lp->lock);
		mii_ethtool_get_link_ksettings(&lp->mii, cmd);
		spin_unlock_irq(&lp->lock);
	} else {
		u32 supported = SUPPORTED_10baseT_Half |
				 SUPPORTED_10baseT_Full |
				 SUPPORTED_TP | SUPPORTED_AUI;

		if (lp->ctl_rspeed == 10)
			cmd->base.speed = SPEED_10;
		else if (lp->ctl_rspeed == 100)
			cmd->base.speed = SPEED_100;

		cmd->base.autoneg = AUTONEG_DISABLE;
		cmd->base.port = 0;
		/* Duplex mirrors the software full-duplex TCR bit. */
		cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ?
			DUPLEX_FULL : DUPLEX_HALF;

		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
	}

	return 0;
}
1556
/*
 * ethtool set_link_ksettings: delegate to the generic MII helper when a
 * PHY is present; otherwise only a fixed 10 Mbit TP/AUI setting with an
 * explicit duplex is accepted, and only the duplex preference is
 * recorded (applied at the next smc_open()/reconfiguration).
 */
static int
smc_ethtool_set_link_ksettings(struct net_device *dev,
			       const struct ethtool_link_ksettings *cmd)
{
	struct smc_local *lp = netdev_priv(dev);
	int ret;

	if (lp->phy_type != 0) {
		spin_lock_irq(&lp->lock);
		ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
		spin_unlock_irq(&lp->lock);
	} else {
		/* Without a PHY only fixed 10 Mbit settings make sense. */
		if (cmd->base.autoneg != AUTONEG_DISABLE ||
		    cmd->base.speed != SPEED_10 ||
		    (cmd->base.duplex != DUPLEX_HALF &&
		     cmd->base.duplex != DUPLEX_FULL) ||
		    (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
			return -EINVAL;

		lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;

		ret = 0;
	}

	return ret;
}
1587
1588 static void
1589 smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1590 {
1591 strlcpy(info->driver, CARDNAME, sizeof(info->driver));
1592 strlcpy(info->version, version, sizeof(info->version));
1593 strlcpy(info->bus_info, dev_name(dev->dev.parent),
1594 sizeof(info->bus_info));
1595 }
1596
1597 static int smc_ethtool_nwayreset(struct net_device *dev)
1598 {
1599 struct smc_local *lp = netdev_priv(dev);
1600 int ret = -EINVAL;
1601
1602 if (lp->phy_type != 0) {
1603 spin_lock_irq(&lp->lock);
1604 ret = mii_nway_restart(&lp->mii);
1605 spin_unlock_irq(&lp->lock);
1606 }
1607
1608 return ret;
1609 }
1610
1611 static u32 smc_ethtool_getmsglevel(struct net_device *dev)
1612 {
1613 struct smc_local *lp = netdev_priv(dev);
1614 return lp->msg_enable;
1615 }
1616
1617 static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level)
1618 {
1619 struct smc_local *lp = netdev_priv(dev);
1620 lp->msg_enable = level;
1621 }
1622
/*
 * smc_write_eeprom_word - store one 16-bit word in the config EEPROM
 * @dev:  network device
 * @addr: EEPROM word address
 * @word: value to store
 *
 * Drives the chip's EEPROM interface: the value goes into the General
 * Purpose register (bank 1), the word address into the Pointer
 * register (bank 2), then a STORE cycle is started via the Control
 * register and polled until the chip clears CTL_STORE.  The whole
 * sequence runs under lp->lock so nothing else can change the selected
 * bank mid-sequence.  Always returns 0.
 */
static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word)
{
	u16 ctl;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	spin_lock_irq(&lp->lock);

	SMC_SELECT_BANK(lp, 1);
	SMC_SET_GP(lp, word);

	SMC_SELECT_BANK(lp, 2);
	SMC_SET_PTR(lp, addr);

	SMC_SELECT_BANK(lp, 1);
	ctl = SMC_GET_CTL(lp);
	SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE));
	/* poll until the chip finishes the store cycle */
	do {
		udelay(1);
	} while (SMC_GET_CTL(lp) & CTL_STORE);
	/* restore the original control value and the default bank */
	SMC_SET_CTL(lp, ctl);
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
	return 0;
}
1650
/*
 * smc_read_eeprom_word - fetch one 16-bit word from the config EEPROM
 * @dev:  network device
 * @addr: EEPROM word address
 * @word: output; receives the value read
 *
 * Mirror of smc_write_eeprom_word(): the address is loaded into the
 * Pointer register (bank 2) with PTR_READ set, a RELOAD cycle is
 * started via the Control register (bank 1) and polled until CTL_RELOAD
 * clears, after which the chip has deposited the word in the General
 * Purpose register.  Runs under lp->lock to keep the bank selection
 * atomic.  Always returns 0.
 */
static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word)
{
	u16 ctl;
	struct smc_local *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	spin_lock_irq(&lp->lock);

	SMC_SELECT_BANK(lp, 2);
	SMC_SET_PTR(lp, addr | PTR_READ);

	SMC_SELECT_BANK(lp, 1);
	SMC_SET_GP(lp, 0xffff);	/* preset GP to a known value */
	ctl = SMC_GET_CTL(lp);
	SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD));
	/* poll until the chip finishes the reload cycle */
	do {
		udelay(1);
	} while (SMC_GET_CTL(lp) & CTL_RELOAD);

	*word = SMC_GET_GP(lp);
	/* restore the original control value and the default bank */
	SMC_SET_CTL(lp, ctl);
	SMC_SELECT_BANK(lp, 2);
	spin_unlock_irq(&lp->lock);
	return 0;
}
1678
1679 static int smc_ethtool_geteeprom_len(struct net_device *dev)
1680 {
1681 return 0x23 * 2;
1682 }
1683
1684 static int smc_ethtool_geteeprom(struct net_device *dev,
1685 struct ethtool_eeprom *eeprom, u8 *data)
1686 {
1687 int i;
1688 int imax;
1689
1690 DBG(1, dev, "Reading %d bytes at %d(0x%x)\n",
1691 eeprom->len, eeprom->offset, eeprom->offset);
1692 imax = smc_ethtool_geteeprom_len(dev);
1693 for (i = 0; i < eeprom->len; i += 2) {
1694 int ret;
1695 u16 wbuf;
1696 int offset = i + eeprom->offset;
1697 if (offset > imax)
1698 break;
1699 ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
1700 if (ret != 0)
1701 return ret;
1702 DBG(2, dev, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
1703 data[i] = (wbuf >> 8) & 0xff;
1704 data[i+1] = wbuf & 0xff;
1705 }
1706 return 0;
1707 }
1708
1709 static int smc_ethtool_seteeprom(struct net_device *dev,
1710 struct ethtool_eeprom *eeprom, u8 *data)
1711 {
1712 int i;
1713 int imax;
1714
1715 DBG(1, dev, "Writing %d bytes to %d(0x%x)\n",
1716 eeprom->len, eeprom->offset, eeprom->offset);
1717 imax = smc_ethtool_geteeprom_len(dev);
1718 for (i = 0; i < eeprom->len; i += 2) {
1719 int ret;
1720 u16 wbuf;
1721 int offset = i + eeprom->offset;
1722 if (offset > imax)
1723 break;
1724 wbuf = (data[i] << 8) | data[i + 1];
1725 DBG(2, dev, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
1726 ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
1727 if (ret != 0)
1728 return ret;
1729 }
1730 return 0;
1731 }
1732
1733
/*
 * ethtool operations.  NOTE(review): the EEPROM hooks assume a
 * configuration EEPROM is actually wired to the chip — no runtime
 * detection is performed here; verify per board.
 */
static const struct ethtool_ops smc_ethtool_ops = {
	.get_drvinfo = smc_ethtool_getdrvinfo,

	.get_msglevel = smc_ethtool_getmsglevel,
	.set_msglevel = smc_ethtool_setmsglevel,
	.nway_reset = smc_ethtool_nwayreset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = smc_ethtool_geteeprom_len,
	.get_eeprom = smc_ethtool_geteeprom,
	.set_eeprom = smc_ethtool_seteeprom,
	.get_link_ksettings = smc_ethtool_get_link_ksettings,
	.set_link_ksettings = smc_ethtool_set_link_ksettings,
};
1747
/* net_device callbacks; poll_controller is only built for netpoll. */
static const struct net_device_ops smc_netdev_ops = {
	.ndo_open = smc_open,
	.ndo_stop = smc_close,
	.ndo_start_xmit = smc_hard_start_xmit,
	.ndo_tx_timeout = smc_timeout,
	.ndo_set_rx_mode = smc_set_multicast_list,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = smc_poll_controller,
#endif
};
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
/*
 * smc_findirq - autodetect the chip's interrupt line
 *
 * Uses the kernel's probe_irq_on()/probe_irq_off() helpers: with all
 * unassigned IRQ lines under observation, the chip is told to raise an
 * allocation-done interrupt, and whichever line fired is returned.
 * Returns 0 when no single IRQ could be identified (the caller retries
 * after a chip reset).
 */
static int smc_findirq(struct smc_local *lp)
{
	void __iomem *ioaddr = lp->base;
	int timeout = 20;
	unsigned long cookie;

	DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__);

	cookie = probe_irq_on();

	/*
	 * Enable only the allocation-done interrupt source so that is
	 * the one event that can hit the line.
	 */
	SMC_SELECT_BANK(lp, 2);
	SMC_SET_INT_MASK(lp, IM_ALLOC_INT);

	/*
	 * Ask the MMU to allocate TX memory; completion raises
	 * IM_ALLOC_INT (the low bits of the command encode the request
	 * size — see the MC_ALLOC definition).
	 */
	SMC_SET_MMU_CMD(lp, MC_ALLOC | 1);

	/*
	 * Poll for up to ~200us for the allocation to complete.
	 */
	do {
		int int_status;
		udelay(10);
		int_status = SMC_GET_INT(lp);
		if (int_status & IM_ALLOC_INT)
			break;		/* got the interrupt */
	} while (--timeout);

	/*
	 * The allocated packet is deliberately not freed here; the
	 * subsequent smc_reset() reinitializes the MMU anyway.
	 */

	/* and disable all interrupts again */
	SMC_SET_INT_MASK(lp, 0);

	/* and return what we found (0 if nothing unambiguous fired) */
	return probe_irq_off(cookie);
}
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
/*
 * smc_probe - identify the chip at @ioaddr and register a net device
 * @dev:       freshly allocated net_device; priv is our struct smc_local
 * @ioaddr:    virtual address of the chip's register window
 * @irq_flags: flags to pass to request_irq()
 *
 * Verifies the chip's bank-select signature, cross-checks the BASE and
 * revision registers, reads the MAC address from the chip, autodetects
 * the IRQ when none was supplied, wires up the netdev/ethtool ops and
 * registers the device.  The chip is left shut down to save power;
 * smc_open() powers it back up.  Returns 0 or a negative errno.
 */
static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
		     unsigned long irq_flags)
{
	struct smc_local *lp = netdev_priv(dev);
	int retval;
	unsigned int val, revision_register;
	const char *version_string;
	u8 addr[ETH_ALEN];

	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);

	/*
	 * Signature check: the high byte of the bank select register
	 * always reads 0x33 on a 91xx.  0x33 in the LOW byte suggests
	 * a byte-swapped bus wiring.
	 */
	val = SMC_CURRENT_BANK(lp);
	DBG(2, dev, "%s: bank signature probe returned 0x%04x\n",
	    CARDNAME, val);
	if ((val & 0xFF00) != 0x3300) {
		if ((val & 0xFF) == 0x33) {
			netdev_warn(dev,
				    "%s: Detected possible byte-swapped interface at IOADDR %p\n",
				    CARDNAME, ioaddr);
		}
		retval = -ENODEV;
		goto err_out;
	}

	/*
	 * The signature could still be a fluke: write the bank select
	 * register and check the signature again.
	 */
	SMC_SELECT_BANK(lp, 0);
	val = SMC_CURRENT_BANK(lp);
	if ((val & 0xFF00) != 0x3300) {
		retval = -ENODEV;
		goto err_out;
	}

	/*
	 * Cross-check our I/O address against the chip's BASE register
	 * (bank 1).  A mismatch is only warned about, not fatal.
	 */
	SMC_SELECT_BANK(lp, 1);
	val = SMC_GET_BASE(lp);
	val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
	if (((unsigned long)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
		netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
			    CARDNAME, ioaddr, val);
	}

	/*
	 * Revision register (bank 3): high byte must be 0x33 and the
	 * chip-id nibble must map to a known name in chip_ids[].
	 */
	SMC_SELECT_BANK(lp, 3);
	revision_register = SMC_GET_REV(lp);
	DBG(2, dev, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
	version_string = chip_ids[ (revision_register >> 4) & 0xF];
	if (!version_string || (revision_register & 0xff00) != 0x3300) {
		/* unrecognized chip id */
		netdev_warn(dev, "%s: IO %p: Unrecognized revision register 0x%04x, Contact author.\n",
			    CARDNAME, ioaddr, revision_register);

		retval = -ENODEV;
		goto err_out;
	}

	/* chip accepted; announce the driver once */
	pr_info_once("%s\n", version);

	/* fill in some of the device fields */
	dev->base_addr = (unsigned long)ioaddr;
	lp->base = ioaddr;
	lp->version = revision_register & 0xff;
	spin_lock_init(&lp->lock);

	/* read the MAC address the chip was configured with (bank 1) */
	SMC_SELECT_BANK(lp, 1);
	SMC_GET_MAC_ADDR(lp, addr);
	eth_hw_addr_set(dev, addr);

	/* now, reset the chip, and put it into a known state */
	smc_reset(dev);

	/*
	 * IRQ autodetection: if the platform supplied no IRQ, let
	 * smc_findirq() provoke an interrupt and see which line fires.
	 * Up to three attempts, with a chip reset between failures.
	 */
	if (dev->irq < 1) {
		int trials;

		trials = 3;
		while (trials--) {
			dev->irq = smc_findirq(lp);
			if (dev->irq)
				break;
			/* kick the card and try again */
			smc_reset(dev);
		}
	}
	if (dev->irq == 0) {
		netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
		retval = -ENODEV;
		goto err_out;
	}
	dev->irq = irq_canonicalize(dev->irq);

	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
	dev->netdev_ops = &smc_netdev_ops;
	dev->ethtool_ops = &smc_ethtool_ops;

	tasklet_setup(&lp->tx_task, smc_hardware_send_pkt);
	INIT_WORK(&lp->phy_configure, smc_phy_configure);
	lp->dev = dev;
	lp->mii.phy_id_mask = 0x1f;
	lp->mii.reg_num_mask = 0x1f;
	lp->mii.force_media = 0;
	lp->mii.full_duplex = 0;
	lp->mii.dev = dev;
	lp->mii.mdio_read = smc_phy_read;
	lp->mii.mdio_write = smc_phy_write;

	/*
	 * Locate the PHY, if any — only 91C100 and later revisions
	 * have an MII-attached PHY.
	 */
	if (lp->version >= (CHIP_91100 << 4))
		smc_phy_detect(dev);

	/* then shut everything down to save power */
	smc_shutdown(dev);
	smc_phy_powerdown(dev);

	/* Set default link parameters */
	lp->msg_enable = NETIF_MSG_LINK;
	lp->ctl_rfduplx = 0;
	lp->ctl_rspeed = 10;

	if (lp->version >= (CHIP_91100 << 4)) {
		lp->ctl_rfduplx = 1;
		lp->ctl_rspeed = 100;
	}

	/* Grab the IRQ */
	retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev);
	if (retval)
		goto err_out;

#ifdef CONFIG_ARCH_PXA
# ifdef SMC_USE_PXA_DMA
	lp->cfg.flags |= SMC91X_USE_DMA;
# endif
	if (lp->cfg.flags & SMC91X_USE_DMA) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		lp->dma_chan = dma_request_channel(mask, NULL, NULL);
	}
#endif

	retval = register_netdev(dev);
	if (retval == 0) {
		/* now, print out the card info, in a short format.. */
		netdev_info(dev, "%s (rev %d) at %p IRQ %d",
			    version_string, revision_register & 0x0f,
			    lp->base, dev->irq);

		if (lp->dma_chan)
			pr_cont(" DMA %p", lp->dma_chan);

		pr_cont("%s%s\n",
			lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
			THROTTLE_TX_PKTS ? " [throttle_tx]" : "");

		if (!is_valid_ether_addr(dev->dev_addr)) {
			netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
		} else {
			/* Print the Ethernet address */
			netdev_info(dev, "Ethernet addr: %pM\n",
				    dev->dev_addr);
		}

		if (lp->phy_type == 0) {
			PRINTK(dev, "No PHY found\n");
		} else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) {
			PRINTK(dev, "PHY LAN83C183 (LAN91C111 Internal)\n");
		} else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) {
			PRINTK(dev, "PHY LAN83C180\n");
		}
	}

err_out:
#ifdef CONFIG_ARCH_PXA
	if (retval && lp->dma_chan)
		dma_release_channel(lp->dma_chan);
#endif
	return retval;
}
2057
/*
 * smc_enable_device - wake the chip up through its attribute space
 * @pdev: platform device; its optional "smc91x-attrib" mem resource
 *        maps the card's attribute/configuration registers (PCMCIA
 *        style ECOR/ECSR)
 *
 * Pulses ECOR_RESET, re-enables the card via ECOR_ENABLE and programs
 * ECSR for 8- or 16-bit operation to match the configured bus width.
 * Boards without an attribute resource need none of this and return 0
 * immediately.  Returns -ENOMEM when the window cannot be mapped.
 */
static int smc_enable_device(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct smc_local *lp = netdev_priv(ndev);
	unsigned long flags;
	unsigned char ecor, ecsr;
	void __iomem *addr;
	struct resource * res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
	if (!res)
		return 0;

	/*
	 * Map the attribute space just for the duration of this setup.
	 */
	addr = ioremap(res->start, ATTRIB_SIZE);
	if (!addr)
		return -ENOMEM;

	/*
	 * Reset pulse: set ECOR_RESET, and read ECOR back so the write
	 * is posted to the chip before the delay starts.  IRQs are off
	 * so nothing interleaves with the register sequence.
	 */
	local_irq_save(flags);
	ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET;
	writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT));
	readb(addr + (ECOR << SMC_IO_SHIFT));

	/*
	 * Hold reset for 100us.
	 */
	udelay(100);

	/*
	 * Deassert reset first, then enable the device — the two bits
	 * are written in separate steps.
	 */
	writeb(ecor, addr + (ECOR << SMC_IO_SHIFT));
	writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT));

	/*
	 * Select the bus width: ECSR_IOIS8 forces 8-bit accesses
	 * unless 16-bit support is configured for this board.
	 */
	ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
	if (!SMC_16BIT(lp))
		ecsr |= ECSR_IOIS8;
	writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
	local_irq_restore(flags);

	iounmap(addr);

	/*
	 * Give the chip a short settling time before the main register
	 * window is touched again.
	 */
	msleep(1);

	return 0;
}
2120
2121 static int smc_request_attrib(struct platform_device *pdev,
2122 struct net_device *ndev)
2123 {
2124 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2125 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2126
2127 if (!res)
2128 return 0;
2129
2130 if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME))
2131 return -EBUSY;
2132
2133 return 0;
2134 }
2135
2136 static void smc_release_attrib(struct platform_device *pdev,
2137 struct net_device *ndev)
2138 {
2139 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2140 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2141
2142 if (res)
2143 release_mem_region(res->start, ATTRIB_SIZE);
2144 }
2145
2146 static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
2147 {
2148 if (SMC_CAN_USE_DATACS) {
2149 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
2150 struct smc_local *lp = netdev_priv(ndev);
2151
2152 if (!res)
2153 return;
2154
2155 if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
2156 netdev_info(ndev, "%s: failed to request datacs memory region.\n",
2157 CARDNAME);
2158 return;
2159 }
2160
2161 lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
2162 }
2163 }
2164
2165 static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
2166 {
2167 if (SMC_CAN_USE_DATACS) {
2168 struct smc_local *lp = netdev_priv(ndev);
2169 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
2170
2171 if (lp->datacs)
2172 iounmap(lp->datacs);
2173
2174 lp->datacs = NULL;
2175
2176 if (res)
2177 release_mem_region(res->start, SMC_DATA_EXTENT);
2178 }
2179 }
2180
/* ACPI device IDs this driver binds to */
static const struct acpi_device_id smc91x_acpi_match[] = {
	{ "LNRO0003", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, smc91x_acpi_match);
2186
2187 #if IS_BUILTIN(CONFIG_OF)
/* Devicetree compatibles handled by this driver */
static const struct of_device_id smc91x_match[] = {
	{ .compatible = "smsc,lan91c94", },
	{ .compatible = "smsc,lan91c111", },
	{},
};
MODULE_DEVICE_TABLE(of, smc91x_match);
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
/*
 * try_toggle_control_gpio - drive an optional control GPIO to @value
 * @dev:     owning device (for devm_* resource lifetime)
 * @desc:    output; receives the GPIO descriptor, or NULL when absent
 * @name:    GPIO function name in the firmware node (e.g. "power")
 * @index:   GPIO index within that function
 * @value:   final value to drive the line to
 * @nsdelay: delay before driving the final value; NOTE(review): despite
 *           the "ns" in the name, it is passed straight to
 *           usleep_range(), which takes microseconds — verify callers
 *           intend the microsecond reading.
 *
 * The descriptor is deliberately requested with the *opposite* of
 * @value as its initial state, so the gpiod_set_value_cansleep() call
 * below produces a real edge after the delay.  A missing GPIO is not
 * an error: *desc is set to NULL and 0 is returned.  Returns a
 * negative errno if the GPIO lookup itself fails.
 */
static int try_toggle_control_gpio(struct device *dev,
				   struct gpio_desc **desc,
				   const char *name, int index,
				   int value, unsigned int nsdelay)
{
	struct gpio_desc *gpio;
	/* inverted relative to @value: start inactive, toggle later */
	enum gpiod_flags flags = value ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;

	gpio = devm_gpiod_get_index_optional(dev, name, index, flags);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	if (gpio) {
		if (nsdelay)
			usleep_range(nsdelay, 2 * nsdelay);
		gpiod_set_value_cansleep(gpio, value);
	}
	*desc = gpio;

	return 0;
}
2225 #endif
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
/*
 * smc_drv_probe - platform driver probe
 *
 * Gathers the bus configuration from (in order) platform data, DT/ACPI
 * device properties, or compile-time defaults; claims the register and
 * optional attribute memory regions; resolves the IRQ and its trigger
 * flags; powers the device up and finally hands over to smc_probe()
 * for chip detection and netdev registration.  Error paths unwind in
 * strict reverse order of acquisition.
 */
static int smc_drv_probe(struct platform_device *pdev)
{
	struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
	const struct of_device_id *match = NULL;
	struct smc_local *lp;
	struct net_device *ndev;
	struct resource *res;
	unsigned int __iomem *addr;
	unsigned long irq_flags = SMC_IRQ_FLAGS;
	unsigned long irq_resflags;
	int ret;

	ndev = alloc_etherdev(sizeof(struct smc_local));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}
	SET_NETDEV_DEV(ndev, &pdev->dev);

	/*
	 * Platform data, when present, supplies the whole config; it
	 * must enable at least one of 8-bit or 16-bit access.
	 */
	lp = netdev_priv(ndev);
	lp->cfg.flags = 0;

	if (pd) {
		memcpy(&lp->cfg, pd, sizeof(lp->cfg));
		lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);

		if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
			dev_err(&pdev->dev,
				"at least one of 8-bit or 16-bit access support is required.\n");
			ret = -ENXIO;
			goto out_free_netdev;
		}
	}

#if IS_BUILTIN(CONFIG_OF)
	match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev);
	if (match) {
		u32 val;

		/* optional "power" GPIO: drive it to 0 (see helper) */
		ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
					      "power", 0, 0, 100);
		if (ret)
			goto out_free_netdev;

		/* optional "reset" GPIO: pulse it via the same helper */
		ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
					      "reset", 0, 0, 100);
		if (ret)
			goto out_free_netdev;

		/*
		 * When a reset GPIO exists, give the chip time to come
		 * out of reset before its registers are probed.
		 */
		if (lp->reset_gpio)
			usleep_range(750, 1000);

		/* bus widths from "reg-io-width" bitmask; default 16-bit */
		if (!device_property_read_u32(&pdev->dev, "reg-io-width",
					      &val)) {
			if (val & 1)
				lp->cfg.flags |= SMC91X_USE_8BIT;
			if ((val == 0) || (val & 2))
				lp->cfg.flags |= SMC91X_USE_16BIT;
			if (val & 4)
				lp->cfg.flags |= SMC91X_USE_32BIT;
		} else {
			lp->cfg.flags |= SMC91X_USE_16BIT;
		}
		if (!device_property_read_u32(&pdev->dev, "reg-shift",
					      &val))
			lp->io_shift = val;
		lp->cfg.pxa_u16_align4 =
			device_property_read_bool(&pdev->dev, "pxa-u16-align4");
	}
#endif

	/* neither platform data nor firmware: compile-time defaults */
	if (!pd && !match) {
		lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
		lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
		lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
		lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0;
	}

	/* default LED assignments when the config specifies none */
	if (!lp->cfg.leda && !lp->cfg.ledb) {
		lp->cfg.leda = RPC_LSA_DEFAULT;
		lp->cfg.ledb = RPC_LSB_DEFAULT;
	}

	ndev->dma = (unsigned char)-1;

	/* register window: named resource first, then plain index 0 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto out_free_netdev;
	}

	if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
		ret = -EBUSY;
		goto out_free_netdev;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		ret = ndev->irq;
		goto out_release_io;
	}

	/*
	 * Trigger flags attached to the IRQ resource override the
	 * compile-time default in SMC_IRQ_FLAGS.
	 */
	irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
	if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
		irq_flags = irq_resflags & IRQF_TRIGGER_MASK;

	ret = smc_request_attrib(pdev, ndev);
	if (ret)
		goto out_release_io;
#if defined(CONFIG_ASSABET_NEPONSET)
	if (machine_is_assabet() && machine_has_neponset())
		neponset_ncr_set(NCR_ENET_OSC_EN);
#endif
	platform_set_drvdata(pdev, ndev);
	ret = smc_enable_device(pdev);
	if (ret)
		goto out_release_attrib;

	addr = ioremap(res->start, SMC_IO_EXTENT);
	if (!addr) {
		ret = -ENOMEM;
		goto out_release_attrib;
	}

#ifdef CONFIG_ARCH_PXA
	{
		/* record device/phys address for the PXA DMA path */
		struct smc_local *lp = netdev_priv(ndev);
		lp->device = &pdev->dev;
		lp->physaddr = res->start;

	}
#endif

	ret = smc_probe(ndev, addr, irq_flags);
	if (ret != 0)
		goto out_iounmap;

	smc_request_datacs(pdev, ndev);

	return 0;

 out_iounmap:
	iounmap(addr);
 out_release_attrib:
	smc_release_attrib(pdev, ndev);
 out_release_io:
	release_mem_region(res->start, SMC_IO_EXTENT);
 out_free_netdev:
	free_netdev(ndev);
 out:
	pr_info("%s: not found (%d).\n", CARDNAME, ret);

	return ret;
}
2413
/*
 * smc_drv_remove - platform driver remove: undo smc_drv_probe/smc_probe
 *
 * Teardown mirrors setup in reverse: unregister the netdev first so no
 * new opens can race, then release the IRQ, optional DMA channel, the
 * register mapping and memory regions, and finally free the netdev.
 */
static int smc_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct smc_local *lp = netdev_priv(ndev);
	struct resource *res;

	unregister_netdev(ndev);

	free_irq(ndev->irq, ndev);

#ifdef CONFIG_ARCH_PXA
	if (lp->dma_chan)
		dma_release_channel(lp->dma_chan);
#endif
	iounmap(lp->base);

	smc_release_datacs(pdev,ndev);
	smc_release_attrib(pdev,ndev);

	/* same resource lookup order as probe: by name, then index 0 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, SMC_IO_EXTENT);

	free_netdev(ndev);

	return 0;
}
2442
/*
 * smc_drv_suspend - system sleep: quiesce a running interface.
 * Detach the device from the stack, then power the chip and PHY down.
 */
static int smc_drv_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (ndev && netif_running(ndev)) {
		netif_device_detach(ndev);
		smc_shutdown(ndev);
		smc_phy_powerdown(ndev);
	}

	return 0;
}
2456
2457 static int smc_drv_resume(struct device *dev)
2458 {
2459 struct platform_device *pdev = to_platform_device(dev);
2460 struct net_device *ndev = platform_get_drvdata(pdev);
2461
2462 if (ndev) {
2463 struct smc_local *lp = netdev_priv(ndev);
2464 smc_enable_device(pdev);
2465 if (netif_running(ndev)) {
2466 smc_reset(ndev);
2467 smc_enable(ndev);
2468 if (lp->phy_type != 0)
2469 smc_phy_configure(&lp->phy_configure);
2470 netif_device_attach(ndev);
2471 }
2472 }
2473 return 0;
2474 }
2475
/* System sleep hooks (legacy suspend/resume; no runtime PM). */
static const struct dev_pm_ops smc_drv_pm_ops = {
	.suspend = smc_drv_suspend,
	.resume = smc_drv_resume,
};
2480
/* Platform glue: bind by driver name, DT compatible, or ACPI id. */
static struct platform_driver smc_driver = {
	.probe = smc_drv_probe,
	.remove = smc_drv_remove,
	.driver = {
		.name = CARDNAME,
		.pm = &smc_drv_pm_ops,
		.of_match_table = of_match_ptr(smc91x_match),
		.acpi_match_table = smc91x_acpi_match,
	},
};

module_platform_driver(smc_driver);