/*
 * Davicom DM9000 Fast Ethernet driver for Linux.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"

#define DM9000_PHY		0x40

#define CARDNAME	"dm9000"

static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-6)");

/* DM9000 register access locking.
 *
 * The DM9000 uses a single address register to select which internal
 * register the data port accesses, so the address register must be
 * preserved across interrupts and other concurrent callers.  The
 * interrupt handler saves and restores the address register and the
 * spinlock (db->lock) protects the address/data pair, while the longer
 * PHY and EEPROM operations are serialised by db->addr_lock.
 */

enum dm9000_type {
	TYPE_DM9000E,
	TYPE_DM9000A,
	TYPE_DM9000B
};

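/* per-device private data, kept in the netdev_priv() area of the net_device */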
struct board_info {

	void __iomem	*io_addr;	/* register address port */
	void __iomem	*io_data;	/* data port */
	u16		irq;

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_timeout:1;
	unsigned int	in_suspend:1;
	unsigned int	wake_supported:1;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;		/* parent device */

	struct resource	*addr_res;	/* resources found */
	struct resource	*data_res;
	struct resource	*addr_req;	/* resources requested */
	struct resource	*data_req;

	int		irq_wake;

	struct mutex	addr_lock;	/* phy and eeprom access lock */

	struct delayed_work phy_poll;
	struct net_device  *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;
	u32		wake_state;

	int		ip_summed;

	struct regulator *power_supply;
};

#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < debug) {				\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)

static inline struct board_info *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

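/* Basic register access: write the register index to the address port,
 * then read or write the value through the data port.  Callers must hold
 * db->lock or otherwise guarantee exclusive access, because the address
 * port is shared state.
 */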
static u8
ior(struct board_info *db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

static void
iow(struct board_info *db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}

static void
dm9000_reset(struct board_info *db)
{
	dev_dbg(db->dev, "resetting device\n");

	/* Issue two consecutive software resets with MAC loopback
	 * selected; the chip may not respond to the first one, so it
	 * is repeated and both results are checked.
	 */
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to first reset\n");

	iow(db, DM9000_NCR, 0);
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to second reset\n");
}

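/* Block transfer routines for moving packet data through the data port
 * in 8, 16 or 32 bit access widths; the dumpblk variants read and discard
 * data that cannot be passed up the stack.
 */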
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	iowrite8_rep(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	iowrite16_rep(reg, data, (count + 1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	iowrite32_rep(reg, data, (count + 3) >> 2);
}

static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	ioread8_rep(reg, data, count);
}

static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	ioread16_rep(reg, data, (count + 1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	ioread32_rep(reg, data, (count + 3) >> 2);
}

static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;

	for (i = 0; i < count; i++)
		readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		readl(reg);
}

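/* Delay for @ms milliseconds: busy-wait with mdelay() when called from the
 * TX timeout or suspend paths where sleeping is not safe, otherwise sleep.
 */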
static void dm9000_msleep(struct board_info *db, unsigned int ms)
{
	if (db->in_suspend || db->in_timeout)
		mdelay(ms);
	else
		msleep(ms);
}

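/* Read a word from the PHY through the EEPROM/PHY access registers
 * (EPAR/EPCR/EPDRL/EPDRH).  The current address-port value is saved and
 * restored around each locked section so concurrent register users are
 * not disturbed.
 */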
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Select the PHY register to read */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Issue PHY read command */
	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* wait for the read to complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* clear PHY read command */

	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	/* restore the previous address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}

/* Write a word to the PHY, using the same EPAR/EPCR/EPDR protocol */
static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	if (!db->in_timeout)
		mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Select the PHY register to write */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Fill in the data to be written */
	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	/* Issue PHY write command */
	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* wait for the write to complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* clear PHY write command */

	/* restore the previous address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	if (!db->in_timeout)
		mutex_unlock(&db->addr_lock);
}

/* select the set of block I/O routines matching the bus width */
static void dm9000_set_io(struct board_info *db, int byte_width)
{
	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;

	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
		fallthrough;
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}

static void dm9000_schedule_poll(struct board_info *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}

static unsigned int
dm9000_read_locked(struct board_info *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}

static int dm9000_wait_eeprom(struct board_info *db)
{
	unsigned int status;
	int timeout = 8;

	/* Wait for the EEPROM/PHY interface to signal completion by
	 * clearing EPCR_ERRE; give up after a short timeout so a stuck
	 * chip cannot hang the caller.
	 */
	while (1) {
		status = dm9000_read_locked(db, DM9000_EPCR);

		if ((status & EPCR_ERRE) == 0)
			break;

		msleep(1);

		if (timeout-- < 0) {
			dev_dbg(db->dev, "timeout waiting EEPROM\n");
			break;
		}
	}

	return 0;
}

/*
 * Read a word of data from the EEPROM.
 */
static void
dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

/*
 * Write a word of data to the EEPROM.
 */
static void
dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

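/* ethtool ops */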
static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct board_info *dm = to_dm9000_board(dev);

	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
		sizeof(info->bus_info));
}

static u32 dm9000_get_msglevel(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}

static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	struct board_info *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}

static int dm9000_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	mii_ethtool_get_link_ksettings(&dm->mii, cmd);
	return 0;
}

static int dm9000_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
}

static int dm9000_nway_reset(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);

	return mii_nway_restart(&dm->mii);
}

static int dm9000_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	struct board_info *dm = to_dm9000_board(dev);
	netdev_features_t changed = dev->features ^ features;
	unsigned long flags;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
	spin_unlock_irqrestore(&dm->lock, flags);

	return 0;
}

static u32 dm9000_get_link(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);
	u32 ret;

	if (dm->flags & DM9000_PLATF_EXT_PHY)
		ret = mii_link_ok(&dm->mii);
	else
		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;

	return ret;
}

#define DM_EEPROM_MAGIC		(0x444D394B)

static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}

static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct board_info *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct board_info *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int done;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	while (len > 0) {
		if (len & 1 || offset & 1) {
			int which = offset & 1;
			u8 tmp[2];

			dm9000_read_eeprom(dm, offset / 2, tmp);
			tmp[which] = *data;
			dm9000_write_eeprom(dm, offset / 2, tmp);

			done = 1;
		} else {
			dm9000_write_eeprom(dm, offset / 2, data);
			done = 2;
		}

		data += done;
		offset += done;
		len -= done;
	}

	return 0;
}

static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct board_info *dm = to_dm9000_board(dev);

	memset(w, 0, sizeof(struct ethtool_wolinfo));

	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
	w->wolopts = dm->wake_state;
}

static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct board_info *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		/* change in wol state, update IRQ wake enable */
		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}

static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
	.get_link_ksettings	= dm9000_get_link_ksettings,
	.set_link_ksettings	= dm9000_set_link_ksettings,
};

static void dm9000_show_carrier(struct board_info *db,
				unsigned carrier, unsigned nsr)
{
	int lpa;
	struct net_device *ndev = db->ndev;
	struct mii_if_info *mii = &db->mii;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier) {
		lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
		dev_info(db->dev,
			 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half", lpa);
	} else {
		dev_info(db->dev, "%s: link down\n", ndev->name);
	}
}

static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	struct board_info *db = container_of(dw, struct board_info, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}

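/* unmap and release the memory regions claimed during probe */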
static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	iounmap(db->io_addr);
	iounmap(db->io_data);

	if (db->data_req)
		release_resource(db->data_req);
	kfree(db->data_req);

	if (db->addr_req)
		release_resource(db->addr_req);
	kfree(db->addr_req);
}

static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
	switch (type) {
	case TYPE_DM9000E: return 'e';
	case TYPE_DM9000A: return 'a';
	case TYPE_DM9000B: return 'b';
	}

	return '?';
}

/*
 * Set the DM9000 MAC address, multicast hash table and receive mode.
 */
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4] = { 0, 0, 0, 0x8000 };	/* broadcast address */
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

	/* fill the 64-bit multicast hash table */
	netdev_for_each_mc_addr(ha, dev) {
		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* write the hash table to the MAC MD registers */
	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
		iow(db, oft++, hash_table[i]);
		iow(db, oft++, hash_table[i] >> 8);
	}

	iow(db, DM9000_RCR, rcr);
}

static void
dm9000_hash_table(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	dm9000_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}

static void
dm9000_mask_interrupts(struct board_info *db)
{
	iow(db, DM9000_IMR, IMR_PAR);
}

static void
dm9000_unmask_interrupts(struct board_info *db)
{
	iow(db, DM9000_IMR, db->imr_all);
}

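/* Initialise the DM9000 after reset: latch the I/O mode, program checksum
 * offload, power up the internal PHY via GPIO0, set the operating registers
 * and the address filter, and work out which interrupts to enable.
 */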
static void
dm9000_init_dm9000(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned int imr;
	unsigned int ncr;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	dm9000_reset(db);
	dm9000_mask_interrupts(db);

	/* I/O mode */
	db->io_mode = ior(db, DM9000_ISR) >> 6;

	/* checksum mode */
	if (dev->hw_features & NETIF_F_RXCSUM)
		iow(db, DM9000_RCSR,
		    (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);

	/* drive GPIO0 as an output and power up the internal PHY */
	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);
	iow(db, DM9000_GPR, 0);

	/* the DM9000B needs a manual PHY reset and DSP init parameters */
	if (db->type == TYPE_DM9000B) {
		dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
		dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
	}

	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;

	/* keep NCR_WAKEEN set whenever wake-up is supported, otherwise
	 * wake events would be dropped; DM9000_WCR already masks them.
	 */
	if (db->wake_supported)
		ncr |= NCR_WAKEEN;

	iow(db, DM9000_NCR, ncr);

	/* program operating registers */
	iow(db, DM9000_TCR, 0);
	iow(db, DM9000_BPTR, 0x3f);
	iow(db, DM9000_FCR, 0xff);
	iow(db, DM9000_SMCR, 0);

	/* clear TX and interrupt status */
	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
	iow(db, DM9000_ISR, ISR_CLR_STATUS);

	/* set address filter table */
	dm9000_hash_table_unlocked(dev);

	imr = IMR_PAR | IMR_PTM | IMR_PRM;
	if (db->type != TYPE_DM9000E)
		imr |= IMR_LNKCHNG;

	db->imr_all = imr;

	/* init driver variables */
	db->tx_pkt_cnt = 0;
	db->queue_pkt_len = 0;
	netif_trans_update(dev);
}

/* Our watchdog timed out: reinitialise the chip and restart the queue */
static void dm9000_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct board_info *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* save the previous register address */
	spin_lock_irqsave(&db->lock, flags);
	db->in_timeout = 1;
	reg_save = readb(db->io_addr);

	netif_stop_queue(dev);
	dm9000_init_dm9000(dev);
	dm9000_unmask_interrupts(db);

	/* we can accept TX packets again */
	netif_trans_update(dev);
	netif_wake_queue(dev);

	/* restore the previous register address */
	writeb(reg_save, db->io_addr);
	db->in_timeout = 0;
	spin_unlock_irqrestore(&db->lock, flags);
}

static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	struct board_info *dm = to_dm9000_board(dev);

	/* switch the hardware TX checksum engine to match this packet */
	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	/* set TX length */
	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	/* issue TX request; the bit clears when the transmit completes */
	iow(dm, DM9000_TCR, TCR_TXREQ);
}

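/* Hardware start transmission: copy the packet into the chip's TX SRAM.
 * The DM9000 can hold two outstanding packets; the first is sent
 * immediately and a second is queued, at which point the TX queue is
 * stopped until the TX-complete interrupt arrives.
 */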
static netdev_tx_t
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	struct board_info *db = netdev_priv(dev);

	dm9000_dbg(db, 3, "%s:\n", __func__);

	if (db->tx_pkt_cnt > 1)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&db->lock, flags);

	/* move data to DM9000 TX RAM */
	writeb(DM9000_MWCMD, db->io_addr);

	(db->outblk)(db->io_data, skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_pkt_cnt++;

	/* TX control: send the first packet immediately, queue the second */
	if (db->tx_pkt_cnt == 1) {
		dm9000_send_packet(dev, skb->ip_summed, skb->len);
	} else {
		db->queue_pkt_len = skb->len;
		db->queue_ip_summed = skb->ip_summed;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

/* account for transmitted packets and kick any queued one */
static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
{
	int tx_status = ior(db, DM9000_NSR);

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		/* one packet completed */
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		/* send the queued packet, if any */
		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}

struct dm9000_rxhdr {
	u8	RxPktReady;
	u8	RxStatus;
	__le16	RxLen;
} __packed;

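/* Receive packets and pass them to the upper layer.  Each frame in RX
 * SRAM is preceded by a 4 byte header (ready flag, status, length);
 * frames with bad status or length are dropped and only counted.
 */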
static void
dm9000_rx(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	struct dm9000_rxhdr rxhdr;
	struct sk_buff *skb;
	u8 rxbyte, *rdptr;
	bool GoodPacket;
	int RxLen;

	/* check whether a packet is ready */
	do {
		ior(db, DM9000_MRCMDX);	/* dummy read */

		/* get the most up to date data */
		rxbyte = readb(db->io_data);

		/* status check: this byte must be 0 or 1 */
		if (rxbyte & DM9000_PKT_ERR) {
			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
			iow(db, DM9000_RCR, 0x00);	/* stop the device */
			return;
		}

		if (!(rxbyte & DM9000_PKT_RDY))
			return;

		/* a packet is ready: fetch status and length */
		GoodPacket = true;
		writeb(DM9000_MRCMD, db->io_addr);

		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));

		RxLen = le16_to_cpu(rxhdr.RxLen);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxhdr.RxStatus, RxLen);

		/* packet status check */
		if (RxLen < 0x40) {
			GoodPacket = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (RxLen > DM9000_PKT_MAX)
			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);

		/* rxhdr.RxStatus mirrors the RSR register */
		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* move data out of the DM9000 */
		if (GoodPacket &&
		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
			skb_reserve(skb, 2);
			rdptr = skb_put(skb, RxLen - 4);

			/* read the received packet from RX SRAM */
			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			/* pass it to the upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			if (dev->features & NETIF_F_RXCSUM) {
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			/* need to dump the packet's data */
			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}

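/* Main interrupt handler: save and restore the shared address port,
 * mask chip interrupts while the status bits are processed, then handle
 * RX, TX-done and (on DM9000A/B) link-change events.
 */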
static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct board_info *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	/* holders of db->lock must always block IRQs */
	spin_lock_irqsave(&db->lock, flags);

	/* save the previous register address */
	reg_save = readb(db->io_addr);

	dm9000_mask_interrupts(db);
	/* read and clear the interrupt status */
	int_status = ior(db, DM9000_ISR);
	iow(db, DM9000_ISR, int_status);

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* received a packet */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* transmit complete */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* fire a link-change request */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	dm9000_unmask_interrupts(db);

	/* restore the previous register address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned nsr, wcr;

	spin_lock_irqsave(&db->lock, flags);

	nsr = ior(db, DM9000_NSR);
	wcr = ior(db, DM9000_WCR);

	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);

	if (nsr & NSR_WAKEST) {
		/* clear the wake status */
		iow(db, DM9000_NSR, NSR_WAKEST);

		if (wcr & WCR_LINKST)
			dev_info(db->dev, "wake by link status change\n");
		if (wcr & WCR_SAMPLEST)
			dev_info(db->dev, "wake by sample packet\n");
		if (wcr & WCR_MAGICST)
			dev_info(db->dev, "wake by magic packet\n");
		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
			dev_err(db->dev, "wake signalled with no reason? "
				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Used by netconsole
 */
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

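/* Open the interface: power up the PHY, (re)initialise the chip,
 * install the interrupt handler and start the transmit queue.
 */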
static int
dm9000_open(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned int irq_flags = irq_get_trigger_type(dev->irq);

	if (netif_msg_ifup(db))
		dev_dbg(db->dev, "enabling %s\n", dev->name);

	/* If no IRQ trigger type is specified, warn the user: the
	 * interrupt may never fire with the wrong polarity.
	 */
	if (irq_flags == IRQF_TRIGGER_NONE)
		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");

	irq_flags |= IRQF_SHARED;

	/* power up the internal PHY before initialising the chip */
	iow(db, DM9000_GPR, 0);
	mdelay(1);

	dm9000_init_dm9000(dev);

	if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
		return -EAGAIN;

	/* Now that the interrupt handler is hooked up, unmask our interrupts */
	dm9000_unmask_interrupts(db);

	/* init driver variables */
	db->dbug_cnt = 0;

	mii_check_media(&db->mii, netif_msg_link(db), 1);
	netif_start_queue(dev);

	/* poll the initial link state */
	schedule_delayed_work(&db->phy_poll, 1);

	return 0;
}

static void
dm9000_shutdown(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);

	/* reset and power down the PHY, then quiesce the MAC */
	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
	iow(db, DM9000_GPR, 0x01);	/* power down PHY */
	dm9000_mask_interrupts(db);
	iow(db, DM9000_RCR, 0x00);	/* disable RX */
}

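/* Stop the interface: cancel the PHY poll work, stop the queue, free the
 * interrupt and power the chip down.
 */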
static int
dm9000_stop(struct net_device *ndev)
{
	struct board_info *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}

static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_rx_mode	= dm9000_hash_table,
	.ndo_eth_ioctl		= dm9000_ioctl,
	.ndo_set_features	= dm9000_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};

static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
{
	struct dm9000_plat_data *pdata;
	struct device_node *np = dev->of_node;
	int ret;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return ERR_PTR(-ENXIO);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	if (of_find_property(np, "davicom,ext-phy", NULL))
		pdata->flags |= DM9000_PLATF_EXT_PHY;
	if (of_find_property(np, "davicom,no-eeprom", NULL))
		pdata->flags |= DM9000_PLATF_NO_EEPROM;

	ret = of_get_mac_address(np, pdata->dev_addr);
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);

	return pdata;
}

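/* Search for a DM9000 device: map its resources, identify the chip
 * variant, pick a MAC address and register the network device.
 */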
static int
dm9000_probe(struct platform_device *pdev)
{
	struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
	struct board_info *db;
	struct net_device *ndev;
	struct device *dev = &pdev->dev;
	const unsigned char *mac_src;
	int ret = 0;
	int iosize;
	int i;
	u32 id_val;
	int reset_gpios;
	enum of_gpio_flags flags;
	struct regulator *power;
	bool inv_mac_addr = false;
	u8 addr[ETH_ALEN];

	power = devm_regulator_get(dev, "vcc");
	if (IS_ERR(power)) {
		if (PTR_ERR(power) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "no regulator provided\n");
	} else {
		ret = regulator_enable(power);
		if (ret != 0) {
			dev_err(dev,
				"Failed to enable power regulator: %d\n", ret);
			return ret;
		}
		dev_dbg(dev, "regulator enabled\n");
	}

	reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
					      &flags);
	if (gpio_is_valid(reset_gpios)) {
		ret = devm_gpio_request_one(dev, reset_gpios, flags,
					    "dm9000_reset");
		if (ret) {
			dev_err(dev, "failed to request reset gpio %d: %d\n",
				reset_gpios, ret);
			goto out_regulator_disable;
		}

		/* hold the reset line, then give the chip time to come
		 * back up and load its EEPROM before we touch it
		 */
		msleep(2);
		gpio_set_value(reset_gpios, 1);
		msleep(4);
	}

	if (!pdata) {
		pdata = dm9000_parse_dt(&pdev->dev);
		if (IS_ERR(pdata)) {
			ret = PTR_ERR(pdata);
			goto out_regulator_disable;
		}
	}

	/* init network device */
	ndev = alloc_etherdev(sizeof(struct board_info));
	if (!ndev) {
		ret = -ENOMEM;
		goto out_regulator_disable;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "dm9000_probe()\n");

	/* setup board info structure */
	db = netdev_priv(ndev);

	db->dev = &pdev->dev;
	db->ndev = ndev;
	if (!IS_ERR(power))
		db->power_supply = power;

	spin_lock_init(&db->lock);
	mutex_init(&db->addr_lock);

	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);

	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (!db->addr_res || !db->data_res) {
		dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
			db->addr_res, db->data_res);
		ret = -ENOENT;
		goto out;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		ret = ndev->irq;
		goto out;
	}

	db->irq_wake = platform_get_irq_optional(pdev, 1);
	if (db->irq_wake >= 0) {
		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);

		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
				  IRQF_SHARED, dev_name(db->dev), ndev);
		if (ret) {
			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
		} else {
			/* test to see if the irq is really wakeup capable */
			ret = irq_set_irq_wake(db->irq_wake, 1);
			if (ret) {
				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
					db->irq_wake, ret);
			} else {
				irq_set_irq_wake(db->irq_wake, 0);
				db->wake_supported = 1;
			}
		}
	}

	iosize = resource_size(db->addr_res);
	db->addr_req = request_mem_region(db->addr_res->start, iosize,
					  pdev->name);

	if (db->addr_req == NULL) {
		dev_err(db->dev, "cannot claim address reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_addr = ioremap(db->addr_res->start, iosize);

	if (db->io_addr == NULL) {
		dev_err(db->dev, "failed to ioremap address reg\n");
		ret = -EINVAL;
		goto out;
	}

	iosize = resource_size(db->data_res);
	db->data_req = request_mem_region(db->data_res->start, iosize,
					  pdev->name);

	if (db->data_req == NULL) {
		dev_err(db->dev, "cannot claim data reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_data = ioremap(db->data_res->start, iosize);

	if (db->io_data == NULL) {
		dev_err(db->dev, "failed to ioremap data reg\n");
		ret = -EINVAL;
		goto out;
	}

	/* fill in parameters for the net-dev structure */
	ndev->base_addr = (unsigned long)db->io_addr;

	/* ensure we have at least a default set of IO routines, based on
	 * the size of the data resource
	 */
	dm9000_set_io(db, iosize);

	/* check whether platform data overrides anything */
	if (pdata != NULL) {
		if (pdata->flags & DM9000_PLATF_8BITONLY)
			dm9000_set_io(db, 1);

		if (pdata->flags & DM9000_PLATF_16BITONLY)
			dm9000_set_io(db, 2);

		if (pdata->flags & DM9000_PLATF_32BITONLY)
			dm9000_set_io(db, 4);

		/* check for IO routine over-rides */
		if (pdata->inblk != NULL)
			db->inblk = pdata->inblk;

		if (pdata->outblk != NULL)
			db->outblk = pdata->outblk;

		if (pdata->dumpblk != NULL)
			db->dumpblk = pdata->dumpblk;

		db->flags = pdata->flags;
	}

#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
	db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif

	dm9000_reset(db);

	/* try multiple times; the DM9000 sometimes gets the read wrong */
	for (i = 0; i < 8; i++) {
		id_val  = ior(db, DM9000_VIDL);
		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
		id_val |= (u32)ior(db, DM9000_PIDH) << 24;

		if (id_val == DM9000_ID)
			break;
		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
	}

	if (id_val != DM9000_ID) {
		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
		ret = -ENODEV;
		goto out;
	}

	/* identify which type of DM9000 we are working on */
	id_val = ior(db, DM9000_CHIPR);
	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);

	switch (id_val) {
	case CHIPR_DM9000A:
		db->type = TYPE_DM9000A;
		break;
	case CHIPR_DM9000B:
		db->type = TYPE_DM9000B;
		break;
	default:
		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
		db->type = TYPE_DM9000E;
	}

	/* dm9000a/b are capable of hardware checksum offload */
	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
		ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		ndev->features |= ndev->hw_features;
	}

	/* from this point we assume that we have found a DM9000 */
	ndev->netdev_ops	= &dm9000_netdev_ops;
	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
	ndev->ethtool_ops	= &dm9000_ethtool_ops;

	db->msg_enable       = NETIF_MSG_LINK;
	db->mii.phy_id_mask  = 0x1f;
	db->mii.reg_num_mask = 0x1f;
	db->mii.force_media  = 0;
	db->mii.full_duplex  = 0;
	db->mii.dev	     = ndev;
	db->mii.mdio_read    = dm9000_phy_read;
	db->mii.mdio_write   = dm9000_phy_write;

	mac_src = "eeprom";

	/* try reading the node address from the attached EEPROM */
	for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, addr + i);
	eth_hw_addr_set(ndev, addr);

	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
		mac_src = "platform data";
		eth_hw_addr_set(ndev, pdata->dev_addr);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* try reading from the chip's own MAC registers */
		mac_src = "chip";
		for (i = 0; i < 6; i++)
			addr[i] = ior(db, i + DM9000_PAR);
		eth_hw_addr_set(ndev, addr);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		inv_mac_addr = true;
		eth_hw_addr_random(ndev);
		mac_src = "random";
	}


	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		if (inv_mac_addr)
			dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
				 ndev->name);
		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
		       ndev->name, dm9000_type_to_char(db->type),
		       db->io_addr, db->io_data, ndev->irq,
		       ndev->dev_addr, mac_src);
	}
	return 0;

out:
	dev_err(db->dev, "not found (%d).\n", ret);

	dm9000_release_board(pdev, db);
	free_netdev(ndev);

out_regulator_disable:
	if (!IS_ERR(power))
		regulator_disable(power);

	return ret;
}

static int
dm9000_drv_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct board_info *db;

	if (ndev) {
		db = netdev_priv(ndev);
		db->in_suspend = 1;

		if (!netif_running(ndev))
			return 0;

		netif_device_detach(ndev);

		/* only shut down the chip if wake-on-LAN is not in use */
		if (!db->wake_state)
			dm9000_shutdown(ndev);
	}
	return 0;
}

static int
dm9000_drv_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct board_info *db = netdev_priv(ndev);

	if (ndev) {
		if (netif_running(ndev)) {
			/* reset if we were not in wake mode, to ensure the
			 * chip is in a known state if it was powered off
			 */
			if (!db->wake_state) {
				dm9000_init_dm9000(ndev);
				dm9000_unmask_interrupts(db);
			}

			netif_device_attach(ndev);
		}

		db->in_suspend = 0;
	}
	return 0;
}

static const struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};

static int
dm9000_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct board_info *dm = to_dm9000_board(ndev);
	struct regulator *power = dm->power_supply;

	unregister_netdev(ndev);
	dm9000_release_board(pdev, dm);
	free_netdev(ndev);		/* frees the private data too, so take
					 * the regulator pointer beforehand */
	if (power)
		regulator_disable(power);

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id dm9000_of_matches[] = {
	{ .compatible = "davicom,dm9000", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dm9000_of_matches);
#endif

static struct platform_driver dm9000_driver = {
	.driver	= {
		.name    = "dm9000",
		.pm	 = &dm9000_drv_pm_ops,
		.of_match_table = of_match_ptr(dm9000_of_matches),
	},
	.probe   = dm9000_probe,
	.remove  = dm9000_drv_remove,
};

module_platform_driver(dm9000_driver);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");