0001
0002
0003
0004
0005
0006
0007 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0008
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cache.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <linux/spi/spi.h>
#include <linux/of_net.h>
0021
/* Default netif message level: driver, probe, link and timer events */
#define MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER)

#define DRV_NAME	"mse102x"

/* SPI framing words exchanged with the device (all big-endian on the wire) */
#define DET_CMD		0x0001	/* marker preceding every command word */
#define DET_SOF		0x0002	/* start-of-frame marker */
#define DET_DFT		0x55AA	/* end-of-frame (tail) marker */

/* Command words occupy the top 4 bits; the low 12 bits carry a length */
#define CMD_SHIFT	12
#define CMD_RTS		(0x1 << CMD_SHIFT)	/* request/ready to send */
#define CMD_CTR		(0x2 << CMD_SHIFT)	/* clear to receive/send */

#define CMD_MASK	GENMASK(15, CMD_SHIFT)
#define LEN_MASK	GENMASK(CMD_SHIFT - 1, 0)

/* Sizes of the fixed framing elements, in bytes */
#define DET_CMD_LEN	4
#define DET_SOF_LEN	2
#define DET_DFT_LEN	2

/* Supported SPI clock range for this device */
#define MIN_FREQ_HZ	6000000
#define MAX_FREQ_HZ	7142857
0044
/* Driver-private error counters, exported via ethtool statistics.
 * Field order and count must match mse102x_gstrings_stats: the stats
 * are copied out with a single memcpy in mse102x_get_ethtool_stats().
 */
struct mse102x_stats {
	u64 xfer_err;	/* spi_sync() failures */
	u64 invalid_cmd;	/* response missing the DET_CMD echo */
	u64 invalid_ctr;	/* unexpected reply to CMD_RTS on TX */
	u64 invalid_dft;	/* bad end-of-frame marker on RX */
	u64 invalid_len;	/* bad frame length announced by device */
	u64 invalid_rts;	/* unexpected reply to CMD_CTR on RX */
	u64 invalid_sof;	/* bad start-of-frame marker on RX */
	u64 tx_timeout;	/* TX handshake exceeded its deadline */
};
0055
/* ethtool stat names; order must mirror struct mse102x_stats exactly */
static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
	"SPI transfer errors",
	"Invalid command",
	"Invalid CTR",
	"Invalid DFT",
	"Invalid frame length",
	"Invalid RTS",
	"Invalid SOF",
	"TX timeout",
};
0066
/* Bus-independent per-device state; embedded in mse102x_net_spi */
struct mse102x_net {
	struct net_device	*ndev;

	/* small scratch buffers for command transfers (TX and RX words) */
	u8			rxd[8];
	u8			txd[8];

	u32			msg_enable ____cacheline_aligned;

	struct sk_buff_head	txq;	/* frames queued for the TX worker */
	struct mse102x_stats	stats;
};
0078
/* SPI-specific wrapper around mse102x_net.
 * spi_msg/spi_xfer form a single reusable transfer; all access to them
 * (and to the device) is serialized by @lock.
 */
struct mse102x_net_spi {
	struct mse102x_net	mse102x;
	struct mutex		lock;		/* serializes SPI access */
	struct work_struct	tx_work;	/* deferred TX processing */
	struct spi_device	*spidev;
	struct spi_message	spi_msg;
	struct spi_transfer	spi_xfer;

#ifdef CONFIG_DEBUG_FS
	struct dentry		*device_root;	/* per-device debugfs dir */
#endif
};

/* Recover the SPI wrapper from the embedded generic state */
#define to_mse102x_spi(mse) container_of((mse), struct mse102x_net_spi, mse102x)
0093
0094 #ifdef CONFIG_DEBUG_FS
0095
/* debugfs "info" file: dump queue depth, IRQ and SPI parameters */
static int mse102x_info_show(struct seq_file *s, void *what)
{
	struct mse102x_net_spi *mses = s->private;

	seq_printf(s, "TX ring size : %u\n",
		   skb_queue_len(&mses->mse102x.txq));

	seq_printf(s, "IRQ : %d\n",
		   mses->spidev->irq);

	/* effective_speed_hz reflects the last transfer, not the request */
	seq_printf(s, "SPI effective speed : %lu\n",
		   (unsigned long)mses->spi_xfer.effective_speed_hz);
	seq_printf(s, "SPI mode : %x\n",
		   mses->spidev->mode);

	return 0;
}
/* Generates mse102x_info_fops wrapping the show routine above */
DEFINE_SHOW_ATTRIBUTE(mse102x_info);
0114
0115 static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
0116 {
0117 mses->device_root = debugfs_create_dir(dev_name(&mses->mse102x.ndev->dev),
0118 NULL);
0119
0120 debugfs_create_file("info", S_IFREG | 0444, mses->device_root, mses,
0121 &mse102x_info_fops);
0122 }
0123
/* Tear down the per-device debugfs directory and everything below it */
static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
{
	debugfs_remove_recursive(mses->device_root);
}
0128
0129 #else
0130
/* No-op stubs when debugfs support is compiled out */
static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
{
}

static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
{
}
0138
0139 #endif
0140
0141
0142
0143
0144
0145
0146
0147
0148 static void mse102x_tx_cmd_spi(struct mse102x_net *mse, u16 cmd)
0149 {
0150 struct mse102x_net_spi *mses = to_mse102x_spi(mse);
0151 struct spi_transfer *xfer = &mses->spi_xfer;
0152 struct spi_message *msg = &mses->spi_msg;
0153 __be16 txb[2];
0154 int ret;
0155
0156 txb[0] = cpu_to_be16(DET_CMD);
0157 txb[1] = cpu_to_be16(cmd);
0158
0159 xfer->tx_buf = txb;
0160 xfer->rx_buf = NULL;
0161 xfer->len = DET_CMD_LEN;
0162
0163 ret = spi_sync(mses->spidev, msg);
0164 if (ret < 0) {
0165 netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
0166 __func__, ret);
0167 mse->stats.xfer_err++;
0168 }
0169 }
0170
/* Read a 4-byte command response from the device.
 *
 * The first two received bytes must echo DET_CMD; the remaining two are
 * the payload, copied to @rxb on success.  Returns 0 on success, the
 * spi_sync() error, or -EIO when the DET_CMD echo is missing.
 */
static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
{
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
	struct spi_transfer *xfer = &mses->spi_xfer;
	struct spi_message *msg = &mses->spi_msg;
	__be16 *txb = (__be16 *)mse->txd;
	__be16 *cmd = (__be16 *)mse->rxd;
	u8 *trx = mse->rxd;
	int ret;

	/* clock out zeros while the response is shifted in */
	txb[0] = 0;
	txb[1] = 0;

	xfer->tx_buf = txb;
	xfer->rx_buf = trx;
	xfer->len = DET_CMD_LEN;

	ret = spi_sync(mses->spidev, msg);
	if (ret < 0) {
		netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
			   __func__, ret);
		mse->stats.xfer_err++;
	} else if (*cmd != cpu_to_be16(DET_CMD)) {
		net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
				    __func__, *cmd);
		mse->stats.invalid_cmd++;
		ret = -EIO;
	} else {
		/* skip the 2-byte DET_CMD echo, keep the 2-byte payload */
		memcpy(rxb, trx + 2, 2);
	}

	return ret;
}
0204
0205 static inline void mse102x_push_header(struct sk_buff *skb)
0206 {
0207 __be16 *header = skb_push(skb, DET_SOF_LEN);
0208
0209 *header = cpu_to_be16(DET_SOF);
0210 }
0211
0212 static inline void mse102x_put_footer(struct sk_buff *skb)
0213 {
0214 __be16 *footer = skb_put(skb, DET_DFT_LEN);
0215
0216 *footer = cpu_to_be16(DET_DFT);
0217 }
0218
0219 static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
0220 unsigned int pad)
0221 {
0222 struct mse102x_net_spi *mses = to_mse102x_spi(mse);
0223 struct spi_transfer *xfer = &mses->spi_xfer;
0224 struct spi_message *msg = &mses->spi_msg;
0225 struct sk_buff *tskb;
0226 int ret;
0227
0228 netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n",
0229 __func__, txp, txp->len, txp->data);
0230
0231 if ((skb_headroom(txp) < DET_SOF_LEN) ||
0232 (skb_tailroom(txp) < DET_DFT_LEN + pad)) {
0233 tskb = skb_copy_expand(txp, DET_SOF_LEN, DET_DFT_LEN + pad,
0234 GFP_KERNEL);
0235 if (!tskb)
0236 return -ENOMEM;
0237
0238 dev_kfree_skb(txp);
0239 txp = tskb;
0240 }
0241
0242 mse102x_push_header(txp);
0243
0244 if (pad)
0245 skb_put_zero(txp, pad);
0246
0247 mse102x_put_footer(txp);
0248
0249 xfer->tx_buf = txp->data;
0250 xfer->rx_buf = NULL;
0251 xfer->len = txp->len;
0252
0253 ret = spi_sync(mses->spidev, msg);
0254 if (ret < 0) {
0255 netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
0256 __func__, ret);
0257 mse->stats.xfer_err++;
0258 }
0259
0260 return ret;
0261 }
0262
/* Receive one frame into @buff: SOF marker + @frame_len payload + DFT tail.
 * @buff must have room for DET_SOF_LEN + frame_len + DET_DFT_LEN bytes.
 * Returns 0 on success, the spi_sync() error, or -EIO when either framing
 * marker is invalid (counted in invalid_sof / invalid_dft respectively).
 */
static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
				unsigned int frame_len)
{
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
	struct spi_transfer *xfer = &mses->spi_xfer;
	struct spi_message *msg = &mses->spi_msg;
	__be16 *sof = (__be16 *)buff;
	__be16 *dft = (__be16 *)(buff + DET_SOF_LEN + frame_len);
	int ret;

	xfer->rx_buf = buff;
	xfer->tx_buf = NULL;
	xfer->len = DET_SOF_LEN + frame_len + DET_DFT_LEN;

	ret = spi_sync(mses->spidev, msg);
	if (ret < 0) {
		netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
			   __func__, ret);
		mse->stats.xfer_err++;
	} else if (*sof != cpu_to_be16(DET_SOF)) {
		netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
			   __func__, *sof);
		mse->stats.invalid_sof++;
		ret = -EIO;
	} else if (*dft != cpu_to_be16(DET_DFT)) {
		netdev_dbg(mse->ndev, "%s: SPI frame tail is invalid (0x%04x)\n",
			   __func__, *dft);
		mse->stats.invalid_dft++;
		ret = -EIO;
	}

	return ret;
}
0296
/* Debug helper: hex-dump a packet when NETIF_MSG_PKTDATA is enabled */
static void mse102x_dump_packet(const char *msg, int len, const char *data)
{
	printk(KERN_DEBUG ": %s - packet len:%d\n", msg, len);
	print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       data, len, true);
}
0303
0304 static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
0305 {
0306 struct sk_buff *skb;
0307 unsigned int rxalign;
0308 unsigned int rxlen;
0309 __be16 rx = 0;
0310 u16 cmd_resp;
0311 u8 *rxpkt;
0312 int ret;
0313
0314 mse102x_tx_cmd_spi(mse, CMD_CTR);
0315 ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
0316 cmd_resp = be16_to_cpu(rx);
0317
0318 if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
0319 usleep_range(50, 100);
0320
0321 mse102x_tx_cmd_spi(mse, CMD_CTR);
0322 ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
0323 if (ret)
0324 return;
0325
0326 cmd_resp = be16_to_cpu(rx);
0327 if ((cmd_resp & CMD_MASK) != CMD_RTS) {
0328 net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
0329 __func__, cmd_resp);
0330 mse->stats.invalid_rts++;
0331 return;
0332 }
0333
0334 net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
0335 __func__);
0336 }
0337
0338 rxlen = cmd_resp & LEN_MASK;
0339 if (!rxlen) {
0340 net_dbg_ratelimited("%s: No frame length defined\n", __func__);
0341 mse->stats.invalid_len++;
0342 return;
0343 }
0344
0345 rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
0346 skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
0347 if (!skb)
0348 return;
0349
0350
0351
0352
0353
0354 rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
0355 if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
0356 mse->ndev->stats.rx_errors++;
0357 dev_kfree_skb(skb);
0358 return;
0359 }
0360
0361 if (netif_msg_pktdata(mse))
0362 mse102x_dump_packet(__func__, skb->len, skb->data);
0363
0364 skb->protocol = eth_type_trans(skb, mse->ndev);
0365 netif_rx(skb);
0366
0367 mse->ndev->stats.rx_packets++;
0368 mse->ndev->stats.rx_bytes += rxlen;
0369 }
0370
/* Transmit one skb using the RTS/CTR handshake.
 *
 * Announces the padded frame length with CMD_RTS and waits for the device
 * to answer CMD_CTR (clear to send) before pushing the frame.  Retries
 * with increasing delays until @work_timeout (a jiffies deadline shared
 * by the whole TX queue) expires; returns -ETIMEDOUT in that case,
 * otherwise the result of mse102x_tx_frame_spi().
 */
static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
			      unsigned long work_timeout)
{
	unsigned int pad = 0;
	__be16 rx = 0;
	u16 cmd_resp;
	int ret;
	bool first = true;

	/* pad short frames up to the 60-byte Ethernet minimum (no FCS) */
	if (txb->len < 60)
		pad = 60 - txb->len;

	while (1) {
		mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
		ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
		cmd_resp = be16_to_cpu(rx);

		if (!ret) {
			/* device granted clear-to-send */
			if (cmd_resp == CMD_CTR)
				break;

			net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
					    __func__, cmd_resp);
			mse->stats.invalid_ctr++;
		}

		/* give up once the queue-wide deadline has passed */
		if (time_after(jiffies, work_timeout))
			return -ETIMEDOUT;

		if (first) {
			/* throttle new frames while the device catches up */
			netif_stop_queue(mse->ndev);

			usleep_range(50, 100);
			first = false;
		} else {
			msleep(20);
		}
	}

	ret = mse102x_tx_frame_spi(mse, txb, pad);
	if (ret)
		net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n",
				    __func__, ret);

	return ret;
}
0423
/* Stop the netif queue once this many frames are waiting for the worker */
#define TX_QUEUE_MAX 10
0425
0426 static void mse102x_tx_work(struct work_struct *work)
0427 {
0428
0429 unsigned long work_timeout = jiffies + msecs_to_jiffies(1000);
0430 struct mse102x_net_spi *mses;
0431 struct mse102x_net *mse;
0432 struct sk_buff *txb;
0433 int ret = 0;
0434
0435 mses = container_of(work, struct mse102x_net_spi, tx_work);
0436 mse = &mses->mse102x;
0437
0438 while ((txb = skb_dequeue(&mse->txq))) {
0439 mutex_lock(&mses->lock);
0440 ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
0441 mutex_unlock(&mses->lock);
0442 if (ret) {
0443 mse->ndev->stats.tx_dropped++;
0444 } else {
0445 mse->ndev->stats.tx_bytes += txb->len;
0446 mse->ndev->stats.tx_packets++;
0447 }
0448
0449 dev_kfree_skb(txb);
0450 }
0451
0452 if (ret == -ETIMEDOUT) {
0453 if (netif_msg_timer(mse))
0454 netdev_err(mse->ndev, "tx work timeout\n");
0455
0456 mse->stats.tx_timeout++;
0457 }
0458
0459 netif_wake_queue(mse->ndev);
0460 }
0461
0462 static netdev_tx_t mse102x_start_xmit_spi(struct sk_buff *skb,
0463 struct net_device *ndev)
0464 {
0465 struct mse102x_net *mse = netdev_priv(ndev);
0466 struct mse102x_net_spi *mses = to_mse102x_spi(mse);
0467
0468 netif_dbg(mse, tx_queued, ndev,
0469 "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
0470
0471 skb_queue_tail(&mse->txq, skb);
0472
0473 if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX)
0474 netif_stop_queue(ndev);
0475
0476 schedule_work(&mses->tx_work);
0477
0478 return NETDEV_TX_OK;
0479 }
0480
0481 static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np)
0482 {
0483 struct net_device *ndev = mse->ndev;
0484 int ret = of_get_ethdev_address(np, ndev);
0485
0486 if (ret) {
0487 eth_hw_addr_random(ndev);
0488 netdev_err(ndev, "Using random MAC address: %pM\n",
0489 ndev->dev_addr);
0490 }
0491 }
0492
0493
/* Threaded IRQ handler: the device raised its line, fetch one packet.
 * Runs in thread context (request_threaded_irq with NULL hard handler),
 * so taking the SPI mutex here is safe.
 */
static irqreturn_t mse102x_irq(int irq, void *_mse)
{
	struct mse102x_net *mse = _mse;
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);

	mutex_lock(&mses->lock);
	mse102x_rx_pkt_spi(mse);
	mutex_unlock(&mses->lock);

	return IRQ_HANDLED;
}
0505
/* ndo_open: request the RX interrupt and bring the interface up */
static int mse102x_net_open(struct net_device *ndev)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	int ret;

	/* RX is driven entirely from the threaded IRQ handler */
	ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
				   ndev->name, mse);
	if (ret < 0) {
		netdev_err(ndev, "Failed to get irq: %d\n", ret);
		return ret;
	}

	netif_dbg(mse, ifup, ndev, "opening\n");

	netif_start_queue(ndev);

	/* no link detection on this device; assume carrier once open */
	netif_carrier_on(ndev);

	netif_dbg(mse, ifup, ndev, "network device up\n");

	return 0;
}
0528
/* ndo_stop: flush in-flight TX work, drop queued frames, release the IRQ */
static int mse102x_net_stop(struct net_device *ndev)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);

	netif_info(mse, ifdown, ndev, "shutting down\n");

	netif_carrier_off(mse->ndev);

	/* let any queued TX work finish before tearing the queue down */
	flush_work(&mses->tx_work);

	netif_stop_queue(ndev);

	/* discard frames that were still waiting for the worker */
	skb_queue_purge(&mse->txq);

	free_irq(ndev->irq, mse);

	return 0;
}
0549
/* Network device operations; MAC address handling uses the generic helpers */
static const struct net_device_ops mse102x_netdev_ops = {
	.ndo_open		= mse102x_net_open,
	.ndo_stop		= mse102x_net_stop,
	.ndo_start_xmit		= mse102x_start_xmit_spi,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
0557
0558
0559
/* ethtool -i: report driver name and the SPI bus the device sits on */
static void mse102x_get_drvinfo(struct net_device *ndev,
				struct ethtool_drvinfo *di)
{
	strscpy(di->driver, DRV_NAME, sizeof(di->driver));
	strscpy(di->bus_info, dev_name(ndev->dev.parent), sizeof(di->bus_info));
}
0566
0567 static u32 mse102x_get_msglevel(struct net_device *ndev)
0568 {
0569 struct mse102x_net *mse = netdev_priv(ndev);
0570
0571 return mse->msg_enable;
0572 }
0573
0574 static void mse102x_set_msglevel(struct net_device *ndev, u32 to)
0575 {
0576 struct mse102x_net *mse = netdev_priv(ndev);
0577
0578 mse->msg_enable = to;
0579 }
0580
/* ethtool -S: copy the counters out in one block.
 * Relies on struct mse102x_stats being exactly
 * ARRAY_SIZE(mse102x_gstrings_stats) consecutive u64 fields in the same
 * order as the string table — keep both in sync.
 */
static void mse102x_get_ethtool_stats(struct net_device *ndev,
				      struct ethtool_stats *estats, u64 *data)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	struct mse102x_stats *st = &mse->stats;

	memcpy(data, st, ARRAY_SIZE(mse102x_gstrings_stats) * sizeof(u64));
}
0589
/* ethtool: export the statistic names for the ETH_SS_STATS string set */
static void mse102x_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &mse102x_gstrings_stats,
		       sizeof(mse102x_gstrings_stats));
		break;
	default:
		/* core should only ask for sets advertised by sset_count */
		WARN_ON(1);
		break;
	}
}
0602
0603 static int mse102x_get_sset_count(struct net_device *ndev, int sset)
0604 {
0605 switch (sset) {
0606 case ETH_SS_STATS:
0607 return ARRAY_SIZE(mse102x_gstrings_stats);
0608 default:
0609 return -EINVAL;
0610 }
0611 }
0612
/* ethtool operations; link state comes from the generic carrier flag */
static const struct ethtool_ops mse102x_ethtool_ops = {
	.get_drvinfo		= mse102x_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= mse102x_get_msglevel,
	.set_msglevel		= mse102x_set_msglevel,
	.get_ethtool_stats	= mse102x_get_ethtool_stats,
	.get_strings		= mse102x_get_strings,
	.get_sset_count		= mse102x_get_sset_count,
};
0622
0623
0624
0625 #ifdef CONFIG_PM_SLEEP
0626
/* PM suspend: detach and close a running interface (releases the IRQ) */
static int mse102x_suspend(struct device *dev)
{
	struct mse102x_net *mse = dev_get_drvdata(dev);
	struct net_device *ndev = mse->ndev;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		mse102x_net_stop(ndev);
	}

	return 0;
}
0639
/* PM resume: reopen and reattach an interface that was running at suspend */
static int mse102x_resume(struct device *dev)
{
	struct mse102x_net *mse = dev_get_drvdata(dev);
	struct net_device *ndev = mse->ndev;

	if (netif_running(ndev)) {
		mse102x_net_open(ndev);
		netif_device_attach(ndev);
	}

	return 0;
}
0652 #endif
0653
/* Sleep-only PM ops; compiles to empty ops without CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
0655
/* Probe: validate SPI parameters, allocate and register the net device */
static int mse102x_probe_spi(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct mse102x_net_spi *mses;
	struct net_device *ndev;
	struct mse102x_net *mse;
	int ret;

	/* device requires 8-bit words in SPI mode 3 */
	spi->bits_per_word = 8;
	spi->mode |= SPI_MODE_3;
	/* clock must stay within the device's supported window */
	spi->master->min_speed_hz = MIN_FREQ_HZ;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = MAX_FREQ_HZ;

	if (spi->max_speed_hz < MIN_FREQ_HZ ||
	    spi->max_speed_hz > MAX_FREQ_HZ) {
		dev_err(&spi->dev, "SPI max frequency out of range (min: %u, max: %u)\n",
			MIN_FREQ_HZ, MAX_FREQ_HZ);
		return -EINVAL;
	}

	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "Unable to setup SPI device: %d\n", ret);
		return ret;
	}

	/* devm-managed: freed automatically on probe failure or removal */
	ndev = devm_alloc_etherdev(dev, sizeof(struct mse102x_net_spi));
	if (!ndev)
		return -ENOMEM;

	/* reserve room for the SOF/DFT framing added around each TX frame */
	ndev->needed_tailroom += ALIGN(DET_DFT_LEN, 4);
	ndev->needed_headroom += ALIGN(DET_SOF_LEN, 4);
	ndev->priv_flags &= ~IFF_TX_SKB_SHARING;
	ndev->tx_queue_len = 100;

	mse = netdev_priv(ndev);
	mses = to_mse102x_spi(mse);

	mses->spidev = spi;
	mutex_init(&mses->lock);
	INIT_WORK(&mses->tx_work, mse102x_tx_work);

	/* single reusable transfer/message pair, serialized by mses->lock */
	spi_message_init(&mses->spi_msg);
	spi_message_add_tail(&mses->spi_xfer, &mses->spi_msg);

	ndev->irq = spi->irq;
	mse->ndev = ndev;

	/* default message level from MSG_DEFAULT (no module param override) */
	mse->msg_enable = netif_msg_init(-1, MSG_DEFAULT);

	skb_queue_head_init(&mse->txq);

	SET_NETDEV_DEV(ndev, dev);

	dev_set_drvdata(dev, mse);

	netif_carrier_off(mse->ndev);
	ndev->netdev_ops = &mse102x_netdev_ops;
	ndev->ethtool_ops = &mse102x_ethtool_ops;

	mse102x_init_mac(mse, dev->of_node);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "failed to register network device: %d\n", ret);
		return ret;
	}

	mse102x_init_device_debugfs(mses);

	return 0;
}
0733
/* Remove: tear down debugfs and unregister; the netdev itself is devm-freed */
static void mse102x_remove_spi(struct spi_device *spi)
{
	struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);

	if (netif_msg_drv(mse))
		dev_info(&spi->dev, "remove\n");

	mse102x_remove_device_debugfs(mses);
	unregister_netdev(mse->ndev);
}
0745
/* Device-tree compatible strings for the two supported variants */
static const struct of_device_id mse102x_match_table[] = {
	{ .compatible = "vertexcom,mse1021" },
	{ .compatible = "vertexcom,mse1022" },
	{ }
};
MODULE_DEVICE_TABLE(of, mse102x_match_table);
0752
/* SPI driver registration; module_spi_driver() generates init/exit */
static struct spi_driver mse102x_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mse102x_match_table,
		.pm = &mse102x_pm_ops,
	},
	.probe = mse102x_probe_spi,
	.remove = mse102x_remove_spi,
};
module_spi_driver(mse102x_driver);
0763
0764 MODULE_DESCRIPTION("MSE102x Network driver");
0765 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@in-tech.com>");
0766 MODULE_LICENSE("GPL");
0767 MODULE_ALIAS("spi:" DRV_NAME);