// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq SoC XRX200 ethernet driver
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/if_vlan.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
#define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1
#define XRX200_DMA_BURST_LEN	8

#define XRX200_DMA_PACKET_COMPLETE	0
#define XRX200_DMA_PACKET_IN_PROGRESS	1

/* cpu port mac */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from DMA to PMAC */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets going from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets going from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present on packets from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)
struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;

	/* The RX channel tracks raw page fragments (for build_skb()),
	 * the TX channel tracks the skbs queued for transmission.
	 */
	union {
		struct sk_buff *skb[LTQ_DESC_NUM];
		void *rx_buff[LTQ_DESC_NUM];
	};

	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	u16 rx_buf_size;
	u16 rx_skb_size;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

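/* Buffer sizing: the largest frame is the MTU plus the VLAN-tagged
 * Ethernet header. The DMA buffer is rounded up to a whole number of
 * bursts (XRX200_DMA_BURST_LEN words of 4 bytes each), and the backing
 * allocation additionally makes room for NET_SKB_PAD + NET_IP_ALIGN
 * headroom plus the skb_shared_info that build_skb() appends.
 */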
static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}

static int xrx200_buffer_size(int mtu)
{
	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

static int xrx200_skb_size(u16 buf_size)
{
	return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

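/* Drop any frames that the DMA engine completed while the channel was
 * closed and hand the descriptors straight back to the hardware.
 */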
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    ch->priv->rx_buf_size;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}

static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate frame reception on the
	 * hardware ports, so packets may already be queued up and get
	 * written into memory as soon as the DMA channel is opened. Give
	 * the hardware a short moment to finish, then drop whatever landed
	 * in the ring before enabling the RX interrupt.
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}

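/* Replace the buffer behind the current RX descriptor with a freshly
 * allocated page fragment. On allocation or mapping failure the old
 * buffer is kept in place so the ring never loses a buffer; the
 * descriptor is handed back to the hardware either way.
 */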
static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
	void *buf = ch->rx_buff[ch->dma.desc];
	struct xrx200_priv *priv = ch->priv;
	dma_addr_t mapping;
	int ret = 0;

	ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
	if (!ch->rx_buff[ch->dma.desc]) {
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
				 priv->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
		skb_free_frag(ch->rx_buff[ch->dma.desc]);
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
	/* Make sure the address is written before we give it to HW */
	wmb();
skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

	return ret;
}

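/* Take one completed descriptor out of the RX ring. Frames larger than a
 * single buffer arrive as a descriptor chain: the SOP buffer becomes the
 * head skb and every following buffer is appended through the head's
 * frag_list until EOP, at which point the assembled skb is passed up.
 */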
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	void *buf = ch->rx_buff[ch->dma.desc];
	u32 ctl = desc->ctl;
	int len = (ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	struct sk_buff *skb;
	int ret;

	ret = xrx200_alloc_buf(ch, napi_alloc_frag);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb = build_skb(buf, priv->rx_skb_size);
	if (!skb) {
		skb_free_frag(buf);
		net_dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, len);

	/* add buffers to skb via skb->frag_list */
	if (ctl & LTQ_DMA_SOP) {
		ch->skb_head = skb;
		ch->skb_tail = skb;
		skb_reserve(skb, NET_IP_ALIGN);
	} else if (ch->skb_head) {
		if (ch->skb_head == ch->skb_tail)
			skb_shinfo(ch->skb_tail)->frag_list = skb;
		else
			ch->skb_tail->next = skb;
		ch->skb_tail = skb;
		ch->skb_head->len += skb->len;
		ch->skb_head->data_len += skb->len;
		ch->skb_head->truesize += skb->truesize;
	}

	if (ctl & LTQ_DMA_EOP) {
		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += ch->skb_head->len;
		netif_receive_skb(ch->skb_head);
		ch->skb_head = NULL;
		ch->skb_tail = NULL;
		ret = XRX200_DMA_PACKET_COMPLETE;
	} else {
		ret = XRX200_DMA_PACKET_IN_PROGRESS;
	}

	return ret;
}

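/* NAPI RX poll: only completed packets count against the budget, so a
 * multi-descriptor frame in progress keeps the loop going. The channel
 * interrupt is re-enabled once the ring is drained below the budget.
 */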
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
				continue;
			if (ret != XRX200_DMA_PACKET_COMPLETE)
				break;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}

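/* TX completion: runs from NAPI, reclaims descriptors the hardware has
 * finished with, frees the associated skbs and credits the BQL queue.
 * netif_tx_lock() serializes this against the xmit path.
 */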
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}

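/* Transmit path: each frame occupies exactly one descriptor. The buffer
 * address handed to the DMA engine is rounded down to a burst-length
 * boundary, with the real start encoded as a byte offset in the
 * descriptor control word.
 */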
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to start on a burst length boundary */
	byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);

	desc->addr = mapping - byte_offset;

	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		    LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

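/* MTU changes only force a buffer reallocation when the MTU grows;
 * shrinking keeps the existing, larger buffers. The ring is drained via
 * xrx200_poll_rx() first, then each descriptor is re-armed with a buffer
 * of the new size and the old fragment is freed. On allocation failure
 * the old MTU and buffer sizes are restored.
 */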
static int
xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	int old_mtu = net_dev->mtu;
	bool running = false;
	void *buff;
	int curr_desc;
	int ret = 0;

	net_dev->mtu = new_mtu;
	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	if (new_mtu <= old_mtu)
		return ret;

	running = netif_running(net_dev);
	if (running) {
		napi_disable(&ch_rx->napi);
		ltq_dma_close(&ch_rx->dma);
	}

	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
	curr_desc = ch_rx->dma.desc;

	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		buff = ch_rx->rx_buff[ch_rx->dma.desc];
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret) {
			net_dev->mtu = old_mtu;
			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
			priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
			break;
		}
		skb_free_frag(buff);
	}

	ch_rx->dma.desc = curr_desc;
	if (running) {
		napi_enable(&ch_rx->napi);
		ltq_dma_open(&ch_rx->dma);
		ltq_dma_enable_irq(&ch_rx->dma);
	}

	return ret;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open = xrx200_open,
	.ndo_stop = xrx200_close,
	.ndo_start_xmit = xrx200_start_xmit,
	.ndo_change_mtu = xrx200_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

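/* One shared handler for the RX and TX DMA channels: mask the channel
 * interrupt and let the corresponding NAPI context do the actual work.
 */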
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}

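/* Set up the DMA port and both channels: allocate the RX ring and fill
 * it with buffers, then request the RX and TX interrupts. The error
 * paths unwind in reverse order of setup.
 */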
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
			  XRX200_DMA_BURST_LEN);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			skb_free_frag(priv->chan_rx.rx_buff[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		skb_free_frag(priv->chan_rx.rx_buff[i]);
}

static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_ethdev_address(np, net_dev);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set the RX inter-packet gap */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx,
		       NAPI_POLL_WEIGHT);
	netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
			  xrx200_tx_housekeeping);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");