// SPDX-License-Identifier: GPL-2.0
/* National Instruments XGE management MAC driver.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>

#define TX_BD_NUM	64
#define RX_BD_NUM	128

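/* AXI DMA channel register offsets: one control/status/descriptor-pointer
 * block for the TX (MM2S) channel and one for the RX (S2MM) channel.
 */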
#define XAXIDMA_TX_CR_OFFSET	0x00
#define XAXIDMA_TX_SR_OFFSET	0x04
#define XAXIDMA_TX_CDESC_OFFSET	0x08
#define XAXIDMA_TX_TDESC_OFFSET	0x10

#define XAXIDMA_RX_CR_OFFSET	0x30
#define XAXIDMA_RX_SR_OFFSET	0x34
#define XAXIDMA_RX_CDESC_OFFSET	0x38
#define XAXIDMA_RX_TDESC_OFFSET	0x40

#define XAXIDMA_CR_RUNSTOP_MASK	0x1
#define XAXIDMA_CR_RESET_MASK	0x4

#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000

#define XAXIDMA_DELAY_MASK	0xFF000000
#define XAXIDMA_COALESCE_MASK	0x00FF0000

#define XAXIDMA_DELAY_SHIFT	24
#define XAXIDMA_COALESCE_SHIFT	16

#define XAXIDMA_IRQ_IOC_MASK	0x00001000
#define XAXIDMA_IRQ_DELAY_MASK	0x00002000
#define XAXIDMA_IRQ_ERROR_MASK	0x00004000
#define XAXIDMA_IRQ_ALL_MASK	0x00007000

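/* Default interrupt coalescing thresholds and delay-timer waitbound
 * values used when (re)programming the DMA channel control registers.
 */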
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000

#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)

#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

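/* Ethernet frame geometry: header, FCS trailer and the MTUs from which
 * the maximum standard and jumbo frame sizes are derived.
 */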
#define NIXGE_HDR_SIZE		14
#define NIXGE_TRL_SIZE		4
#define NIXGE_MTU		1500
#define NIXGE_JUMBO_MTU		9000

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)

enum nixge_version {
	NIXGE_V2,
	NIXGE_V3,
	NIXGE_VERSION_COUNT
};

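/* Hardware buffer descriptor, matching the AXI DMA scatter-gather
 * descriptor layout: next-descriptor pointer, buffer address,
 * control/status words and the app words. The sw_id_offset words are
 * owned by software and carry the skb pointer for the buffer.
 */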
struct nixge_hw_dma_bd {
	u32 next_lo;
	u32 next_hi;
	u32 phys_lo;
	u32 phys_hi;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset_lo;
	u32 sw_id_offset_hi;
	u32 reserved6;
};

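/* On 64-bit capable platforms descriptor addresses are split across the
 * _lo/_hi word pair; on 32-bit platforms only the low word is used.
 */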
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
	do { \
		(bd)->field##_lo = lower_32_bits((addr)); \
		(bd)->field##_hi = upper_32_bits((addr)); \
	} while (0)
#else
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
	((bd)->field##_lo = lower_32_bits((addr)))
#endif

#define nixge_hw_dma_bd_set_phys(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), phys, (addr))

#define nixge_hw_dma_bd_set_next(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), next, (addr))

#define nixge_hw_dma_bd_set_offset(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_get_addr(bd, field) \
	(dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo))
#else
#define nixge_hw_dma_bd_get_addr(bd, field) \
	(dma_addr_t)((bd)->field##_lo)
#endif

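/* Per-descriptor TX bookkeeping: the skb to free on completion and the
 * DMA mapping to undo (dma_unmap_page() for frags mapped as pages,
 * dma_unmap_single() for the linear part).
 */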
struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};

struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	/* MDIO bus data */
	struct mii_bus *mii_bus;

	/* IO registers, DMA functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;

	/* buffer descriptor rings */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset,
				     dma_addr_t addr)
{
	writel(lower_32_bits(addr), priv->dma_regs + offset);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel(upper_32_bits(addr), priv->dma_regs + offset + 4);
#endif
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

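/* Unmap and free all RX buffers and release both descriptor rings.
 * Must be safe to call on a partially initialized state from the
 * nixge_hw_dma_bd_init() error path.
 */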
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	dma_addr_t phys_addr;
	struct sk_buff *skb;
	int i;

	/* the RX ring may not have been allocated yet */
	if (priv->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
							     phys);

			dma_unmap_single(ndev->dev.parent, phys_addr,
					 NIXGE_MAX_JUMBO_FRAME_SIZE,
					 DMA_FROM_DEVICE);

			skb = (struct sk_buff *)(uintptr_t)
				nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
							 sw_id_offset);
			dev_kfree_skb(skb);
		}

		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);
	}

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}

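/* Allocate the TX/RX descriptor rings, link each descriptor to the next
 * one (wrapping at the end), pre-fill the RX ring with mapped skbs,
 * program the interrupt coalescing settings and start both channels.
 */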
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t phys;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors */
	priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					   &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kcalloc(ndev->dev.parent,
				    TX_BD_NUM, sizeof(*priv->tx_skb),
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					   &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i],
					 priv->tx_bd_p +
					 sizeof(*priv->tx_bd_v) *
					 ((i + 1) % TX_BD_NUM));
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i],
					 priv->rx_bd_p +
					 sizeof(*priv->rx_bd_v) *
					 ((i + 1) % RX_BD_NUM));

		skb = __netdev_alloc_skb_ip_align(ndev,
						  NIXGE_MAX_JUMBO_FRAME_SIZE,
						  GFP_KERNEL);
		if (!skb)
			goto out;

		nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb);
		phys = dma_map_single(ndev->dev.parent, skb->data,
				      NIXGE_MAX_JUMBO_FRAME_SIZE,
				      DMA_FROM_DEVICE);

		nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys);

		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (priv->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (priv->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
				 (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write the run/stop bit in the Tx channel control register.
	 * The Tx channel is now ready, but transmission only starts once
	 * the tail descriptor pointer is written.
	 */
	nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

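/* Reset one DMA channel: set the reset bit in its control register and
 * poll until the engine clears it again. Pending transfers are flushed
 * or completed as part of the reset.
 */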
static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

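/* Reclaim completed TX descriptors: walk the ring from the consumer
 * index while the COMPLETE bit is set, unmap the buffers, update the
 * stats and wake the queue if any descriptor was freed.
 */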
static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

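/* Queue one frame for transmission: map the linear part into the BD at
 * the tail index with the SOF bit set, map each page fragment into the
 * following BDs, mark the last BD with EOF, then kick the engine by
 * writing the tail descriptor pointer.
 */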
static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p, cur_phys;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		/* ring is full: let the core requeue the skb */
		return NETDEV_TX_BUSY;
	}

	cur_phys = dma_map_single(ndev->dev.parent, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_phys))
		goto drop;
	nixge_hw_dma_bd_set_phys(cur_p, cur_phys);

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_phys))
			goto frag_err;
		nixge_hw_dma_bd_set_phys(cur_p, cur_phys);

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;

	/* kick off the transfer */
	nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	/* step back to the first BD of the frame and undo the mapping
	 * of the linear part as well
	 */
	if (priv->tx_bd_tail)
		priv->tx_bd_tail--;
	else
		priv->tx_bd_tail = TX_BD_NUM - 1;
	nixge_tx_skb_unmap(priv, &priv->tx_skb[priv->tx_bd_tail]);
drop:
	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

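/* NAPI receive: harvest completed RX descriptors up to the budget, push
 * the packets up the stack via GRO, and refill each descriptor with a
 * freshly allocated and mapped skb before advancing the tail pointer.
 */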
static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0, cur_phys = 0;
	u32 packets = 0;
	u32 length = 0;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK) &&
	       budget > packets) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(uintptr_t)
			nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent,
				 nixge_hw_dma_bd_get_addr(cur_p, phys),
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* For now mark them as CHECKSUM_NONE since
		 * we don't have offload capabilities
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_phys = dma_map_single(ndev->dev.parent, new_skb->data,
					  NIXGE_MAX_JUMBO_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb);

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

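/* NAPI poll: receive up to the budget; when done, either reschedule if
 * more work arrived in the meantime or re-enable the RX interrupts.
 */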
static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If there's more, reschedule, but clear */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* if not, turn on RX IRQs again */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	dma_addr_t phys;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci],
						phys);

		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XAXIDMA_IRQ_ALL_MASK;
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XAXIDMA_IRQ_ALL_MASK;
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	dma_addr_t phys;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* mask the RX interrupts, NAPI will re-enable them */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci],
						phys);
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XAXIDMA_IRQ_ALL_MASK;
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XAXIDMA_IRQ_ALL_MASK;
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

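/* DMA error recovery, run from tasklet context: reset both channels,
 * scrub the TX bookkeeping, reset the ring indexes, reprogram the
 * coalescing defaults and restart both channels.
 */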
static void nixge_dma_err_handler(struct tasklet_struct *t)
{
	struct nixge_priv *lp = from_tasklet(lp, t, dma_err_tasklet);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];
		nixge_tx_skb_unmap(lp, tx_skb);

		nixge_hw_dma_bd_set_phys(cur_p, 0);
		cur_p->cntrl = 0;
		cur_p->status = 0;
		nixge_hw_dma_bd_set_offset(cur_p, 0);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
				 (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write the run/stop bit in the Tx channel control register.
	 * The Tx channel is now ready, but transmission only starts once
	 * the tail descriptor pointer is written.
	 */
	nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}

static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_setup(&priv->dma_err_tasklet, nixge_dma_err_handler);

	napi_enable(&priv->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	napi_disable(&priv->napi);
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}

static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	/* Stop both DMA channels by clearing the run/stop bit */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & ~XAXIDMA_CR_RUNSTOP_MASK);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & ~XAXIDMA_CR_RUNSTOP_MASK);

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}

static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	    NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2] << 24) |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);

	return err;
}

static const struct net_device_ops nixge_netdev_ops = {
	.ndo_open = nixge_open,
	.ndo_stop = nixge_stop,
	.ndo_start_xmit = nixge_start_xmit,
	.ndo_change_mtu	= nixge_change_mtu,
	.ndo_set_mac_address = nixge_net_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};

static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, "nixge", sizeof(ed->driver));
	strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}

static int
nixge_ethtools_get_coalesce(struct net_device *ndev,
			    struct ethtool_coalesce *ecoalesce,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 regval = 0;

	regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

static int
nixge_ethtools_set_coalesce(struct net_device *ndev,
			    struct ethtool_coalesce *ecoalesce,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int nixge_ethtools_set_phys_id(struct net_device *ndev,
				      enum ethtool_phys_id_state state)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Enable identification LED override */
		ctrl |= NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		return 2;

	case ETHTOOL_ID_ON:
		ctrl |= NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_OFF:
		ctrl &= ~NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		ctrl &= ~NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;
	}

	return 0;
}

static const struct ethtool_ops nixge_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = nixge_ethtools_get_drvinfo,
	.get_coalesce = nixge_ethtools_get_coalesce,
	.set_coalesce = nixge_ethtools_set_coalesce,
	.set_phys_id = nixge_ethtools_set_phys_id,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_link = ethtool_op_get_link,
};

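/* MDIO transactions: program the OP register, write 1 to MDIO_CTRL to
 * start the transfer, then poll MDIO_CTRL until the core clears it.
 * Clause 45 accesses first issue an ADDRESS cycle with the register
 * number, then the actual read/write cycle against the MMD.
 */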
static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 |
			NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	}

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 |
			NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 |
			NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		/* kick off the transaction like the other paths do */
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 |
			NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	}

	return err;
}

static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "nixge_mii_bus";
	bus->read = nixge_mdio_read;
	bus->write = nixge_mdio_write;
	bus->parent = priv->dev;

	priv->mii_bus = bus;

	return of_mdiobus_register(bus, np);
}

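/* Read the MAC address from the nvmem cell named "address"; returns an
 * ERR_PTR if the cell is missing, otherwise a buffer the caller must
 * kfree().
 */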
static void *nixge_get_nvmem_address(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t cell_size;
	char *mac;

	cell = nvmem_cell_get(dev, "address");
	if (IS_ERR(cell))
		return cell;

	mac = nvmem_cell_read(cell, &cell_size);
	nvmem_cell_put(cell);

	return mac;
}

static const struct of_device_id nixge_dt_ids[] = {
	{ .compatible = "ni,xge-enet-2.00", .data = (void *)NIXGE_V2 },
	{ .compatible = "ni,xge-enet-3.00", .data = (void *)NIXGE_V3 },
	{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);

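/* Map the register resources. V2 cores expose a single region with the
 * control registers at a fixed offset from the DMA registers; V3 cores
 * expose separate "dma" and "ctrl" named regions.
 */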
static int nixge_of_get_resources(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	enum nixge_version version;
	struct net_device *ndev;
	struct nixge_priv *priv;

	ndev = platform_get_drvdata(pdev);
	priv = netdev_priv(ndev);
	of_id = of_match_node(nixge_dt_ids, pdev->dev.of_node);
	if (!of_id)
		return -ENODEV;

	version = (enum nixge_version)of_id->data;
	if (version <= NIXGE_V2)
		priv->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	else
		priv->dma_regs = devm_platform_ioremap_resource_byname(pdev, "dma");
	if (IS_ERR(priv->dma_regs)) {
		netdev_err(ndev, "failed to map dma regs\n");
		return PTR_ERR(priv->dma_regs);
	}
	if (version <= NIXGE_V2)
		priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
	else
		priv->ctrl_regs = devm_platform_ioremap_resource_byname(pdev, "ctrl");
	if (IS_ERR(priv->ctrl_regs)) {
		netdev_err(ndev, "failed to map ctrl regs\n");
		return PTR_ERR(priv->ctrl_regs);
	}
	return 0;
}

static int nixge_probe(struct platform_device *pdev)
{
	struct device_node *mn, *phy_node;
	struct nixge_priv *priv;
	struct net_device *ndev;
	const u8 *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) {
		eth_hw_addr_set(ndev, mac_addr);
		kfree(mac_addr);
	} else {
		eth_hw_addr_random(ndev);
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
	err = nixge_of_get_resources(pdev);
	if (err)
		goto free_netdev;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq\n");
		err = priv->tx_irq;
		goto free_netdev;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq\n");
		err = priv->rx_irq;
		goto free_netdev;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	mn = of_get_child_by_name(pdev->dev.of_node, "mdio");
	if (mn) {
		err = nixge_mdio_setup(priv, mn);
		of_node_put(mn);
		if (err) {
			netdev_err(ndev, "error registering mdio bus\n");
			goto free_netdev;
		}
	}

	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
	if (err) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		goto unregister_mdio;
	}

	phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(pdev->dev.of_node)) {
		err = of_phy_register_fixed_link(pdev->dev.of_node);
		if (err < 0) {
			netdev_err(ndev, "broken fixed-link specification\n");
			goto unregister_mdio;
		}
		phy_node = of_node_get(pdev->dev.of_node);
	}
	priv->phy_node = phy_node;

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto free_phy;
	}

	return 0;

free_phy:
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
	of_node_put(phy_node);

unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}

static int nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
	of_node_put(priv->phy_node);

	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);

	return 0;
}

static struct platform_driver nixge_driver = {
	.probe		= nixge_probe,
	.remove		= nixge_remove,
	.driver		= {
		.name		= "nixge",
		.of_match_table	= of_match_ptr(nixge_dt_ids),
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");