// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/reset.h>
#include <linux/math64.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	  | \
		 NETIF_MSG_TIMER  | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
	"ch0",	/* RAVB_BE */
	"ch1",	/* RAVB_NC */
};

static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
	"ch18",	/* RAVB_BE */
	"ch19",	/* RAVB_NC */
};

void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
		 u32 set)
{
	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}

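/* Busy-wait for a masked register field to reach the expected value;
 * 10000 polls x 10 us gives a timeout of roughly 100 ms.
 */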
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}

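/* Switch the AVB-DMAC to CONFIG mode and confirm the transition via
 * CSR.OPS; descriptor rings and most registers may only be set up in
 * this mode.
 */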
static int ravb_config(struct net_device *ndev)
{
	int error;

	/* Set config mode */
	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
	/* Check if the operating mode is changed to the config mode */
	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
	if (error)
		netdev_err(ndev, "failed to switch device to config mode\n");

	return error;
}

static void ravb_set_rate_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 10:		/* 10BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
		break;
	case 100:		/* 100BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
		break;
	case 1000:		/* 1000BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
		break;
	}
}

static void ravb_set_rate_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 100:		/* 100BASE */
		ravb_write(ndev, GECMR_SPEED_100, GECMR);
		break;
	case 1000:		/* 1000BASE */
		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
		break;
	}
}

static void ravb_set_buffer_align(struct sk_buff *skb)
{
	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, RAVB_ALIGN - reserve);
}

/* Get MAC address from the MAC address registers
 *
 * Ethernet AVB device doesn't have ROM for MAC address.
 * This function gets the MAC address that was used by a bootloader.
 */
static void ravb_read_mac_address(struct device_node *np,
				  struct net_device *ndev)
{
	int ret;

	ret = of_get_ethdev_address(np, ndev);
	if (ret) {
		u32 mahr = ravb_read(ndev, MAHR);
		u32 malr = ravb_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}

static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}

/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ravb_set_mdc,
	.set_mdio_dir = ravb_set_mdio_dir,
	.set_mdio_data = ravb_set_mdio_data,
	.get_mdio_data = ravb_get_mdio_data,
};

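/* Free transmitted skbs and reclaim TX descriptors.
 *
 * cur_tx[q] counts descriptors handed to the hardware and dirty_tx[q]
 * counts descriptors already reclaimed, so their difference is the
 * in-flight window. With free_txed_only set, reclaiming stops at the
 * first descriptor the hardware has not completed yet (die_dt still
 * different from DT_FEMPTY).
 */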
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats[q];
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_tx_desc *desc;
	unsigned int entry;
	int free_num = 0;
	u32 size;

	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
		bool txed;

		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
					     num_tx_desc);
		desc = &priv->tx_ring[q][entry];
		txed = desc->die_dt == DT_FEMPTY;
		if (free_txed_only && !txed)
			break;
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb. */
		if (priv->tx_skb[q][entry / num_tx_desc]) {
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			/* Last packet descriptor? */
			if (entry % num_tx_desc == num_tx_desc - 1) {
				entry /= num_tx_desc;
				dev_kfree_skb_any(priv->tx_skb[q][entry]);
				priv->tx_skb[q][entry] = NULL;
				if (txed)
					stats->tx_packets++;
			}
			free_num++;
		}
		if (txed)
			stats->tx_bytes += size;
		desc->die_dt = DT_EEMPTY;
	}
	return free_num;
}

static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;
	unsigned int i;

	if (!priv->gbeth_rx_ring)
		return;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];

		if (!dma_mapping_error(ndev->dev.parent,
				       le32_to_cpu(desc->dptr)))
			dma_unmap_single(ndev->dev.parent,
					 le32_to_cpu(desc->dptr),
					 GBETH_RX_BUFF_MAX,
					 DMA_FROM_DEVICE);
	}
	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
			  priv->rx_desc_dma[q]);
	priv->gbeth_rx_ring = NULL;
}

static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;
	unsigned int i;

	if (!priv->rx_ring[q])
		return;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];

		if (!dma_mapping_error(ndev->dev.parent,
				       le32_to_cpu(desc->dptr)))
			dma_unmap_single(ndev->dev.parent,
					 le32_to_cpu(desc->dptr),
					 RX_BUF_SZ,
					 DMA_FROM_DEVICE);
	}
	ring_size = sizeof(struct ravb_ex_rx_desc) *
		    (priv->num_rx_ring[q] + 1);
	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
			  priv->rx_desc_dma[q]);
	priv->rx_ring[q] = NULL;
}

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned int num_tx_desc = priv->num_tx_desc;
	unsigned int ring_size;
	unsigned int i;

	info->rx_ring_free(ndev, q);

	if (priv->tx_ring[q]) {
		ravb_tx_free(ndev, q, false);

		ring_size = sizeof(struct ravb_tx_desc) *
			    (priv->num_tx_ring[q] * num_tx_desc + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
				  priv->tx_desc_dma[q]);
		priv->tx_ring[q] = NULL;
	}

	/* Free RX skb ringbuffer */
	if (priv->rx_skb[q]) {
		for (i = 0; i < priv->num_rx_ring[q]; i++)
			dev_kfree_skb(priv->rx_skb[q][i]);
	}
	kfree(priv->rx_skb[q]);
	priv->rx_skb[q] = NULL;

	/* Free aligned TX buffers */
	kfree(priv->tx_align[q]);
	priv->tx_align[q] = NULL;

	/* Free TX skb ringbuffer.
	 * SKBs are freed by ravb_tx_free() call above.
	 */
	kfree(priv->tx_skb[q]);
	priv->tx_skb[q] = NULL;
}

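/* Each descriptor ring is allocated with one extra slot: the final
 * descriptor is DT_LINKFIX and points back at the ring base, so the
 * DMAC sees the ring as a circular chain.
 */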
static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_rx_desc *rx_desc;
	unsigned int rx_ring_size;
	dma_addr_t dma_addr;
	unsigned int i;

	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
	/* Build RX ring buffer */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
		rx_desc = &priv->gbeth_rx_ring[i];
		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
					  GBETH_RX_BUFF_MAX,
					  DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->ds_cc = cpu_to_le16(0);
		rx_desc->dptr = cpu_to_le32(dma_addr);
		rx_desc->die_dt = DT_FEMPTY;
	}
	rx_desc = &priv->gbeth_rx_ring[i];
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */
}

static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_ex_rx_desc *rx_desc;
	unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
	dma_addr_t dma_addr;
	unsigned int i;

	memset(priv->rx_ring[q], 0, rx_ring_size);
	/* Build RX ring buffer */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
		rx_desc = &priv->rx_ring[q][i];
		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
					  RX_BUF_SZ,
					  DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->ds_cc = cpu_to_le16(0);
		rx_desc->dptr = cpu_to_le32(dma_addr);
		rx_desc->die_dt = DT_FEMPTY;
	}
	rx_desc = &priv->rx_ring[q][i];
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */
}

/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_tx_desc *tx_desc;
	struct ravb_desc *desc;
	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
				    num_tx_desc;
	unsigned int i;

	priv->cur_rx[q] = 0;
	priv->cur_tx[q] = 0;
	priv->dirty_rx[q] = 0;
	priv->dirty_tx[q] = 0;

	info->rx_ring_format(ndev, q);

	memset(priv->tx_ring[q], 0, tx_ring_size);
	/* Build TX ring buffer */
	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
	     i++, tx_desc++) {
		tx_desc->die_dt = DT_EEMPTY;
		if (num_tx_desc > 1) {
			tx_desc++;
			tx_desc->die_dt = DT_EEMPTY;
		}
	}
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
	tx_desc->die_dt = DT_LINKFIX; /* type */

	/* RX descriptor base address for best effort */
	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

	/* TX descriptor base address for best effort */
	desc = &priv->desc_bat[q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);

	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
						 &priv->rx_desc_dma[q],
						 GFP_KERNEL);
	return priv->gbeth_rx_ring;
}

static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);

	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->rx_desc_dma[q],
					      GFP_KERNEL);
	return priv->rx_ring[q];
}

/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned int num_tx_desc = priv->num_tx_desc;
	unsigned int ring_size;
	struct sk_buff *skb;
	unsigned int i;

	/* Allocate RX and TX skb rings */
	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
	if (!priv->rx_skb[q] || !priv->tx_skb[q])
		goto error;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
		if (!skb)
			goto error;
		ravb_set_buffer_align(skb);
		priv->rx_skb[q][i] = skb;
	}

	if (num_tx_desc > 1) {
		/* Allocate rings for the aligned buffers */
		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
					    DPTR_ALIGN - 1, GFP_KERNEL);
		if (!priv->tx_align[q])
			goto error;
	}

	/* Allocate all RX descriptors. */
	if (!info->alloc_rx_desc(ndev, q))
		goto error;

	priv->dirty_rx[q] = 0;

	/* Allocate all TX descriptors. */
	ring_size = sizeof(struct ravb_tx_desc) *
		    (priv->num_tx_ring[q] * num_tx_desc + 1);
	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->tx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->tx_ring[q])
		goto error;

	return 0;

error:
	ravb_ring_free(ndev, q);

	return -ENOMEM;
}

static void ravb_emac_init_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	/* Receive frame limit set register */
	ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
			 ECMR_TE | ECMR_RE | ECMR_RCPT |
			 ECMR_TXF | ECMR_RXF, ECMR);

	ravb_set_rate_gbeth(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);

	ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
}

static void ravb_emac_init_rcar(struct net_device *ndev)
{
	/* Receive frame limit set register */
	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
		   ECMR_TE | ECMR_RE, ECMR);

	ravb_set_rate_rcar(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev,
		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	info->emac_init(ndev);
}

static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);

	/* Set DMAC RX */
	ravb_write(ndev, 0x60000000, RCR);

	/* Set Max Frame Length (RTC) */
	ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);

	/* Set FIFO size */
	ravb_write(ndev, 0x00222200, TGC);

	ravb_write(ndev, 0, TCCR);

	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0x0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);

	ravb_write(ndev, TIC_FTE0, TIC);

	return 0;
}

static int ravb_dmac_init_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;
	error = ravb_ring_init(ndev, RAVB_NC);
	if (error) {
		ravb_ring_free(ndev, RAVB_BE);
		return error;
	}

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);
	ravb_ring_format(ndev, RAVB_NC);

	/* Set AVB RX */
	ravb_write(ndev,
		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);

	/* Set FIFO size */
	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);

	/* Timestamp enable */
	ravb_write(ndev, TCCR_TFEN, TCCR);

	/* Interrupt init: */
	if (info->multi_irqs) {
		/* Clear DIL.DPLx */
		ravb_write(ndev, 0, DIL);
		/* Set queue specific interrupt */
		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
	}
	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

	return 0;
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Set CONFIG mode */
	error = ravb_config(ndev);
	if (error)
		return error;

	error = info->dmac_init(ndev);
	if (error)
		return error;

	/* Setting the control will start the AVB-DMAC process. */
	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);

	return 0;
}

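/* Drain the TX timestamp FIFO. For each entry, TFA2/TFA1/TFA0 hold the
 * tag and the seconds/nanoseconds of the timestamp; the tag is matched
 * against the queued ts_skb list and the timestamp is delivered to the
 * matching socket. Stale list entries with older tags are dropped.
 */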
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	struct timespec64 ts;
	u16 tag, tfa_tag;
	int count;
	u32 tfa2;

	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
	while (count--) {
		tfa2 = ravb_read(ndev, TFA2);
		tfa_tag = (tfa2 & TFA2_TST) >> 16;
		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
			    ravb_read(ndev, TFA1);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
					 list) {
			skb = ts_skb->skb;
			tag = ts_skb->tag;
			list_del(&ts_skb->list);
			kfree(ts_skb);
			if (tag == tfa_tag) {
				skb_tstamp_tx(skb, &shhwtstamps);
				dev_consume_skb_any(skb);
				break;
			} else {
				dev_kfree_skb_any(skb);
			}
		}
		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
	}
}

static void ravb_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
	 * appended to packet data
	 */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}

static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
					  struct ravb_rx_desc *desc)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;

	skb = priv->rx_skb[RAVB_BE][entry];
	priv->rx_skb[RAVB_BE][entry] = NULL;
	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
			 ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);

	return skb;
}

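/* Packet receive function for Gigabit Ethernet.
 *
 * A frame that spans several buffers arrives as a DT_FSTART descriptor,
 * zero or more DT_FMID descriptors and a DT_FEND descriptor; the
 * fragments are linearized into rx_1st_skb before the frame is passed
 * up the stack. A frame that fits one buffer arrives as DT_FSINGLE.
 */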
static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *stats;
	struct ravb_rx_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u8 desc_status;
	int boguscnt;
	u16 pkt_len;
	u8 die_dt;
	int entry;
	int limit;

	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
	boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
	stats = &priv->stats[q];

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	desc = &priv->gbeth_rx_ring[entry];
	while (desc->die_dt != DT_FEMPTY) {
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		if (--boguscnt < 0)
			break;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			die_dt = desc->die_dt & 0xF0;
			switch (die_dt) {
			case DT_FSINGLE:
				/* Whole frame in a single descriptor */
				skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_put(skb, pkt_len);
				skb->protocol = eth_type_trans(skb, ndev);
				napi_gro_receive(&priv->napi[q], skb);
				stats->rx_packets++;
				stats->rx_bytes += pkt_len;
				break;
			case DT_FSTART:
				/* First fragment of a multi-descriptor frame */
				priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_put(priv->rx_1st_skb, pkt_len);
				break;
			case DT_FMID:
				/* Middle fragment: copy into the first skb */
				skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
							       priv->rx_1st_skb->len,
							       skb->data,
							       pkt_len);
				skb_put(priv->rx_1st_skb, pkt_len);
				dev_kfree_skb(skb);
				break;
			case DT_FEND:
				/* Last fragment: complete and deliver the frame */
				skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
							       priv->rx_1st_skb->len,
							       skb->data,
							       pkt_len);
				skb_put(priv->rx_1st_skb, pkt_len);
				dev_kfree_skb(skb);
				priv->rx_1st_skb->protocol =
					eth_type_trans(priv->rx_1st_skb, ndev);
				napi_gro_receive(&priv->napi[q],
						 priv->rx_1st_skb);
				stats->rx_packets++;
				stats->rx_bytes += priv->rx_1st_skb->len;
				break;
			}
		}

		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
		desc = &priv->gbeth_rx_ring[entry];
	}

	/* Refill the RX ring buffers. */
	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
		desc = &priv->gbeth_rx_ring[entry];
		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);

		if (!priv->rx_skb[q][entry]) {
			skb = netdev_alloc_skb(ndev, info->max_rx_len);
			if (!skb)
				break;	/* Better luck next round. */
			ravb_set_buffer_align(skb);
			dma_addr = dma_map_single(ndev->dev.parent,
						  skb->data,
						  GBETH_RX_BUFF_MAX,
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA from happening...
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->ds_cc = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			priv->rx_skb[q][entry] = skb;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		desc->die_dt = DT_FEMPTY;
	}

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;
}

/* Packet receive function for Ethernet AVB */
static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
		       priv->cur_rx[q];
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_ex_rx_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct timespec64 ts;
	u8 desc_status;
	u16 pkt_len;
	int limit;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	desc = &priv->rx_ring[q][entry];
	while (desc->die_dt != DT_FEMPTY) {
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		if (--boguscnt < 0)
			break;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
				   MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

			skb = priv->rx_skb[q][entry];
			priv->rx_skb[q][entry] = NULL;
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 RX_BUF_SZ,
					 DMA_FROM_DEVICE);
			get_ts &= (q == RAVB_NC) ?
					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
			if (get_ts) {
				struct skb_shared_hwtstamps *shhwtstamps;

				shhwtstamps = skb_hwtstamps(skb);
				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
					     32) | le32_to_cpu(desc->ts_sl);
				ts.tv_nsec = le32_to_cpu(desc->ts_n);
				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
			}

			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
				ravb_rx_csum(skb);
			napi_gro_receive(&priv->napi[q], skb);
			stats->rx_packets++;
			stats->rx_bytes += pkt_len;
		}

		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
	}

	/* Refill the RX ring buffers. */
	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);

		if (!priv->rx_skb[q][entry]) {
			skb = netdev_alloc_skb(ndev, info->max_rx_len);
			if (!skb)
				break;	/* Better luck next round. */
			ravb_set_buffer_align(skb);
			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
						  le16_to_cpu(desc->ds_cc),
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA from happening...
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->ds_cc = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			priv->rx_skb[q][entry] = skb;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		desc->die_dt = DT_FEMPTY;
	}

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;
}

/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	return info->receive(ndev, quota, q);
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
	/* Disable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
	/* Enable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* Function for waiting dma process finished */
static int ravb_stop_dma(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
	if (error)
		return error;

	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
			  0);
	if (error)
		return error;

	/* Stop the E-MAC's RX/TX processes. */
	ravb_rcv_snd_disable(ndev);

	/* Wait for stopping the RX DMA process */
	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
	if (error)
		return error;

	/* Stop AVB-DMAC process */
	return ravb_config(ndev);
}

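/* E-MAC interrupt handler; must be called with priv->lock held.
 * ECSR is write-one-to-clear, so writing back the value just read
 * acknowledges exactly the events that are about to be handled.
 */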
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */

	if (ecsr & ECSR_MPD)
		pm_wakeup_event(&priv->pdev->dev, 0);
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}

static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);

	spin_lock(&priv->lock);
	ravb_emac_interrupt_unlocked(ndev);
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
			   RIS2);

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}

static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	u32 ris0 = ravb_read(ndev, RIS0);
	u32 ric0 = ravb_read(ndev, RIC0);
	u32 tis = ravb_read(ndev, TIS);
	u32 tic = ravb_read(ndev, TIC);

	if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
		if (napi_schedule_prep(&priv->napi[q])) {
			/* Mask RX and TX interrupts */
			if (!info->irq_en_dis) {
				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
				ravb_write(ndev, tic & ~BIT(q), TIC);
			} else {
				ravb_write(ndev, BIT(q), RID0);
				ravb_write(ndev, BIT(q), TID);
			}
			__napi_schedule(&priv->napi[q]);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
				    ris0, ric0);
			netdev_warn(ndev,
				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
				    tis, tic);
		}
		return true;
	}
	return false;
}

static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
	u32 tis = ravb_read(ndev, TIS);

	if (tis & TIS_TFUF) {
		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
		ravb_get_tx_tstamp(ndev);
		return true;
	}
	return false;
}

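/* Single-IRQ interrupt handler: read the interrupt summary status (ISS)
 * once, then dispatch to the timestamp, per-queue, E-MAC, error and
 * gPTP sub-handlers as indicated by the summary bits.
 */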
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		int q;

		/* Timestamp updated */
		if (ravb_timestamp_interrupt(ndev))
			result = IRQ_HANDLED;

		/* Network control and best effort queue RX/TX */
		if (info->nc_queues) {
			for (q = RAVB_NC; q >= RAVB_BE; q--) {
				if (ravb_queue_interrupt(ndev, q))
					result = IRQ_HANDLED;
			}
		} else {
			if (ravb_queue_interrupt(ndev, RAVB_BE))
				result = IRQ_HANDLED;
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt_unlocked(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);
	return result;
}

/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Timestamp updated */
	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
		result = IRQ_HANDLED;

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);
	return result;
}

static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;

	spin_lock(&priv->lock);

	/* Network control/Best effort queue RX/TX */
	if (ravb_queue_interrupt(ndev, q))
		result = IRQ_HANDLED;

	spin_unlock(&priv->lock);
	return result;
}

static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}

static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}

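/* NAPI poll handler: service the RX ring, reclaim completed TX
 * descriptors, then re-enable the per-queue RX/TX interrupts that
 * ravb_queue_interrupt() masked when scheduling this poll.
 */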
static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	bool gptp = info->gptp || info->ccc_gac;
	struct ravb_rx_desc *desc;
	unsigned long flags;
	int q = napi - priv->napi;
	int mask = BIT(q);
	int quota = budget;
	unsigned int entry;

	if (!gptp) {
		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
		desc = &priv->gbeth_rx_ring[entry];
	}
	/* Processing RX Descriptor Ring */
	/* Clear RX interrupt */
	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
	if (gptp || desc->die_dt != DT_FEMPTY) {
		if (ravb_rx(ndev, &quota, q))
			goto out;
	}

	/* Processing TX Descriptor Ring */
	spin_lock_irqsave(&priv->lock, flags);
	/* Clear TX interrupt */
	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
	ravb_tx_free(ndev, q, true);
	netif_wake_subqueue(ndev, q);
	spin_unlock_irqrestore(&priv->lock, flags);

	napi_complete(napi);

	/* Re-enable RX/TX interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	if (!info->irq_en_dis) {
		ravb_modify(ndev, RIC0, mask, mask);
		ravb_modify(ndev, TIC, mask, mask);
	} else {
		ravb_write(ndev, mask, RIE0);
		ravb_write(ndev, mask, TIE);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Receive error message handling */
	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
	if (info->nc_queues)
		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
		ndev->stats.rx_over_errors = priv->rx_over_errors;
	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
	return budget - quota;
}

static void ravb_set_duplex_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
}

/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX right over here, if E-MAC change is ignored */
	if (priv->no_avb_link)
		ravb_rcv_snd_disable(ndev);

	if (phydev->link) {
		if (info->half_duplex && phydev->duplex != priv->duplex) {
			new_state = true;
			priv->duplex = phydev->duplex;
			ravb_set_duplex_gbeth(ndev);
		}

		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
			info->set_rate(ndev);
		}
		if (!priv->link) {
			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = true;
			priv->link = phydev->link;
		}
	} else if (priv->link) {
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
		if (info->half_duplex)
			priv->duplex = -1;
	}

	/* Enable TX and RX, if E-MAC change is ignored */
	if (priv->no_avb_link && phydev->link)
		ravb_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}

static const struct soc_device_attribute r8a7795es10[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.0", },
	{ /* sentinel */ }
};

/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct phy_device *phydev;
	struct device_node *pn;
	phy_interface_t iface;
	int err;

	priv->link = 0;
	priv->speed = 0;
	priv->duplex = -1;

	/* Try connecting to PHY */
	pn = of_parse_phandle(np, "phy-handle", 0);
	if (!pn) {
		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		if (of_phy_is_fixed_link(np)) {
			err = of_phy_register_fixed_link(np);
			if (err)
				return err;
		}
		pn = of_node_get(np);
	}

	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
				     : priv->phy_interface;
	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
	of_node_put(pn);
	if (!phydev) {
		netdev_err(ndev, "failed to connect PHY\n");
		err = -ENOENT;
		goto err_deregister_fixed_link;
	}

	/* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
	 * at this time.
	 */
	if (soc_device_match(r8a7795es10)) {
		phy_set_max_speed(phydev, SPEED_100);

		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
	}

	if (!info->half_duplex) {
		/* 10BASE, Pause and Asym Pause is not supported */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

		/* Half Duplex is not supported */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	/* Indicate that the MAC is responsible for managing PHY PM */
	phydev->mac_managed_pm = true;
	phy_attached_info(phydev);

	return 0;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	return err;
}

/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
	int error;

	error = ravb_phy_init(ndev);
	if (error)
		return error;

	phy_start(ndev->phydev);

	return 0;
}

static u32 ravb_get_msglevel(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ravb_private *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_csum_offload_errors",
	"rx_queue_0_over_errors",
};

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_missed_errors",
	"rx_queue_0_over_errors",

	"rx_queue_1_current",
	"tx_queue_1_current",
	"rx_queue_1_dirty",
	"tx_queue_1_dirty",
	"rx_queue_1_packets",
	"tx_queue_1_packets",
	"rx_queue_1_bytes",
	"tx_queue_1_bytes",
	"rx_queue_1_mcast_packets",
	"rx_queue_1_errors",
	"rx_queue_1_crc_errors",
	"rx_queue_1_frame_errors",
	"rx_queue_1_length_errors",
	"rx_queue_1_missed_errors",
	"rx_queue_1_over_errors",
};

static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
	struct ravb_private *priv = netdev_priv(netdev);
	const struct ravb_hw_info *info = priv->info;

	switch (sset) {
	case ETH_SS_STATS:
		return info->stats_len;
	default:
		return -EOPNOTSUPP;
	}
}

static void ravb_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *estats, u64 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int num_rx_q;
	int i = 0;
	int q;

	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
	/* Device-specific stats */
	for (q = RAVB_BE; q < num_rx_q; q++) {
		struct net_device_stats *stats = &priv->stats[q];

		data[i++] = priv->cur_rx[q];
		data[i++] = priv->cur_tx[q];
		data[i++] = priv->dirty_rx[q];
		data[i++] = priv->dirty_tx[q];
		data[i++] = stats->rx_packets;
		data[i++] = stats->tx_packets;
		data[i++] = stats->rx_bytes;
		data[i++] = stats->tx_bytes;
		data[i++] = stats->multicast;
		data[i++] = stats->rx_errors;
		data[i++] = stats->rx_crc_errors;
		data[i++] = stats->rx_frame_errors;
		data[i++] = stats->rx_length_errors;
		data[i++] = stats->rx_missed_errors;
		data[i++] = stats->rx_over_errors;
	}
}

static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, info->gstrings_stats, info->gstrings_size);
		break;
	}
}

static void ravb_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ring->rx_max_pending = BE_RX_RING_MAX;
	ring->tx_max_pending = BE_TX_RING_MAX;
	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}

static int ravb_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	if (ring->tx_pending > BE_TX_RING_MAX ||
	    ring->rx_pending > BE_RX_RING_MAX ||
	    ring->tx_pending < BE_TX_RING_MIN ||
	    ring->rx_pending < BE_RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* Stop PTP Clock driver */
		if (info->gptp)
			ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
		error = ravb_stop_dma(ndev);
		if (error) {
			netdev_err(ndev,
				   "cannot set ringparam! Any AVB processes are still running?\n");
			return error;
		}
		synchronize_irq(ndev->irq);

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		if (info->nc_queues)
			ravb_ring_free(ndev, RAVB_NC);
	}

	/* Set new parameters */
	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

	if (netif_running(ndev)) {
		error = ravb_dmac_init(ndev);
		if (error) {
			netdev_err(ndev,
				   "%s: ravb_dmac_init() failed, error %d\n",
				   __func__, error);
			return error;
		}

		ravb_emac_init(ndev);

		/* Initialise PTP Clock driver */
		if (info->gptp)
			ravb_ptp_init(ndev, priv->pdev);

		netif_device_attach(ndev);
	}

	return 0;
}

static int ravb_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *hw_info = priv->info;

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_ALL);
	if (hw_info->gptp || hw_info->ccc_gac)
		info->phc_index = ptp_clock_index(priv->ptp.clock);

	return 0;
}

static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ravb_private *priv = netdev_priv(ndev);

	wol->supported = WAKE_MAGIC;
	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
}

static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);

	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);

	return 0;
}

static const struct ethtool_ops ravb_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_msglevel		= ravb_get_msglevel,
	.set_msglevel		= ravb_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ravb_get_strings,
	.get_ethtool_stats	= ravb_get_ethtool_stats,
	.get_sset_count		= ravb_get_sset_count,
	.get_ringparam		= ravb_get_ringparam,
	.set_ringparam		= ravb_set_ringparam,
	.get_ts_info		= ravb_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_wol		= ravb_get_wol,
	.set_wol		= ravb_set_wol,
};

static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
				struct net_device *ndev, struct device *dev,
				const char *ch)
{
	char *name;
	int error;

	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
	if (!name)
		return -ENOMEM;
	error = request_irq(irq, handler, 0, name, ndev);
	if (error)
		netdev_err(ndev, "cannot request IRQ %s\n", name);

	return error;
}

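/* Network device open function for Ethernet AVB.
 * On SoCs with multiple interrupt lines, the multiplexed, E-MAC and
 * per-queue DMAC interrupts are requested individually; the error
 * labels below unwind the requests in reverse order.
 */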
static int ravb_open(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int error;

	napi_enable(&priv->napi[RAVB_BE]);
	if (info->nc_queues)
		napi_enable(&priv->napi[RAVB_NC]);

	if (!info->multi_irqs) {
		error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
				    ndev->name, ndev);
		if (error) {
			netdev_err(ndev, "cannot request IRQ\n");
			goto out_napi_off;
		}
	} else {
		error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
				      dev, "ch22:multi");
		if (error)
			goto out_napi_off;
		error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
				      dev, "ch24:emac");
		if (error)
			goto out_free_irq;
		error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
				      ndev, dev, "ch0:rx_be");
		if (error)
			goto out_free_irq_emac;
		error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
				      ndev, dev, "ch18:tx_be");
		if (error)
			goto out_free_irq_be_rx;
		error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
				      ndev, dev, "ch1:rx_nc");
		if (error)
			goto out_free_irq_be_tx;
		error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
				      ndev, dev, "ch19:tx_nc");
		if (error)
			goto out_free_irq_nc_rx;

		if (info->err_mgmt_irqs) {
			error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
					      ndev, dev, "err_a");
			if (error)
				goto out_free_irq_nc_tx;
			error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
					      ndev, dev, "mgmt_a");
			if (error)
				goto out_free_irq_erra;
		}
	}

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error)
		goto out_free_irq_mgmta;
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	if (info->gptp)
		ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);

	/* PHY control start */
	error = ravb_phy_start(ndev);
	if (error)
		goto out_ptp_stop;

	return 0;

out_ptp_stop:
	/* Stop PTP Clock driver */
	if (info->gptp)
		ravb_ptp_stop(ndev);
out_free_irq_mgmta:
	if (!info->multi_irqs)
		goto out_free_irq;
	if (info->err_mgmt_irqs)
		free_irq(priv->mgmta_irq, ndev);
out_free_irq_erra:
	if (info->err_mgmt_irqs)
		free_irq(priv->erra_irq, ndev);
out_free_irq_nc_tx:
	free_irq(priv->tx_irqs[RAVB_NC], ndev);
out_free_irq_nc_rx:
	free_irq(priv->rx_irqs[RAVB_NC], ndev);
out_free_irq_be_tx:
	free_irq(priv->tx_irqs[RAVB_BE], ndev);
out_free_irq_be_rx:
	free_irq(priv->rx_irqs[RAVB_BE], ndev);
out_free_irq_emac:
	free_irq(priv->emac_irq, ndev);
out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	return error;
}

/* Timeout function for Ethernet AVB */
static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct ravb_private *priv = netdev_priv(ndev);

	netif_err(priv, tx_err, ndev,
		  "transmit timed out, status %08x, resetting...\n",
		  ravb_read(ndev, ISS));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	schedule_work(&priv->work);
}

static void ravb_tx_timeout_work(struct work_struct *work)
{
	struct ravb_private *priv = container_of(work, struct ravb_private,
						 work);
	const struct ravb_hw_info *info = priv->info;
	struct net_device *ndev = priv->ndev;
	int error;

	netif_tx_stop_all_queues(ndev);

	/* Stop PTP Clock driver */
	if (info->gptp)
		ravb_ptp_stop(ndev);

	/* Wait for DMA stopping */
	if (ravb_stop_dma(ndev)) {
		/* If ravb_stop_dma() fails, the hardware is still operating
		 * for TX and/or RX, so the rings must not be freed and
		 * re-initialized underneath it. Just re-enable the E-MAC
		 * RX/TX processes and leave the rings alone.
		 */
		ravb_rcv_snd_enable(ndev);
		goto out;
	}

	ravb_ring_free(ndev, RAVB_BE);
	if (info->nc_queues)
		ravb_ring_free(ndev, RAVB_NC);

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error) {
		/* If ravb_dmac_init() fails, the descriptor rings have been
		 * freed, so return here without calling ravb_emac_init() to
		 * avoid re-enabling TX and RX with no rings behind them.
		 */
		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
			   __func__, error);
		return;
	}
	ravb_emac_init(ndev);

out:
	/* Initialise PTP Clock driver */
	if (info->gptp)
		ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);
}

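/* Packet transmit function for Ethernet AVB.
 *
 * When num_tx_desc > 1, each frame occupies a descriptor pair: the
 * first few bytes are copied into an aligned bounce buffer for the
 * DT_FSTART descriptor and the remainder of the skb is mapped for the
 * DT_FEND descriptor. A frame using a single descriptor is sent as
 * DT_FSINGLE.
 */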
1935 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1936 {
1937 struct ravb_private *priv = netdev_priv(ndev);
1938 const struct ravb_hw_info *info = priv->info;
1939 unsigned int num_tx_desc = priv->num_tx_desc;
1940 u16 q = skb_get_queue_mapping(skb);
1941 struct ravb_tstamp_skb *ts_skb;
1942 struct ravb_tx_desc *desc;
1943 unsigned long flags;
1944 u32 dma_addr;
1945 void *buffer;
1946 u32 entry;
1947 u32 len;
1948
1949 spin_lock_irqsave(&priv->lock, flags);
1950 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
1951 num_tx_desc) {
1952 netif_err(priv, tx_queued, ndev,
1953 "still transmitting with the full ring!\n");
1954 netif_stop_subqueue(ndev, q);
1955 spin_unlock_irqrestore(&priv->lock, flags);
1956 return NETDEV_TX_BUSY;
1957 }
1958
1959 if (skb_put_padto(skb, ETH_ZLEN))
1960 goto exit;
1961
1962 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
1963 priv->tx_skb[q][entry / num_tx_desc] = skb;
1964
1965 if (num_tx_desc > 1) {
1966 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1967 entry / num_tx_desc * DPTR_ALIGN;
1968 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982 if (len == 0)
1983 len = DPTR_ALIGN;
1984
1985 memcpy(buffer, skb->data, len);
1986 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1987 DMA_TO_DEVICE);
1988 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1989 goto drop;
1990
1991 desc = &priv->tx_ring[q][entry];
1992 desc->ds_tagl = cpu_to_le16(len);
1993 desc->dptr = cpu_to_le32(dma_addr);
1994
1995 buffer = skb->data + len;
1996 len = skb->len - len;
1997 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1998 DMA_TO_DEVICE);
1999 if (dma_mapping_error(ndev->dev.parent, dma_addr))
2000 goto unmap;
2001
2002 desc++;
2003 } else {
2004 desc = &priv->tx_ring[q][entry];
2005 len = skb->len;
2006 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
2007 DMA_TO_DEVICE);
2008 if (dma_mapping_error(ndev->dev.parent, dma_addr))
2009 goto drop;
2010 }
2011 desc->ds_tagl = cpu_to_le16(len);
2012 desc->dptr = cpu_to_le32(dma_addr);
2013
2014
2015 if (info->gptp || info->ccc_gac) {
2016 if (q == RAVB_NC) {
2017 ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
2018 if (!ts_skb) {
2019 if (num_tx_desc > 1) {
2020 desc--;
2021 dma_unmap_single(ndev->dev.parent, dma_addr,
2022 len, DMA_TO_DEVICE);
2023 }
2024 goto unmap;
2025 }
2026 ts_skb->skb = skb_get(skb);
2027 ts_skb->tag = priv->ts_skb_tag++;
2028 priv->ts_skb_tag &= 0x3ff;
2029 list_add_tail(&ts_skb->list, &priv->ts_skb_list);
2030
2031
2032 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2033 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
2034 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
2035 }
2036
2037 skb_tx_timestamp(skb);
2038 }
2039
2040 dma_wmb();
2041 if (num_tx_desc > 1) {
2042 desc->die_dt = DT_FEND;
2043 desc--;
2044 desc->die_dt = DT_FSTART;
2045 } else {
2046 desc->die_dt = DT_FSINGLE;
2047 }
2048 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
2049
2050 priv->cur_tx[q] += num_tx_desc;
2051 if (priv->cur_tx[q] - priv->dirty_tx[q] >
2052 (priv->num_tx_ring[q] - 1) * num_tx_desc &&
2053 !ravb_tx_free(ndev, q, true))
2054 netif_stop_subqueue(ndev, q);
2055
2056 exit:
2057 spin_unlock_irqrestore(&priv->lock, flags);
2058 return NETDEV_TX_OK;
2059
2060 unmap:
2061 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
2062 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
2063 drop:
2064 dev_kfree_skb_any(skb);
2065 priv->tx_skb[q][entry / num_tx_desc] = NULL;
2066 goto exit;
2067 }
2068
2069 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
2070 struct net_device *sb_dev)
2071 {
2072
2073 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
2074 RAVB_BE;
2075
2076 }
2077
2078 static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
2079 {
2080 struct ravb_private *priv = netdev_priv(ndev);
2081 const struct ravb_hw_info *info = priv->info;
2082 struct net_device_stats *nstats, *stats0, *stats1;
2083
2084 nstats = &ndev->stats;
2085 stats0 = &priv->stats[RAVB_BE];
2086
2087 if (info->tx_counters) {
2088 nstats->tx_dropped += ravb_read(ndev, TROCR);
2089 ravb_write(ndev, 0, TROCR);
2090 }
2091
2092 if (info->carrier_counters) {
2093 nstats->collisions += ravb_read(ndev, CXR41);
2094 ravb_write(ndev, 0, CXR41);
2095 nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
2096 ravb_write(ndev, 0, CXR42);
2097 }
2098
2099 nstats->rx_packets = stats0->rx_packets;
2100 nstats->tx_packets = stats0->tx_packets;
2101 nstats->rx_bytes = stats0->rx_bytes;
2102 nstats->tx_bytes = stats0->tx_bytes;
2103 nstats->multicast = stats0->multicast;
2104 nstats->rx_errors = stats0->rx_errors;
2105 nstats->rx_crc_errors = stats0->rx_crc_errors;
2106 nstats->rx_frame_errors = stats0->rx_frame_errors;
2107 nstats->rx_length_errors = stats0->rx_length_errors;
2108 nstats->rx_missed_errors = stats0->rx_missed_errors;
2109 nstats->rx_over_errors = stats0->rx_over_errors;
2110 if (info->nc_queues) {
2111 stats1 = &priv->stats[RAVB_NC];
2112
2113 nstats->rx_packets += stats1->rx_packets;
2114 nstats->tx_packets += stats1->tx_packets;
2115 nstats->rx_bytes += stats1->rx_bytes;
2116 nstats->tx_bytes += stats1->tx_bytes;
2117 nstats->multicast += stats1->multicast;
2118 nstats->rx_errors += stats1->rx_errors;
2119 nstats->rx_crc_errors += stats1->rx_crc_errors;
2120 nstats->rx_frame_errors += stats1->rx_frame_errors;
2121 nstats->rx_length_errors += stats1->rx_length_errors;
2122 nstats->rx_missed_errors += stats1->rx_missed_errors;
2123 nstats->rx_over_errors += stats1->rx_over_errors;
2124 }
2125
2126 return nstats;
2127 }
2128
2129
2130 static void ravb_set_rx_mode(struct net_device *ndev)
2131 {
2132 struct ravb_private *priv = netdev_priv(ndev);
2133 unsigned long flags;
2134
2135 spin_lock_irqsave(&priv->lock, flags);
2136 ravb_modify(ndev, ECMR, ECMR_PRM,
2137 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
2138 spin_unlock_irqrestore(&priv->lock, flags);
2139 }

/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;

	netif_tx_stop_all_queues(ndev);

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Stop PTP Clock driver */
	if (info->gptp)
		ravb_ptp_stop(ndev);

	/* Set the config mode to stop the AVB-DMAC's processes */
	if (ravb_stop_dma(ndev) < 0)
		netdev_err(ndev,
			   "device will be stopped after h/w processes are done.\n");

	/* Clear the timestamp list */
	if (info->gptp || info->ccc_gac) {
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
			list_del(&ts_skb->list);
			kfree_skb(ts_skb->skb);
			kfree(ts_skb);
		}
	}

	/* PHY disconnect */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
		if (of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
	}

	if (info->multi_irqs) {
		free_irq(priv->tx_irqs[RAVB_NC], ndev);
		free_irq(priv->rx_irqs[RAVB_NC], ndev);
		free_irq(priv->tx_irqs[RAVB_BE], ndev);
		free_irq(priv->rx_irqs[RAVB_BE], ndev);
		free_irq(priv->emac_irq, ndev);
		if (info->err_mgmt_irqs) {
			free_irq(priv->erra_irq, ndev);
			free_irq(priv->mgmta_irq, ndev);
		}
	}
	free_irq(ndev->irq, ndev);

	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);

	/* Free all the skb's in the RX queue and the DMA buffers. */
	ravb_ring_free(ndev, RAVB_BE);
	if (info->nc_queues)
		ravb_ring_free(ndev, RAVB_NC);

	return 0;
}
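
/* The teardown above is roughly ravb_open() in reverse: mask interrupts,
 * stop gPTP and the AVB-DMAC, drop any timestamp skbs still waiting for
 * a hardware completion, detach the PHY, release IRQs, quiesce NAPI and
 * only then free the descriptor rings, so no context can touch a ring
 * that is being torn down.
 */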

static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						HWTSTAMP_TX_OFF;
	switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
	case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RAVB_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* Control hardware time stamping */
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;
	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
	}

	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
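
/* Usage sketch (hypothetical userspace caller, not part of this driver):
 * the handler above is reached through the standard SIOCSHWTSTAMP ioctl,
 * e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Filters the hardware cannot match exactly are coerced to
 * HWTSTAMP_FILTER_ALL and the adjusted config is copied back to the
 * caller, as the API requires.
 */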

/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ravb_hwtstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return ravb_hwtstamp_set(ndev, req);
	}

	return phy_mii_ioctl(phydev, req, cmd);
}

static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ndev->mtu = new_mtu;

	if (netif_running(ndev)) {
		synchronize_irq(priv->emac_irq);
		ravb_emac_init(ndev);
	}

	netdev_update_features(ndev);

	return 0;
}
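
/* Worked example of the MTU bound set up in ravb_probe(): max_mtu is
 * rx_max_buf_size minus the Ethernet overhead (ETH_HLEN + VLAN_HLEN +
 * ETH_FCS_LEN = 14 + 4 + 4 = 22 bytes), so
 *
 *	R-Car (SZ_2K): 2048 - 22 = 2026
 *	GbEth (SZ_8K): 8192 - 22 = 8170
 *
 * and new_mtu has already been validated against [min_mtu, max_mtu] by
 * the networking core before this callback runs.
 */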

static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX */
	ravb_rcv_snd_disable(ndev);

	/* Modify RX Checksum setting */
	ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);

	/* Enable TX and RX */
	ravb_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int ravb_set_features_gbeth(struct net_device *ndev,
				   netdev_features_t features)
{
	/* Place holder */
	return 0;
}

static int ravb_set_features_rcar(struct net_device *ndev,
				  netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);

	ndev->features = features;

	return 0;
}

static int ravb_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	return info->set_feature(ndev, features);
}

static const struct net_device_ops ravb_netdev_ops = {
	.ndo_open		= ravb_open,
	.ndo_stop		= ravb_close,
	.ndo_start_xmit		= ravb_start_xmit,
	.ndo_select_queue	= ravb_select_queue,
	.ndo_get_stats		= ravb_get_stats,
	.ndo_set_rx_mode	= ravb_set_rx_mode,
	.ndo_tx_timeout		= ravb_tx_timeout,
	.ndo_eth_ioctl		= ravb_do_ioctl,
	.ndo_change_mtu		= ravb_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= ravb_set_features,
};

/* MDIO bus init function */
static int ravb_mdio_init(struct ravb_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int error;

	/* Bitbang init */
	priv->mdiobb.ops = &bb_ops;

	/* MII controller setting */
	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
	if (!priv->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	priv->mii_bus->name = "ravb_mii";
	priv->mii_bus->parent = dev;
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register MDIO bus */
	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
	if (error)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(priv->mii_bus);
	return error;
}

/* MDIO bus release function */
static int ravb_mdio_release(struct ravb_private *priv)
{
	/* Unregister mdio bus */
	mdiobus_unregister(priv->mii_bus);

	/* Free bitbang info */
	free_mdio_bitbang(priv->mii_bus);

	return 0;
}

static const struct ravb_hw_info ravb_gen3_hw_info = {
	.rx_ring_free = ravb_rx_ring_free_rcar,
	.rx_ring_format = ravb_rx_ring_format_rcar,
	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.rx_max_buf_size = SZ_2K,
	.internal_delay = 1,
	.tx_counters = 1,
	.multi_irqs = 1,
	.irq_en_dis = 1,
	.ccc_gac = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};

static const struct ravb_hw_info ravb_gen2_hw_info = {
	.rx_ring_free = ravb_rx_ring_free_rcar,
	.rx_ring_format = ravb_rx_ring_format_rcar,
	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.rx_max_buf_size = SZ_2K,
	.aligned_tx = 1,
	.gptp = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};

static const struct ravb_hw_info ravb_rzv2m_hw_info = {
	.rx_ring_free = ravb_rx_ring_free_rcar,
	.rx_ring_format = ravb_rx_ring_format_rcar,
	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
	.emac_init = ravb_emac_init_rcar,
	.gstrings_stats = ravb_gstrings_stats,
	.gstrings_size = sizeof(ravb_gstrings_stats),
	.net_hw_features = NETIF_F_RXCSUM,
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.rx_max_buf_size = SZ_2K,
	.multi_irqs = 1,
	.err_mgmt_irqs = 1,
	.gptp = 1,
	.gptp_ref_clk = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};

static const struct ravb_hw_info gbeth_hw_info = {
	.rx_ring_free = ravb_rx_ring_free_gbeth,
	.rx_ring_format = ravb_rx_ring_format_gbeth,
	.alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
	.receive = ravb_rx_gbeth,
	.set_rate = ravb_set_rate_gbeth,
	.set_feature = ravb_set_features_gbeth,
	.dmac_init = ravb_dmac_init_gbeth,
	.emac_init = ravb_emac_init_gbeth,
	.gstrings_stats = ravb_gstrings_stats_gbeth,
	.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
	.max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
	.tccr_mask = TCCR_TSRQ0,
	.rx_max_buf_size = SZ_8K,
	.aligned_tx = 1,
	.tx_counters = 1,
	.carrier_counters = 1,
	.half_duplex = 1,
};
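
/* Quick contrast between the per-SoC descriptors above: the R-Car and
 * RZ/V2M variants keep the RX checksum offload, four TX queue request
 * bits in tccr_mask and a separate network-control queue, while the
 * RZ/G2L GbEth IP exposes a single TX request bit (TCCR_TSRQ0), no NC
 * queues, larger 8 KiB RX buffers, extra carrier/collision counters and
 * half-duplex support.
 */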

static const struct of_device_id ravb_match_table[] = {
	{ .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
	{ .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
	{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
	{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
	{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
	{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
	{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
	{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
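
/* Illustrative (hypothetical) device tree node this table would match;
 * the SoC-specific compatible is listed first, with the family fallback
 * after it:
 *
 *	ethernet@e6800000 {
 *		compatible = "renesas,etheravb-r8a7795",
 *			     "renesas,etheravb-rcar-gen3";
 *		phy-mode = "rgmii-txid";
 *	};
 */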

static int ravb_set_gti(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = ndev->dev.parent;
	unsigned long rate;
	uint64_t inc;

	if (info->gptp_ref_clk)
		rate = clk_get_rate(priv->gptp_clk);
	else
		rate = clk_get_rate(priv->clk);
	if (!rate)
		return -EINVAL;

	inc = div64_ul(1000000000ULL << 20, rate);

	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
			inc, GTI_TIV_MIN, GTI_TIV_MAX);
		return -EINVAL;
	}

	ravb_write(ndev, inc, GTI);

	return 0;
}
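
/* Worked example for the GTI computation above: the timer increment is
 * the clock period in nanoseconds as a fixed-point value with 20
 * fractional bits, inc = (10^9 << 20) / rate. For a 125 MHz gPTP clock
 * (8 ns period) this gives
 *
 *	inc = (1000000000 << 20) / 125000000 = 8 << 20 = 0x800000
 *
 * and any rate whose increment falls outside [GTI_TIV_MIN, GTI_TIV_MAX]
 * is rejected.
 */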

static void ravb_set_config_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (info->gptp) {
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
		/* Set CSEL value */
		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
	} else if (info->ccc_gac) {
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
			    CCC_GAC | CCC_CSEL_HPB);
	} else {
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
	}
}

/* Set tx and rx clock internal delay modes */
static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	bool explicit_delay = false;
	u32 delay;

	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
		priv->rxcidm = !!delay;
		explicit_delay = true;
	}
	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
		priv->txcidm = !!delay;
		explicit_delay = true;
	}

	if (explicit_delay)
		return;

	/* Fall back to legacy rgmii-*id behavior */
	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		priv->rxcidm = 1;
		priv->rgmii_override = 1;
	}

	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		priv->txcidm = 1;
		priv->rgmii_override = 1;
	}
}

static void ravb_set_delay_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 set = 0;

	if (priv->rxcidm)
		set |= APSR_RDM;
	if (priv->txcidm)
		set |= APSR_TDM;
	ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}
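
/* Sketch of a (hypothetical) set of bindings consumed by the two helpers
 * above:
 *
 *	&avb {
 *		phy-mode = "rgmii";
 *		rx-internal-delay-ps = <1800>;
 *		tx-internal-delay-ps = <2000>;
 *	};
 *
 * Both properties being non-zero sets rxcidm = txcidm = 1, so
 * ravb_set_delay_mode() asserts APSR_RDM and APSR_TDM to enable the
 * MAC's internal RX/TX clock delays.
 */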

static int ravb_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ravb_hw_info *info;
	struct reset_control *rstc;
	struct ravb_private *priv;
	struct net_device *ndev;
	int error, irq, q;
	struct resource *res;
	int i;

	if (!np) {
		dev_err(&pdev->dev,
			"this driver is required to be instantiated from device tree\n");
		return -EINVAL;
	}

	rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
				     "failed to get cpg reset\n");

	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
				  NUM_TX_QUEUE, NUM_RX_QUEUE);
	if (!ndev)
		return -ENOMEM;

	info = of_device_get_match_data(&pdev->dev);

	ndev->features = info->net_features;
	ndev->hw_features = info->net_hw_features;

	reset_control_deassert(rstc);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	if (info->multi_irqs) {
		if (info->err_mgmt_irqs)
			irq = platform_get_irq_byname(pdev, "dia");
		else
			irq = platform_get_irq_byname(pdev, "ch22");
	} else {
		irq = platform_get_irq(pdev, 0);
	}
	if (irq < 0) {
		error = irq;
		goto out_release;
	}
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->info = info;
	priv->rstc = rstc;
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
	if (info->nc_queues) {
		priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
		priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
	}

	priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(priv->addr)) {
		error = PTR_ERR(priv->addr);
		goto out_release;
	}

	/* The Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;

	spin_lock_init(&priv->lock);
	INIT_WORK(&priv->work, ravb_tx_timeout_work);

	error = of_get_phy_mode(np, &priv->phy_interface);
	if (error && error != -ENODEV)
		goto out_release;

	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
	priv->avb_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	if (info->multi_irqs) {
		if (info->err_mgmt_irqs)
			irq = platform_get_irq_byname(pdev, "line3");
		else
			irq = platform_get_irq_byname(pdev, "ch24");
		if (irq < 0) {
			error = irq;
			goto out_release;
		}
		priv->emac_irq = irq;
		for (i = 0; i < NUM_RX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->rx_irqs[i] = irq;
		}
		for (i = 0; i < NUM_TX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->tx_irqs[i] = irq;
		}

		if (info->err_mgmt_irqs) {
			irq = platform_get_irq_byname(pdev, "err_a");
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->erra_irq = irq;

			irq = platform_get_irq_byname(pdev, "mgmt_a");
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->mgmta_irq = irq;
		}
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		error = PTR_ERR(priv->clk);
		goto out_release;
	}

	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
	if (IS_ERR(priv->refclk)) {
		error = PTR_ERR(priv->refclk);
		goto out_release;
	}
	clk_prepare_enable(priv->refclk);

	if (info->gptp_ref_clk) {
		priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
		if (IS_ERR(priv->gptp_clk)) {
			error = PTR_ERR(priv->gptp_clk);
			goto out_disable_refclk;
		}
		clk_prepare_enable(priv->gptp_clk);
	}

	ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	ndev->min_mtu = ETH_MIN_MTU;

	/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer.
	 * Use two descriptors to handle such situation. First descriptor to
	 * handle aligned data buffer and second descriptor to handle the
	 * overflow data because of alignment.
	 */
	priv->num_tx_desc = info->aligned_tx ? 2 : 1;

	/* Set function */
	ndev->netdev_ops = &ravb_netdev_ops;
	ndev->ethtool_ops = &ravb_ethtool_ops;

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	if (info->gptp || info->ccc_gac) {
		/* Set GTI value */
		error = ravb_set_gti(ndev);
		if (error)
			goto out_disable_gptp_clk;

		/* Request GTI loading */
		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
	}

	if (info->internal_delay) {
		ravb_parse_delay_mode(np, ndev);
		ravb_set_delay_mode(ndev);
	}

	/* Allocate descriptor base address table; unused entries stay DT_EOS */
	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
					    &priv->desc_bat_dma, GFP_KERNEL);
	if (!priv->desc_bat) {
		dev_err(&pdev->dev,
			"Cannot allocate desc base address table (size %d bytes)\n",
			priv->desc_bat_size);
		error = -ENOMEM;
		goto out_disable_gptp_clk;
	}
	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
		priv->desc_bat[q].die_dt = DT_EOS;
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	/* Initialise HW timestamp list */
	INIT_LIST_HEAD(&priv->ts_skb_list);

	/* Initialise PTP Clock driver */
	if (info->ccc_gac)
		ravb_ptp_init(ndev, pdev);

	/* Debug message level */
	priv->msg_enable = RAVB_DEF_MSG_ENABLE;

	/* Read and set MAC address; fall back to a random one if none is valid */
	ravb_read_mac_address(np, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one\n");
		eth_hw_addr_random(ndev);
	}

	/* MDIO bus init */
	error = ravb_mdio_init(priv);
	if (error) {
		dev_err(&pdev->dev, "failed to initialize MDIO\n");
		goto out_dma_free;
	}

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
	if (info->nc_queues)
		netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

	/* Network device register */
	error = register_netdev(ndev);
	if (error)
		goto out_napi_del;

	device_set_wakeup_capable(&pdev->dev, 1);

	/* Print device information */
	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return 0;

out_napi_del:
	if (info->nc_queues)
		netif_napi_del(&priv->napi[RAVB_NC]);

	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
out_dma_free:
	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);

	/* Stop PTP Clock driver */
	if (info->ccc_gac)
		ravb_ptp_stop(ndev);
out_disable_gptp_clk:
	clk_disable_unprepare(priv->gptp_clk);
out_disable_refclk:
	clk_disable_unprepare(priv->refclk);
out_release:
	free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(rstc);
	return error;
}

static int ravb_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	/* Stop PTP Clock driver */
	if (info->ccc_gac)
		ravb_ptp_stop(ndev);

	clk_disable_unprepare(priv->gptp_clk);
	clk_disable_unprepare(priv->refclk);

	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);

	/* Set reset mode */
	ravb_write(ndev, CCC_OPC_RESET, CCC);
	pm_runtime_put_sync(&pdev->dev);
	unregister_netdev(ndev);
	if (info->nc_queues)
		netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(priv->rstc);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int ravb_wol_setup(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Only allow ECI interrupts */
	synchronize_irq(priv->emac_irq);
	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);

	/* Enable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);

	return enable_irq_wake(priv->emac_irq);
}
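
/* Usage sketch (hypothetical, from userspace): Magic Packet wakeup is
 * armed through the standard ethtool WoL interface, e.g.
 *
 *	ethtool -s eth0 wol g
 *
 * which sets priv->wol_enabled via the driver's ethtool ops, so a
 * subsequent ravb_suspend() takes the ravb_wol_setup() path instead of
 * fully closing the device.
 */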

static int ravb_wol_restore(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (info->nc_queues)
		napi_enable(&priv->napi[RAVB_NC]);
	napi_enable(&priv->napi[RAVB_BE]);

	/* Disable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);

	ravb_close(ndev);

	return disable_irq_wake(priv->emac_irq);
}

static int __maybe_unused ravb_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	if (priv->wol_enabled)
		ret = ravb_wol_setup(ndev);
	else
		ret = ravb_close(ndev);

	return ret;
}

static int __maybe_unused ravb_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int ret = 0;

	/* If WoL is enabled set reset mode to rearm the WoL logic */
	if (priv->wol_enabled)
		ravb_write(ndev, CCC_OPC_RESET, CCC);

	/* All registers have been reset to default values. Restore all
	 * registers which were set up at probe time and reopen the device
	 * if it was running before the system suspended.
	 */

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	if (info->gptp || info->ccc_gac) {
		/* Set GTI value */
		ret = ravb_set_gti(ndev);
		if (ret)
			return ret;

		/* Request GTI loading */
		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
	}

	if (info->internal_delay)
		ravb_set_delay_mode(ndev);

	/* Restore descriptor base address table */
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	if (netif_running(ndev)) {
		if (priv->wol_enabled) {
			ret = ravb_wol_restore(ndev);
			if (ret)
				return ret;
		}
		ret = ravb_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}

static int __maybe_unused ravb_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops ravb_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
	SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};

static struct platform_driver ravb_driver = {
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.driver = {
		.name		= "ravb",
		.pm		= &ravb_dev_pm_ops,
		.of_match_table = ravb_match_table,
	},
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");