#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>

#include "xgbe.h"
#include "xgbe-common.h"

static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
	return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/* Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_mhz / 10^6 ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}
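
/* Added illustration (not in the original source): with a 125 MHz system
 * clock, 100 usec converts to a watchdog value of (100 * 125) / 256 = 48,
 * and 48 converts back to (48 * 256) / 125 = 98 usec, so a round trip
 * through these integer-math helpers can lose a little precision.
 */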

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/* Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}

static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
	unsigned int pblx8, pbl;
	unsigned int i;

	pblx8 = DMA_PBL_X8_DISABLE;
	pbl = pdata->pbl;

	if (pdata->pbl > 32) {
		pblx8 = DMA_PBL_X8_ENABLE;
		pbl >>= 3;
	}

	for (i = 0; i < pdata->channel_count; i++) {
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
				       pblx8);

		if (pdata->channel[i]->tx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
					       PBL, pbl);

		if (pdata->channel[i]->rx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
					       PBL, pbl);
	}

	return 0;
}
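
/* Added note (not in the original source): PBL values above 32 cannot be
 * programmed directly, so the PBLX8 multiplier is enabled and the requested
 * value is divided by 8; e.g. a requested PBL of 256 is written as 32 with
 * PBLX8 set, giving an effective DMA burst length of 256.
 */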

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
				       pdata->rx_riwt);
	}

	return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
				       pdata->rx_buf_size);
	}
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
	}
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
	}

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}

static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
			      unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mutex_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		usleep_range(1000, 1500);
	}

	ret = -EBUSY;

unlock:
	mutex_unlock(&pdata->rss_mutex);

	return ret;
}
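
/* Added note (not in the original source): MAC_RSSDR carries the value and
 * MAC_RSSAR selects where it goes; setting OB ("operation busy") starts the
 * indirect write, and the loop above then polls for hardware to clear OB,
 * giving up after roughly one second.
 */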

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
					 key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata,
					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
					 pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
				     const u32 *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return xgbe_write_rss_lookup_table(pdata);
}

static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	if (pdata->netdev->features & NETIF_F_RXHASH)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		netdev_err(pdata->netdev,
			   "error configuring RSS, RSS disabled\n");
}

static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
			      unsigned int queue)
{
	unsigned int prio, tc;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		/* Does this queue handle the priority? */
		if (pdata->prio2q_map[prio] != queue)
			continue;

		/* Get the Traffic Class for this priority */
		tc = pdata->ets->prio_tc[prio];

		/* Check if PFC is enabled for this traffic class */
		if (pdata->pfc->pfc_en & (1 << tc))
			return true;
	}

	return false;
}

static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata)
{
	/* Program the VXLAN port */
	XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port);

	netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
		  pdata->vxlan_port);
}

static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.vxn)
		return;

	/* Program the VXLAN port */
	xgbe_set_vxlan_id(pdata);

	/* Allow for IPv6/UDP zero-checksum VXLAN packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1);

	/* Enable VXLAN tunneling mode */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1);

	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
}

static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.vxn)
		return;

	/* Disable tunneling mode */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0);

	/* Clear the IPv6/UDP zero-checksum VXLAN packets setting */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0);

	/* Clear the VXLAN port */
	XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0);

	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}

static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;
	struct ieee_ets *ets = pdata->ets;
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		if (pdata->rx_rfd[i]) {
			/* Flow control thresholds are established */
			if (pfc && ets) {
				if (xgbe_is_pfc_queue(pdata, i))
					ehfc = 1;
			} else {
				ehfc = 1;
			}
		}

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		netif_dbg(pdata, drv, pdata->netdev,
			  "flow control %s for RXq%u\n",
			  ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->tx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->rx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
			   (pfc && pfc->pfc_en) ? 1 : 0);
}

static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i, ver;

	/* Set the interrupt mode if supported */
	if (pdata->channel_irq_mode)
		XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
				   pdata->channel_irq_mode);

	ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		/* Clear all the interrupts which are set */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
				  XGMAC_DMA_IOREAD(channel, DMA_CH_SR));

		/* Clear all interrupt enable bits */
		channel->curr_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		if (ver < 0x21) {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
		} else {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
		}
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless using
			 *         per channel interrupts in edge triggered
			 *         mode)
			 */
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
					       DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts in edge triggered
			 *          mode)
			 */
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
					       DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
	}
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);

	/* Enable MDIO single command completion interrupt */
	XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}

static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int ecc_isr, ecc_ier = 0;

	if (!pdata->vdata->ecc_support)
		return;

	/* Clear all the interrupts which are set */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Enable ECC interrupts */
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
{
	unsigned int ecc_ier;

	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

	/* Disable ECC DED interrupts */
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
				 enum xgbe_ecc_sec sec)
{
	unsigned int ecc_ier;

	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

	/* Disable ECC SEC interrupt */
	switch (sec) {
	case XGBE_ECC_SEC_TX:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
		break;
	case XGBE_ECC_SEC_RX:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
		break;
	case XGBE_ECC_SEC_DESC:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
		break;
	}

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return -EINVAL;
	}

	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	return 0;
}

static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= CRC32_POLY_LE;
	}

	return crc;
}
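
/* Added note (not in the original source): this computes a little-endian
 * CRC-32 over the 12 valid VLAN ID bits (get_bitmask_order(VLAN_VID_MASK)
 * is 12). The caller bit-reverses the complemented result and keeps only
 * the top four bits, so each VLAN ID selects one of the 16 bins of the
 * VLAN hash table written below.
 */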

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	u32 crc;
	u16 vid;
	__le16 vid_le;
	u16 vlan_hash_table = 0;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	return 0;
}

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
				     unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			xgbe_enable_rx_vlan_filtering(pdata);
	}

	return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return 0;
}

static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
	u32 crc;
	unsigned int i;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
		hash_reg += MAC_HTR_INC;
	}
}
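
/* Added illustration (not in the original source): hash_table_shift keeps
 * only the top CRC bits that index the table; e.g. a 64-entry table
 * (64 >> 7 == 0) shifts by 26 and keeps 6 bits, while a 256-entry table
 * (256 >> 7 == 2) shifts by 24 and keeps 8 bits. After the shift,
 * crc >> 5 picks the 32-bit register and crc & 0x1f the bit within it.
 */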

static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	if (pdata->hw_feat.hash_table_size)
		xgbe_set_mac_hash_table(pdata);
	else
		xgbe_set_mac_addn_addrs(pdata);

	return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] << 8) | (addr[0] << 0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	unsigned int pr_mode, am_mode;

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);

	xgbe_add_mac_addresses(pdata);

	return 0;
}

static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return -EINVAL;

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg &= ~(1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return 0;
}

static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return -EINVAL;

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg |= (1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return 0;
}

static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}

static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}

static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
	unsigned int mmd_address;
	unsigned long flags;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and writing 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);

	case XGBE_XPCS_ACCESS_V2:
	default:
		return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);

	case XGBE_XPCS_ACCESS_V2:
	default:
		return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
	}
}

static unsigned int xgbe_create_mdio_sca(int port, int reg)
{
	unsigned int mdio_sca, da;

	da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;

	mdio_sca = 0;
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);

	return mdio_sca;
}
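
/* Added note (not in the original source): for clause-45 accesses the MMD
 * device address travels in the upper bits of the reg argument (the
 * MII_ADDR_C45 convention), so reg >> 16 yields the device address;
 * XGMAC_SET_BITS masks the value to the width of the DA field, which
 * discards the MII_ADDR_C45 flag bit. For clause-22 accesses DA stays 0.
 */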

static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
				   int reg, u16 val)
{
	unsigned int mdio_sca, mdio_sccd;

	reinit_completion(&pdata->mdio_complete);

	mdio_sca = xgbe_create_mdio_sca(addr, reg);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio write operation timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
				  int reg)
{
	unsigned int mdio_sca, mdio_sccd;

	reinit_completion(&pdata->mdio_complete);

	mdio_sca = xgbe_create_mdio_sca(addr, reg);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio read operation timed out\n");
		return -ETIMEDOUT;
	}

	return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
				 enum xgbe_mdio_mode mode)
{
	unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);

	switch (mode) {
	case XGBE_MDIO_MODE_CL22:
		if (port > XGMAC_MAX_C22_PORT)
			return -EINVAL;
		reg_val |= (1 << port);
		break;
	case XGBE_MDIO_MODE_CL45:
		break;
	default:
		return -EINVAL;
	}

	XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return 0;
}

static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	return 0;
}

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}

static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	DBGPR("-->tx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--tx_desc_init\n");
}

static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
			       struct xgbe_ring_data *rdata, unsigned int index)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	dma_addr_t hdr_dma, buf_dma;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to header dma address (lo)
	 *   Set buffer 1 (hi) address to header dma address (hi)
	 *   Set buffer 2 (lo) address to buffer dma address (lo)
	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
	 *     set control bits OWN and INTE
	 */
	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}

static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;
	unsigned int i;

	DBGPR("-->rx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xgbe_rx_desc_reset(pdata, rdata, i);
	}

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Update the Rx Descriptor Tail Pointer */
	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--rx_desc_init\n");
}

static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
				      unsigned int addend)
{
	unsigned int count = 10000;

	/* Set the addend register value and tell the device */
	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

	/* Wait for addend update to complete */
	while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		udelay(5);

	if (!count)
		netdev_err(pdata->netdev,
			   "timed out updating timestamp addend register\n");
}

static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
				 unsigned int nsec)
{
	unsigned int count = 10000;

	/* Set the time values and tell the device */
	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

	/* Wait for time update to complete */
	while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		udelay(5);

	if (!count)
		netdev_err(pdata->netdev, "timed out initializing timestamp\n");
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
	u64 nsec;

	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += XGMAC_IOREAD(pdata, MAC_STNR);

	return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
	unsigned int tx_snr, tx_ssr;
	u64 nsec;

	if (pdata->vdata->tx_tstamp_workaround) {
		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
	} else {
		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
	}

	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
		return 0;

	nsec = tx_ssr;
	nsec *= NSEC_PER_SEC;
	nsec += tx_snr;

	return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
			       struct xgbe_ring_desc *rdesc)
{
	u64 nsec;

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
		nsec = le32_to_cpu(rdesc->desc1);
		nsec <<= 32;
		nsec |= le32_to_cpu(rdesc->desc0);
		if (nsec != 0xffffffffffffffffULL) {
			packet->rx_tstamp = nsec;
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       RX_TSTAMP, 1);
		}
	}
}

static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
			      unsigned int mac_tscr)
{
	/* Set one nano-second accuracy */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Exit if timestamping is not enabled */
	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
		return 0;

	/* Initialize time registers */
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	xgbe_set_tstamp_time(pdata, 0, 0);

	/* Initialize the timecounter */
	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
			 ktime_to_ns(ktime_get_real()));

	return 0;
}

static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
			       struct xgbe_ring *ring)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring_data *rdata;

	/* Make sure everything is written before the register write */
	wmb();

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor
	 */
	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Start the Tx timer */
	if (pdata->tx_usecs && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}

	ring->tx.xmit_more = 0;
}

static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int tx_packets, tx_bytes;
	unsigned int csum, tso, vlan, vxlan;
	unsigned int tso_context, vlan_context;
	unsigned int tx_set_ic;
	int start_index = ring->cur;
	int cur_index = ring->cur;
	int i;

	DBGPR("-->xgbe_dev_xmit\n");

	tx_packets = packet->tx_packets;
	tx_bytes = packet->tx_bytes;

	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      CSUM_ENABLE);
	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);
	vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VXLAN);

	if (tso && (packet->mss != ring->tx.cur_mss))
		tso_context = 1;
	else
		tso_context = 0;

	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
		vlan_context = 1;
	else
		vlan_context = 0;

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	ring->coalesce_count += tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	rdesc = rdata->rdesc;

	/* Create a context descriptor if this is a TSO packet */
	if (tso_context || vlan_context) {
		if (tso_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "TSO context descriptor, mss=%u\n",
				  packet->mss);

			/* Set the MSS size */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
					  MSS, packet->mss);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Indicate this descriptor contains the MSS */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  TCMSSV, 1);

			ring->tx.cur_mss = packet->mss;
		}

		if (vlan_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "VLAN context descriptor, ctag=%u\n",
				  packet->vlan_ctag);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Set the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VT, packet->vlan_ctag);

			/* Indicate this descriptor contains the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VLTV, 1);

			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
		}

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;
	}

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

	/* Update the buffer length */
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
			  rdata->skb_dma_len);

	/* VLAN tag insertion check */
	if (vlan)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
				  TX_NORMAL_DESC2_VLAN_INSERT);

	/* Timestamp enablement check */
	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/* Set OWN bit if not the first descriptor */
	if (cur_index != start_index)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (tso) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
				  packet->tcp_payload_len);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
				  packet->tcp_header_len / 4);

		pdata->ext_stats.tx_tso_packets += tx_packets;
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);

		/* Set the total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
				  packet->length);
	}

	if (vxlan) {
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP,
				  TX_NORMAL_DESC3_VXLAN_PACKET);

		pdata->ext_stats.tx_vxlan_packets += packet->tx_packets;
	}

	for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;

		/* Update buffer address */
		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

		/* Update the buffer length */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
				  rdata->skb_dma_len);

		/* Set OWN bit */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

		/* Mark it as NORMAL descriptor */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	/* Save the Tx info to report back during cleanup */
	rdata->tx.packets = tx_packets;
	rdata->tx.bytes = tx_bytes;

	pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets;
	pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes;

	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the first descriptor
	 */
	dma_wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (netif_msg_tx_queued(pdata))
		xgbe_dump_tx_desc(pdata, ring, start_index,
				  packet->rdesc_count, 1);

	/* Make sure ownership is written to the descriptor */
	smp_wmb();

	ring->cur = cur_index + 1;
	if (!netdev_xmit_more() ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xgbe_tx_start_xmit(channel, ring);
	else
		ring->tx.xmit_more = 1;

	DBGPR("  %s: descriptors %u to %u written\n",
	      channel->name, start_index & (ring->rdesc_count - 1),
	      (ring->cur - 1) & (ring->rdesc_count - 1));

	DBGPR("<--xgbe_dev_xmit\n");
}
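
/* Added note (not in the original source): xgbe_dev_xmit() deliberately sets
 * the OWN bit of the first descriptor last, after a dma_wmb(), so the DMA
 * engine can never observe a partially written descriptor chain; the tail
 * pointer write in xgbe_tx_start_xmit() then kicks off the transfer.
 */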
1903
1904 static int xgbe_dev_read(struct xgbe_channel *channel)
1905 {
1906 struct xgbe_prv_data *pdata = channel->pdata;
1907 struct xgbe_ring *ring = channel->rx_ring;
1908 struct xgbe_ring_data *rdata;
1909 struct xgbe_ring_desc *rdesc;
1910 struct xgbe_packet_data *packet = &ring->packet_data;
1911 struct net_device *netdev = pdata->netdev;
1912 unsigned int err, etlt, l34t;
1913
1914 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1915
1916 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1917 rdesc = rdata->rdesc;
1918
1919
1920 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
1921 return 1;
1922
1923
1924 dma_rmb();
1925
1926 if (netif_msg_rx_status(pdata))
1927 xgbe_dump_rx_desc(pdata, ring, ring->cur);
1928
1929 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
1930
1931 xgbe_get_rx_tstamp(packet, rdesc);
1932
1933 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1934 CONTEXT, 1);
1935 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1936 CONTEXT_NEXT, 0);
1937 return 0;
1938 }
1939
1940
1941 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
1942
1943
1944 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
1945 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1946 CONTEXT_NEXT, 1);
1947
1948
1949 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
1950 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1951 FIRST, 1);
1952 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
1953 RX_NORMAL_DESC2, HL);
1954 if (rdata->rx.hdr_len)
1955 pdata->ext_stats.rx_split_header_packets++;
1956 } else {
1957 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1958 FIRST, 0);
1959 }
1960
1961
1962 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
1963 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1964 RSS_HASH, 1);
1965
1966 packet->rss_hash = le32_to_cpu(rdesc->desc1);
1967
1968 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
1969 switch (l34t) {
1970 case RX_DESC3_L34T_IPV4_TCP:
1971 case RX_DESC3_L34T_IPV4_UDP:
1972 case RX_DESC3_L34T_IPV6_TCP:
1973 case RX_DESC3_L34T_IPV6_UDP:
1974 packet->rss_hash_type = PKT_HASH_TYPE_L4;
1975 break;
1976 default:
1977 packet->rss_hash_type = PKT_HASH_TYPE_L3;
1978 }
1979 }
1980
1981
1982 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
1983 return 0;
1984
1985
1986 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1987 LAST, 1);
1988
1989
1990 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1991
1992
1993 if (netdev->features & NETIF_F_RXCSUM) {
1994 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1995 CSUM_DONE, 1);
1996 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1997 TNPCSUM_DONE, 1);
1998 }
1999
2000
2001 if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
2002 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2003 TNP, 1);
2004 pdata->ext_stats.rx_vxlan_packets++;
2005
2006 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
2007 switch (l34t) {
2008 case RX_DESC3_L34T_IPV4_UNKNOWN:
2009 case RX_DESC3_L34T_IPV6_UNKNOWN:
2010 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2011 TNPCSUM_DONE, 0);
2012 break;
2013 }
2014 }
2015
2016
2017 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
2018 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
2019 netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
2020
2021 if (!err || !etlt) {
2022
2023 if ((etlt == 0x09) &&
2024 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
2025 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2026 VLAN_CTAG, 1);
2027 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
2028 RX_NORMAL_DESC0,
2029 OVT);
2030 netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
2031 packet->vlan_ctag);
2032 }
2033 } else {
2034 unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
2035 RX_PACKET_ATTRIBUTES, TNP);
2036
2037 if ((etlt == 0x05) || (etlt == 0x06)) {
2038 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2039 CSUM_DONE, 0);
2040 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2041 TNPCSUM_DONE, 0);
2042 pdata->ext_stats.rx_csum_errors++;
2043 } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
2044 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2045 CSUM_DONE, 0);
2046 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2047 TNPCSUM_DONE, 0);
2048 pdata->ext_stats.rx_vxlan_csum_errors++;
2049 } else {
2050 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
2051 FRAME, 1);
2052 }
2053 }
2054
2055 pdata->ext_stats.rxq_packets[channel->queue_index]++;
2056 pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len;
2057
2058 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
2059 ring->cur & (ring->rdesc_count - 1), ring->cur);
2060
2061 return 0;
2062 }
2063
2064 static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
2065 {
2066
2067 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
2068 }
2069
2070 static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
2071 {
2072
2073 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
2074 }
2075
2076 static int xgbe_enable_int(struct xgbe_channel *channel,
2077 enum xgbe_int int_id)
2078 {
2079 switch (int_id) {
2080 case XGMAC_INT_DMA_CH_SR_TI:
2081 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
2082 break;
2083 case XGMAC_INT_DMA_CH_SR_TPS:
2084 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
2085 break;
2086 case XGMAC_INT_DMA_CH_SR_TBU:
2087 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
2088 break;
2089 case XGMAC_INT_DMA_CH_SR_RI:
2090 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
2091 break;
2092 case XGMAC_INT_DMA_CH_SR_RBU:
2093 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
2094 break;
2095 case XGMAC_INT_DMA_CH_SR_RPS:
2096 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
2097 break;
2098 case XGMAC_INT_DMA_CH_SR_TI_RI:
2099 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
2100 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
2101 break;
2102 case XGMAC_INT_DMA_CH_SR_FBE:
2103 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
2104 break;
2105 case XGMAC_INT_DMA_ALL:
2106 channel->curr_ier |= channel->saved_ier;
2107 break;
2108 default:
2109 return -1;
2110 }
2111
2112 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
2113
2114 return 0;
2115 }
2116
2117 static int xgbe_disable_int(struct xgbe_channel *channel,
2118 enum xgbe_int int_id)
2119 {
2120 switch (int_id) {
2121 case XGMAC_INT_DMA_CH_SR_TI:
2122 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
2123 break;
2124 case XGMAC_INT_DMA_CH_SR_TPS:
2125 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
2126 break;
2127 case XGMAC_INT_DMA_CH_SR_TBU:
2128 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
2129 break;
2130 case XGMAC_INT_DMA_CH_SR_RI:
2131 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
2132 break;
2133 case XGMAC_INT_DMA_CH_SR_RBU:
2134 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
2135 break;
2136 case XGMAC_INT_DMA_CH_SR_RPS:
2137 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
2138 break;
2139 case XGMAC_INT_DMA_CH_SR_TI_RI:
2140 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
2141 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
2142 break;
2143 case XGMAC_INT_DMA_CH_SR_FBE:
2144 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
2145 break;
2146 case XGMAC_INT_DMA_ALL:
2147 channel->saved_ier = channel->curr_ier;
2148 channel->curr_ier = 0;
2149 break;
2150 default:
2151 return -1;
2152 }
2153
2154 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
2155
2156 return 0;
2157 }
2158
2159 static int __xgbe_exit(struct xgbe_prv_data *pdata)
2160 {
2161 unsigned int count = 2000;
2162
2163 DBGPR("-->xgbe_exit\n");
2164
2165
2166 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
2167 usleep_range(10, 15);
2168
2169
2170 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
2171 usleep_range(500, 600);
2172
2173 if (!count)
2174 return -EBUSY;
2175
2176 DBGPR("<--xgbe_exit\n");
2177
2178 return 0;
2179 }
2180
2181 static int xgbe_exit(struct xgbe_prv_data *pdata)
2182 {
2183 int ret;
2184
2185
2186
2187
2188 ret = __xgbe_exit(pdata);
2189 if (ret)
2190 return ret;
2191
2192 return __xgbe_exit(pdata);
2193 }
2194
2195 static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
2196 {
2197 unsigned int i, count;
2198
2199 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
2200 return 0;
2201
2202 for (i = 0; i < pdata->tx_q_count; i++)
2203 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
2204
2205
2206 for (i = 0; i < pdata->tx_q_count; i++) {
2207 count = 2000;
2208 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
2209 MTL_Q_TQOMR, FTQ))
2210 usleep_range(500, 600);
2211
2212 if (!count)
2213 return -EBUSY;
2214 }
2215
2216 return 0;
2217 }
2218
2219 static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
2220 {
2221 unsigned int sbmr;
2222
2223 sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);
2224
2225
2226 XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);
2227
2228
2229 XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
2230 XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
2231 XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
2232 XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
2233 XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);
2234
2235 XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);
2236
2237
2238 if (pdata->vdata->tx_desc_prefetch)
2239 XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
2240 pdata->vdata->tx_desc_prefetch);
2241
2242 if (pdata->vdata->rx_desc_prefetch)
2243 XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
2244 pdata->vdata->rx_desc_prefetch);
2245 }
2246
2247 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
2248 {
2249 XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
2250 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
2251 if (pdata->awarcr)
2252 XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
2253 }
2254
2255 static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
2256 {
2257 unsigned int i;
2258
2259
2260 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
2261
2262
2263 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
2264 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2265 MTL_TSA_ETS);
2266 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
2267 }
2268
2269
2270 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
2271 }
2272
2273 static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
2274 unsigned int queue,
2275 unsigned int q_fifo_size)
2276 {
2277 unsigned int frame_fifo_size;
2278 unsigned int rfa, rfd;
2279
2280 frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
2281
2282 if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
2283
2284 rfa = pdata->pfc_rfa;
2285 rfd = rfa + frame_fifo_size;
2286 if (rfd > XGMAC_FLOW_CONTROL_MAX)
2287 rfd = XGMAC_FLOW_CONTROL_MAX;
2288 if (rfa >= XGMAC_FLOW_CONTROL_MAX)
2289 rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
2290 } else {
2291
2292
2293
2294
2295
2296 if (q_fifo_size <= 2048) {
2297
2298 pdata->rx_rfa[queue] = 0;
2299 pdata->rx_rfd[queue] = 0;
2300 return;
2301 }
2302
2303 if (q_fifo_size <= 4096) {
2304
2305 pdata->rx_rfa[queue] = 0;
2306 pdata->rx_rfd[queue] = 1;
2307 return;
2308 }
2309
2310 if (q_fifo_size <= frame_fifo_size) {
2311
2312 pdata->rx_rfa[queue] = 2;
2313 pdata->rx_rfd[queue] = 5;
2314 return;
2315 }
2316
2317 if (q_fifo_size <= (frame_fifo_size * 3)) {
2318
2319
2320
2321
2322 rfa = q_fifo_size - frame_fifo_size;
2323 rfd = rfa + (frame_fifo_size / 2);
2324 } else {
2325
2326
2327
2328 rfa = frame_fifo_size * 2;
2329 rfa += XGMAC_FLOW_CONTROL_UNIT;
2330 rfd = rfa + frame_fifo_size;
2331 }
2332 }
2333
2334 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
2335 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
2336 }

static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
                                                  unsigned int *fifo)
{
        unsigned int q_fifo_size;
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++) {
                q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;

                xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
        }
}

static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++) {
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
                                       pdata->rx_rfa[i]);
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
                                       pdata->rx_rfd[i]);
        }
}

static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
{
        /* The configured value may not be the actual amount of fifo RAM */
        return min_t(unsigned int, pdata->tx_max_fifo_size,
                     pdata->hw_feat.tx_fifo_size);
}

static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
{
        /* The configured value may not be the actual amount of fifo RAM */
        return min_t(unsigned int, pdata->rx_max_fifo_size,
                     pdata->hw_feat.rx_fifo_size);
}

static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
                                      unsigned int queue_count,
                                      unsigned int *fifo)
{
        unsigned int q_fifo_size;
        unsigned int p_fifo;
        unsigned int i;

        q_fifo_size = fifo_size / queue_count;

        /* Calculate the fifo setting by dividing the queue's fifo size
         * by the fifo allocation increment (with 0 representing the
         * base allocation increment so decrement the result by 1).
         */
        p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
        if (p_fifo)
                p_fifo--;

        /* Distribute the fifo equally amongst the queues */
        for (i = 0; i < queue_count; i++)
                fifo[i] = p_fifo;
}
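
/* Worked example (illustrative, not part of the driver): a 64 KB fifo
 * shared by 8 queues gives q_fifo_size = 8192.  Assuming XGMAC_FIFO_UNIT
 * is 256 as defined in xgbe.h, the encoded setting is 8192 / 256 - 1 = 31,
 * and the hardware interprets a TQS/RQS value of n as (n + 1) * 256 bytes,
 * matching the "(fifo[0] + 1) * XGMAC_FIFO_UNIT" reporting below.
 */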

static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
                                           unsigned int queue_count,
                                           unsigned int *fifo)
{
        unsigned int i;

        BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);

        if (queue_count <= IEEE_8021QAZ_MAX_TCS)
                return fifo_size;

        /* Rx queues 9 and up are for specialized packets,
         * such as PTP or DCB control packets, etc. and
         * don't require a large fifo
         */
        for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
                fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
                fifo_size -= XGMAC_FIFO_MIN_ALLOC;
        }

        return fifo_size;
}

static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
{
        unsigned int delay;

        /* If a delay was provided, use that */
        if (pdata->pfc->delay)
                return pdata->pfc->delay / 8;

        /* Allow for two maximum size frames */
        delay = xgbe_get_max_frame(pdata);
        delay += XGMAC_ETH_PREAMBLE;
        delay *= 2;

        /* Allow for PFC frame */
        delay += XGMAC_PFC_DATA_LEN;
        delay += ETH_HLEN + ETH_FCS_LEN;
        delay += XGMAC_ETH_PREAMBLE;

        /* Allow for miscellaneous delays (LPI exit, cable, etc.) */
        delay += XGMAC_PFC_DELAYS;

        return delay;
}
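
/* Note (illustrative, not part of the driver): the computed value is a
 * byte budget rather than a time.  The delay advertised via DCBX is in
 * bit times, hence the divide by 8 when one is supplied.  The fallback
 * sums the worst case in-flight data: two full frames (one already on the
 * wire, one that may start before the peer reacts to the PFC frame), one
 * PFC frame itself, and a fixed allowance (XGMAC_PFC_DELAYS) for link and
 * PHY latencies.
 */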

static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
{
        unsigned int count, prio_queues;
        unsigned int i;

        if (!pdata->pfc->pfc_en)
                return 0;

        count = 0;
        prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
        for (i = 0; i < prio_queues; i++) {
                if (!xgbe_is_pfc_queue(pdata, i))
                        continue;

                pdata->pfcq[i] = 1;
                count++;
        }

        return count;
}

static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
                                    unsigned int fifo_size,
                                    unsigned int *fifo)
{
        unsigned int q_fifo_size, rem_fifo, addn_fifo;
        unsigned int prio_queues;
        unsigned int pfc_count;
        unsigned int i;

        q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
        prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
        pfc_count = xgbe_get_pfc_queues(pdata);

        if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
                /* No traffic classes with PFC enabled or can't do lossless */
                xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
                return;
        }

        /* Calculate how much fifo we have to play with */
        rem_fifo = fifo_size - (q_fifo_size * prio_queues);

        /* Calculate how much more than base fifo PFC needs, which also
         * becomes the threshold activation point (RFA)
         */
        pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
        pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);

        if (pdata->pfc_rfa > q_fifo_size) {
                addn_fifo = pdata->pfc_rfa - q_fifo_size;
                addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
        } else {
                addn_fifo = 0;
        }

        /* Calculate DCB fifo settings:
         *   - distribute remaining fifo between the VLAN priority
         *     queues based on traffic class PFC enablement and overall
         *     priority (top priority queues get higher priority for
         *     extra fifo)
         */
        i = prio_queues;
        while (i > 0) {
                i--;

                fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;

                if (!pdata->pfcq[i] || !addn_fifo)
                        continue;

                if (addn_fifo > rem_fifo) {
                        netdev_warn(pdata->netdev,
                                    "RXq%u cannot set needed fifo size\n", i);
                        if (!rem_fifo)
                                continue;

                        addn_fifo = rem_fifo;
                }

                fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
                rem_fifo -= addn_fifo;
        }

        if (rem_fifo) {
                unsigned int inc_fifo = rem_fifo / prio_queues;

                /* Distribute remaining fifo among all queues */
                for (i = 0; i < prio_queues; i++)
                        fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
        }
}
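
/* Worked example (illustrative, not part of the driver): with 8 priority
 * queues, a base q_fifo_size of 4096 and pfc_rfa of 6144, each PFC-enabled
 * queue wants addn_fifo = 2048 extra bytes.  The loop walks queues from
 * highest (7) down to lowest (0), so if rem_fifo runs out it is the
 * higher-priority queues that have already claimed their extra fifo.
 */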

static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
        unsigned int fifo_size;
        unsigned int fifo[XGBE_MAX_QUEUES];
        unsigned int i;

        fifo_size = xgbe_get_tx_fifo_size(pdata);

        xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);

        netif_info(pdata, drv, pdata->netdev,
                   "%d Tx hardware queues, %d byte fifo per queue\n",
                   pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
        unsigned int fifo_size;
        unsigned int fifo[XGBE_MAX_QUEUES];
        unsigned int prio_queues;
        unsigned int i;

        /* Clear any DCB related fifo/queue information */
        memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
        pdata->pfc_rfa = 0;

        fifo_size = xgbe_get_rx_fifo_size(pdata);
        prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);

        /* Assign a minimum fifo to the non-VLAN priority queues */
        fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);

        if (pdata->pfc && pdata->ets)
                xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
        else
                xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);

        xgbe_calculate_flow_control_threshold(pdata, fifo);
        xgbe_config_flow_control_threshold(pdata);

        if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
                netif_info(pdata, drv, pdata->netdev,
                           "%u Rx hardware queues\n", pdata->rx_q_count);
                for (i = 0; i < pdata->rx_q_count; i++)
                        netif_info(pdata, drv, pdata->netdev,
                                   "RxQ%u, %u byte fifo queue\n", i,
                                   ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
        } else {
                netif_info(pdata, drv, pdata->netdev,
                           "%u Rx hardware queues, %u byte fifo per queue\n",
                           pdata->rx_q_count,
                           ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
        }
}

static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
        unsigned int qptc, qptc_extra, queue;
        unsigned int prio_queues;
        unsigned int ppq, ppq_extra, prio;
        unsigned int mask;
        unsigned int i, j, reg, reg_val;

        /* Map the MTL Tx Queues to Traffic Classes
         *   Note: Tx Queues >= Traffic Classes
         */
        qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
        qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

        for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
                for (j = 0; j < qptc; j++) {
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
                }

                if (i < qptc_extra) {
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
                }
        }

        /* Map the 8 VLAN priority values to available MTL Rx queues */
        prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
        ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
        ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

        reg = MAC_RQC2R;
        reg_val = 0;
        for (i = 0, prio = 0; i < prio_queues;) {
                mask = 0;
                for (j = 0; j < ppq; j++) {
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }

                if (i < ppq_extra) {
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }

                reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

                if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
                        continue;

                XGMAC_IOWRITE(pdata, reg, reg_val);
                reg += MAC_RQC2_INC;
                reg_val = 0;
        }

        /* Configure one to one, MTL Rx queue to DMA Rx channel mapping
         *  ie Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
         */
        reg = MTL_RQDCM0R;
        reg_val = 0;
        for (i = 0; i < pdata->rx_q_count;) {
                reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

                if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
                        continue;

                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MTL_RQDCM_INC;
                reg_val = 0;
        }
}
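
/* Worked example (illustrative, not part of the driver): with
 * tx_q_count = 5 and tc_cnt = 3, qptc = 1 and qptc_extra = 2, so the
 * mapping becomes TXq0,TXq1 -> TC0, TXq2,TXq3 -> TC1 and TXq4 -> TC2
 * (the first qptc_extra classes absorb one extra queue each).  The
 * priority masks are then packed one byte per Rx queue, assuming
 * MAC_RQC2_Q_PER_REG is 4, i.e. four queues per MAC_RQC2x register.
 */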

static void xgbe_config_tc(struct xgbe_prv_data *pdata)
{
        unsigned int offset, queue, prio;
        u8 i;

        netdev_reset_tc(pdata->netdev);
        if (!pdata->num_tcs)
                return;

        netdev_set_num_tc(pdata->netdev, pdata->num_tcs);

        for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
                while ((queue < pdata->tx_q_count) &&
                       (pdata->q2tc_map[queue] == i))
                        queue++;

                netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
                          i, offset, queue - 1);
                netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
                offset = queue;
        }

        if (!pdata->ets)
                return;

        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
                netdev_set_prio_tc_map(pdata->netdev, prio,
                                       pdata->ets->prio_tc[prio]);
}

static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
        struct ieee_ets *ets = pdata->ets;
        unsigned int total_weight, min_weight, weight;
        unsigned int mask, reg, reg_val;
        unsigned int i, prio;

        if (!ets)
                return;

        /* Set Tx to deficit weighted round robin scheduling algorithm (when
         * traffic class is using ETS algorithm)
         */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

        /* Set Traffic Class algorithms */
        total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
        min_weight = total_weight / 100;
        if (!min_weight)
                min_weight = 1;

        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
                /* Map the priorities to the traffic class */
                mask = 0;
                for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
                        if (ets->prio_tc[prio] == i)
                                mask |= (1 << prio);
                }
                mask &= 0xff;

                netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
                          i, mask);
                reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
                reg_val = XGMAC_IOREAD(pdata, reg);

                reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
                reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));

                XGMAC_IOWRITE(pdata, reg, reg_val);

                /* Set the traffic class algorithm */
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TC%u using SP\n", i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_SP);
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        weight = total_weight * ets->tc_tx_bw[i] / 100;
                        weight = clamp(weight, min_weight, total_weight);

                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TC%u using DWRR (weight %u)\n", i, weight);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_ETS);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
                                               weight);
                        break;
                }
        }

        xgbe_config_tc(pdata);
}
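
/* Worked example (illustrative, not part of the driver): with an MTU of
 * 1500 and 4 traffic classes, total_weight = 6000 and min_weight = 60.
 * A class granted 25% ETS bandwidth gets weight = 6000 * 25 / 100 = 1500,
 * and clamp() keeps every DWRR weight within [60, 6000] even for
 * degenerate bandwidth tables.
 */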

static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
        if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
                /* Just stop the Tx queues while Rx fifo is changed */
                netif_tx_stop_all_queues(pdata->netdev);

                /* Suspend Rx so that fifo's can be adjusted */
                pdata->hw_if.disable_rx(pdata);
        }

        xgbe_config_rx_fifo_size(pdata);
        xgbe_config_flow_control(pdata);

        if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
                /* Resume Rx */
                pdata->hw_if.enable_rx(pdata);

                /* Resume Tx queues */
                netif_tx_start_all_queues(pdata->netdev);
        }
}

static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
        xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);

        /* Filtering is done using perfect filtering and hash filtering */
        if (pdata->hw_feat.hash_table_size) {
                XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
                XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
                XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
        }
}

static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
        unsigned int val;

        val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;

        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
        xgbe_set_speed(pdata, pdata->phy_speed);
}

static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
        if (pdata->netdev->features & NETIF_F_RXCSUM)
                xgbe_enable_rx_csum(pdata);
        else
                xgbe_disable_rx_csum(pdata);
}

static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
        /* Indicate that VLAN Tx CTAGs come from context descriptors */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

        /* Set the current VLAN Hash Table register value */
        xgbe_update_vlan_hash_table(pdata);

        if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                xgbe_enable_rx_vlan_filtering(pdata);
        else
                xgbe_disable_rx_vlan_filtering(pdata);

        if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                xgbe_enable_rx_vlan_stripping(pdata);
        else
                xgbe_disable_rx_vlan_stripping(pdata);
}

static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
        bool read_hi;
        u64 val;

        if (pdata->vdata->mmc_64bit) {
                switch (reg_lo) {
                /* These registers are always 32 bit */
                case MMC_RXRUNTERROR:
                case MMC_RXJABBERERROR:
                case MMC_RXUNDERSIZE_G:
                case MMC_RXOVERSIZE_G:
                case MMC_RXWATCHDOGERROR:
                        read_hi = false;
                        break;

                default:
                        read_hi = true;
                }
        } else {
                switch (reg_lo) {
                /* These registers are always 64 bit */
                case MMC_TXOCTETCOUNT_GB_LO:
                case MMC_TXOCTETCOUNT_G_LO:
                case MMC_RXOCTETCOUNT_GB_LO:
                case MMC_RXOCTETCOUNT_G_LO:
                        read_hi = true;
                        break;

                default:
                        read_hi = false;
                }
        }

        val = XGMAC_IOREAD(pdata, reg_lo);

        if (read_hi)
                val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

        return val;
}
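
/* Note (illustrative, not part of the driver): a wide counter is exposed
 * as a _LO/_HI register pair 4 bytes apart, so the 64-bit value is
 * composed as lo | ((u64)hi << 32) with the low word read first.
 * Combined with the freeze and reset-on-read configuration below, this
 * lets the software accumulators in xgbe_mmc_stats grow monotonically
 * without double counting.
 */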

static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
        unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
                stats->txoctetcount_gb +=
                        xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
                stats->txframecount_gb +=
                        xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
                stats->txbroadcastframes_g +=
                        xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
                stats->txmulticastframes_g +=
                        xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
                stats->tx64octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
                stats->tx65to127octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
                stats->tx128to255octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
                stats->tx256to511octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
                stats->tx512to1023octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
                stats->tx1024tomaxoctets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
                stats->txunicastframes_gb +=
                        xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
                stats->txmulticastframes_gb +=
                        xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
                stats->txbroadcastframes_g +=
                        xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
                stats->txunderflowerror +=
                        xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
                stats->txoctetcount_g +=
                        xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
                stats->txframecount_g +=
                        xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
                stats->txpauseframes +=
                        xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
                stats->txvlanframes_g +=
                        xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}

static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
        unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
                stats->rxframecount_gb +=
                        xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
                stats->rxoctetcount_gb +=
                        xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
                stats->rxoctetcount_g +=
                        xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
                stats->rxbroadcastframes_g +=
                        xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
                stats->rxmulticastframes_g +=
                        xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
                stats->rxcrcerror +=
                        xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
                stats->rxrunterror +=
                        xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
                stats->rxjabbererror +=
                        xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
                stats->rxundersize_g +=
                        xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
                stats->rxoversize_g +=
                        xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
                stats->rx64octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
                stats->rx65to127octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
                stats->rx128to255octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
                stats->rx256to511octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
                stats->rx512to1023octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
                stats->rx1024tomaxoctets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
                stats->rxunicastframes_g +=
                        xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
                stats->rxlengtherror +=
                        xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
                stats->rxoutofrangetype +=
                        xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
                stats->rxpauseframes +=
                        xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
                stats->rxfifooverflow +=
                        xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
                stats->rxvlanframes_gb +=
                        xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
                stats->rxwatchdogerror +=
                        xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}

static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

        /* Freeze counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

        stats->txoctetcount_gb +=
                xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

        stats->txframecount_gb +=
                xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

        stats->txbroadcastframes_g +=
                xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

        stats->txmulticastframes_g +=
                xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

        stats->tx64octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

        stats->tx65to127octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

        stats->tx128to255octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

        stats->tx256to511octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

        stats->tx512to1023octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

        stats->tx1024tomaxoctets_gb +=
                xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

        stats->txunicastframes_gb +=
                xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

        stats->txmulticastframes_gb +=
                xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

        stats->txbroadcastframes_g +=
                xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

        stats->txunderflowerror +=
                xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

        stats->txoctetcount_g +=
                xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

        stats->txframecount_g +=
                xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

        stats->txpauseframes +=
                xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

        stats->txvlanframes_g +=
                xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

        stats->rxframecount_gb +=
                xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

        stats->rxoctetcount_gb +=
                xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

        stats->rxoctetcount_g +=
                xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

        stats->rxbroadcastframes_g +=
                xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

        stats->rxmulticastframes_g +=
                xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

        stats->rxcrcerror +=
                xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

        stats->rxrunterror +=
                xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

        stats->rxjabbererror +=
                xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

        stats->rxundersize_g +=
                xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

        stats->rxoversize_g +=
                xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

        stats->rx64octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

        stats->rx65to127octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

        stats->rx128to255octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

        stats->rx256to511octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

        stats->rx512to1023octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

        stats->rx1024tomaxoctets_gb +=
                xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

        stats->rxunicastframes_g +=
                xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

        stats->rxlengtherror +=
                xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

        stats->rxoutofrangetype +=
                xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

        stats->rxpauseframes +=
                xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

        stats->rxfifooverflow +=
                xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

        stats->rxvlanframes_gb +=
                xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

        stats->rxwatchdogerror +=
                xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

        /* Un-freeze counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
        /* Set counters to reset on read */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

        /* Reset the counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
                                     unsigned int queue)
{
        unsigned int tx_status;
        unsigned long tx_timeout;

        /* The Tx engine cannot be stopped if it is actively processing
         * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
         * wait forever though...
         */
        tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
        while (time_before(jiffies, tx_timeout)) {
                tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
                if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
                    (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
                        break;

                usleep_range(500, 1000);
        }

        if (!time_before(jiffies, tx_timeout))
                netdev_info(pdata->netdev,
                            "timed out waiting for Tx queue %u to empty\n",
                            queue);
}

static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
                                 unsigned int queue)
{
        unsigned int tx_dsr, tx_pos, tx_qidx;
        unsigned int tx_status;
        unsigned long tx_timeout;

        if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
                return xgbe_txq_prepare_tx_stop(pdata, queue);

        /* Calculate the status register to read and the position within */
        if (queue < DMA_DSRX_FIRST_QUEUE) {
                tx_dsr = DMA_DSR0;
                tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
        } else {
                tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

                tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
                tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
                         DMA_DSRX_TPS_START;
        }

        /* The Tx engine cannot be stopped if it is actively processing
         * descriptors. Wait for the Tx engine to enter the stopped or
         * suspended state.  Don't wait forever though...
         */
        tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
        while (time_before(jiffies, tx_timeout)) {
                tx_status = XGMAC_IOREAD(pdata, tx_dsr);
                tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
                if ((tx_status == DMA_TPS_STOPPED) ||
                    (tx_status == DMA_TPS_SUSPENDED))
                        break;

                usleep_range(500, 1000);
        }

        if (!time_before(jiffies, tx_timeout))
                netdev_info(pdata->netdev,
                            "timed out waiting for Tx DMA channel %u to stop\n",
                            queue);
}
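
/* Worked example (illustrative, not part of the driver): assuming the
 * DMA_DSR* layout in xgbe-common.h (the first DMA_DSRX_FIRST_QUEUE = 3
 * channels in DMA_DSR0, then DMA_DSRX_QPR = 4 channels per subsequent
 * register, with DMA_DSR_Q_WIDTH-bit status slots), queue 5 gives
 * tx_qidx = 2, so tx_dsr = DMA_DSR1 and
 * tx_pos = (2 * DMA_DSR_Q_WIDTH) + DMA_DSRX_TPS_START, from which
 * GET_BITS() extracts the DMA_DSR_TPS_WIDTH-bit Tx process state.
 */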

static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Enable each Tx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
        }

        /* Enable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
                                       MTL_Q_ENABLED);

        /* Enable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Prepare for Tx DMA channel stop */
        for (i = 0; i < pdata->tx_q_count; i++)
                xgbe_prepare_tx_stop(pdata, i);

        /* Disable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

        /* Disable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

        /* Disable each Tx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
        }
}

static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
                                 unsigned int queue)
{
        unsigned int rx_status;
        unsigned long rx_timeout;

        /* The Rx engine cannot be stopped if it is actively processing
         * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
         * wait forever though...
         */
        rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
        while (time_before(jiffies, rx_timeout)) {
                rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
                if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
                    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
                        break;

                usleep_range(500, 1000);
        }

        if (!time_before(jiffies, rx_timeout))
                netdev_info(pdata->netdev,
                            "timed out waiting for Rx queue %u to empty\n",
                            queue);
}

static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
        unsigned int reg_val, i;

        /* Enable each Rx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
        }

        /* Enable each Rx queue */
        reg_val = 0;
        for (i = 0; i < pdata->rx_q_count; i++)
                reg_val |= (0x02 << (i << 1));
        XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

        /* Enable MAC Rx */
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
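
/* Note (illustrative, not part of the driver): MAC_RQC0R holds a 2-bit
 * enable field per Rx queue, with 0x02 marking a queue as enabled for
 * DCB (generic) traffic.  For rx_q_count = 4 the loop builds
 * 0x02 | 0x08 | 0x20 | 0x80 = 0xAA before the single register write.
 */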

static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Disable MAC Rx */
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

        /* Prepare for Rx DMA channel stop */
        for (i = 0; i < pdata->rx_q_count; i++)
                xgbe_prepare_rx_stop(pdata, i);

        /* Disable each Rx queue */
        XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

        /* Disable each Rx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
        }
}

static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Enable each Tx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
        }

        /* Enable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Prepare for Tx DMA channel stop */
        for (i = 0; i < pdata->tx_q_count; i++)
                xgbe_prepare_tx_stop(pdata, i);

        /* Disable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

        /* Disable each Tx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
        }
}

static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Enable each Rx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
        }
}

static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Disable each Rx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
        }
}

static int xgbe_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        int ret;

        DBGPR("-->xgbe_init\n");

        /* Flush Tx queues */
        ret = xgbe_flush_tx_queues(pdata);
        if (ret) {
                netdev_err(pdata->netdev, "error flushing TX queues\n");
                return ret;
        }

        /*
         * Initialize DMA related features
         */
        xgbe_config_dma_bus(pdata);
        xgbe_config_dma_cache(pdata);
        xgbe_config_osp_mode(pdata);
        xgbe_config_pbl_val(pdata);
        xgbe_config_rx_coalesce(pdata);
        xgbe_config_tx_coalesce(pdata);
        xgbe_config_rx_buffer_size(pdata);
        xgbe_config_tso_mode(pdata);
        xgbe_config_sph_mode(pdata);
        xgbe_config_rss(pdata);
        desc_if->wrapper_tx_desc_init(pdata);
        desc_if->wrapper_rx_desc_init(pdata);
        xgbe_enable_dma_interrupts(pdata);

        /*
         * Initialize MTL related features
         */
        xgbe_config_mtl_mode(pdata);
        xgbe_config_queue_mapping(pdata);
        xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
        xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
        xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
        xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
        xgbe_config_tx_fifo_size(pdata);
        xgbe_config_rx_fifo_size(pdata);
        /*TODO: Error Packet and undersized good Packet forwarding enable
         *      (FEP and FUP)
         */
        xgbe_config_dcb_tc(pdata);
        xgbe_enable_mtl_interrupts(pdata);

        /*
         * Initialize MAC related features
         */
        xgbe_config_mac_address(pdata);
        xgbe_config_rx_mode(pdata);
        xgbe_config_jumbo_enable(pdata);
        xgbe_config_flow_control(pdata);
        xgbe_config_mac_speed(pdata);
        xgbe_config_checksum_offload(pdata);
        xgbe_config_vlan_support(pdata);
        xgbe_config_mmc(pdata);
        xgbe_enable_mac_interrupts(pdata);

        /*
         * Initialize ECC related features
         */
        xgbe_enable_ecc_interrupts(pdata);

        DBGPR("<--xgbe_init\n");

        return 0;
}

void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
        DBGPR("-->xgbe_init_function_ptrs\n");

        hw_if->tx_complete = xgbe_tx_complete;

        hw_if->set_mac_address = xgbe_set_mac_address;
        hw_if->config_rx_mode = xgbe_config_rx_mode;

        hw_if->enable_rx_csum = xgbe_enable_rx_csum;
        hw_if->disable_rx_csum = xgbe_disable_rx_csum;

        hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
        hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
        hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
        hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
        hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

        hw_if->read_mmd_regs = xgbe_read_mmd_regs;
        hw_if->write_mmd_regs = xgbe_write_mmd_regs;

        hw_if->set_speed = xgbe_set_speed;

        hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
        hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
        hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;

        hw_if->set_gpio = xgbe_set_gpio;
        hw_if->clr_gpio = xgbe_clr_gpio;

        hw_if->enable_tx = xgbe_enable_tx;
        hw_if->disable_tx = xgbe_disable_tx;
        hw_if->enable_rx = xgbe_enable_rx;
        hw_if->disable_rx = xgbe_disable_rx;

        hw_if->powerup_tx = xgbe_powerup_tx;
        hw_if->powerdown_tx = xgbe_powerdown_tx;
        hw_if->powerup_rx = xgbe_powerup_rx;
        hw_if->powerdown_rx = xgbe_powerdown_rx;

        hw_if->dev_xmit = xgbe_dev_xmit;
        hw_if->dev_read = xgbe_dev_read;
        hw_if->enable_int = xgbe_enable_int;
        hw_if->disable_int = xgbe_disable_int;
        hw_if->init = xgbe_init;
        hw_if->exit = xgbe_exit;

        /* Descriptor related sequences */
        hw_if->tx_desc_init = xgbe_tx_desc_init;
        hw_if->rx_desc_init = xgbe_rx_desc_init;
        hw_if->tx_desc_reset = xgbe_tx_desc_reset;
        hw_if->rx_desc_reset = xgbe_rx_desc_reset;
        hw_if->is_last_desc = xgbe_is_last_desc;
        hw_if->is_context_desc = xgbe_is_context_desc;
        hw_if->tx_start_xmit = xgbe_tx_start_xmit;

        /* For FLOW ctrl */
        hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
        hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

        /* For RX coalescing */
        hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
        hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
        hw_if->usec_to_riwt = xgbe_usec_to_riwt;
        hw_if->riwt_to_usec = xgbe_riwt_to_usec;

        /* For RX and TX threshold config */
        hw_if->config_rx_threshold = xgbe_config_rx_threshold;
        hw_if->config_tx_threshold = xgbe_config_tx_threshold;

        /* For RX and TX Store and Forward Mode config */
        hw_if->config_rsf_mode = xgbe_config_rsf_mode;
        hw_if->config_tsf_mode = xgbe_config_tsf_mode;

        /* For TX DMA Operating on Second Frame config */
        hw_if->config_osp_mode = xgbe_config_osp_mode;

        /* For MMC statistics support */
        hw_if->tx_mmc_int = xgbe_tx_mmc_int;
        hw_if->rx_mmc_int = xgbe_rx_mmc_int;
        hw_if->read_mmc_stats = xgbe_read_mmc_stats;

        /* For PTP config */
        hw_if->config_tstamp = xgbe_config_tstamp;
        hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
        hw_if->set_tstamp_time = xgbe_set_tstamp_time;
        hw_if->get_tstamp_time = xgbe_get_tstamp_time;
        hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

        /* For Data Center Bridging config */
        hw_if->config_tc = xgbe_config_tc;
        hw_if->config_dcb_tc = xgbe_config_dcb_tc;
        hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

        /* For Receive Side Scaling */
        hw_if->enable_rss = xgbe_enable_rss;
        hw_if->disable_rss = xgbe_disable_rss;
        hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
        hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

        /* For ECC */
        hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
        hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;

        /* For VXLAN */
        hw_if->enable_vxlan = xgbe_enable_vxlan;
        hw_if->disable_vxlan = xgbe_disable_vxlan;
        hw_if->set_vxlan_id = xgbe_set_vxlan_id;

        DBGPR("<--xgbe_init_function_ptrs\n");
}