0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/tcp.h>
0009 #include <linux/ip.h>
0010 #include <linux/ipv6.h>
0011 #include <linux/crc32.h>
0012 #include <linux/if_vlan.h>
0013 #include <linux/jiffies.h>
0014 #include <linux/phy.h>
0015 #include <linux/of.h>
0016 #include <net/ip6_checksum.h>
0017 #include "emac.h"
0018 #include "emac-sgmii.h"
0019
0020
0021 #define SINGLE_PAUSE_MODE 0x10000000
0022 #define DEBUG_MODE 0x08000000
0023 #define BROAD_EN 0x04000000
0024 #define MULTI_ALL 0x02000000
0025 #define RX_CHKSUM_EN 0x01000000
0026 #define HUGE 0x00800000
0027 #define SPEED(x) (((x) & 0x3) << 20)
0028 #define SPEED_MASK SPEED(0x3)
0029 #define SIMR 0x00080000
0030 #define TPAUSE 0x00010000
0031 #define PROM_MODE 0x00008000
0032 #define VLAN_STRIP 0x00004000
0033 #define PRLEN_BMSK 0x00003c00
0034 #define PRLEN_SHFT 10
0035 #define HUGEN 0x00000200
0036 #define FLCHK 0x00000100
0037 #define PCRCE 0x00000080
0038 #define CRCE 0x00000040
0039 #define FULLD 0x00000020
0040 #define MAC_LP_EN 0x00000010
0041 #define RXFC 0x00000008
0042 #define TXFC 0x00000004
0043 #define RXEN 0x00000002
0044 #define TXEN 0x00000001
0045
0046
0047 #define RFD_RING_SIZE_BMSK 0xfff
0048
0049
0050 #define RX_BUFFER_SIZE_BMSK 0xffff
0051
0052
0053 #define RRD_RING_SIZE_BMSK 0xfff
0054
0055
0056 #define TPD_RING_SIZE_BMSK 0xffff
0057
0058
0059 #define NUM_TXF_BURST_PREF_BMSK 0xffff0000
0060 #define NUM_TXF_BURST_PREF_SHFT 16
0061 #define LS_8023_SP 0x80
0062 #define TXQ_MODE 0x40
0063 #define TXQ_EN 0x20
0064 #define IP_OP_SP 0x10
0065 #define NUM_TPD_BURST_PREF_BMSK 0xf
0066 #define NUM_TPD_BURST_PREF_SHFT 0
0067
0068
0069 #define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK 0x7ff
0070
0071
0072 #define TXF_HWM_BMSK 0xfff0000
0073 #define TXF_LWM_BMSK 0xfff
0074
0075
0076 #define RXQ_EN BIT(31)
0077 #define CUT_THRU_EN BIT(30)
0078 #define RSS_HASH_EN BIT(29)
0079 #define NUM_RFD_BURST_PREF_BMSK 0x3f00000
0080 #define NUM_RFD_BURST_PREF_SHFT 20
0081 #define IDT_TABLE_SIZE_BMSK 0x1ff00
0082 #define IDT_TABLE_SIZE_SHFT 8
0083 #define SP_IPV6 0x80
0084
0085
0086 #define JUMBO_1KAH_BMSK 0xf000
0087 #define JUMBO_1KAH_SHFT 12
0088 #define RFD_PREF_LOW_TH 0x10
0089 #define RFD_PREF_LOW_THRESHOLD_BMSK 0xfc0
0090 #define RFD_PREF_LOW_THRESHOLD_SHFT 6
0091 #define RFD_PREF_UP_TH 0x10
0092 #define RFD_PREF_UP_THRESHOLD_BMSK 0x3f
0093 #define RFD_PREF_UP_THRESHOLD_SHFT 0
0094
0095
0096 #define RXF_DOF_THRESFHOLD 0x1a0
0097 #define RXF_DOF_THRESHOLD_BMSK 0xfff0000
0098 #define RXF_DOF_THRESHOLD_SHFT 16
0099 #define RXF_UOF_THRESFHOLD 0xbe
0100 #define RXF_UOF_THRESHOLD_BMSK 0xfff
0101 #define RXF_UOF_THRESHOLD_SHFT 0
0102
0103
0104 #define RXD_TIMER_BMSK 0xffff0000
0105 #define RXD_THRESHOLD_BMSK 0xfff
0106 #define RXD_THRESHOLD_SHFT 0
0107
0108
0109 #define DMAW_DLY_CNT_BMSK 0xf0000
0110 #define DMAW_DLY_CNT_SHFT 16
0111 #define DMAR_DLY_CNT_BMSK 0xf800
0112 #define DMAR_DLY_CNT_SHFT 11
0113 #define DMAR_REQ_PRI 0x400
0114 #define REGWRBLEN_BMSK 0x380
0115 #define REGWRBLEN_SHFT 7
0116 #define REGRDBLEN_BMSK 0x70
0117 #define REGRDBLEN_SHFT 4
0118 #define OUT_ORDER_MODE 0x4
0119 #define ENH_ORDER_MODE 0x2
0120 #define IN_ORDER_MODE 0x1
0121
0122
0123 #define RFD3_PROC_IDX_BMSK 0xfff0000
0124 #define RFD3_PROC_IDX_SHFT 16
0125 #define RFD3_PROD_IDX_BMSK 0xfff
0126 #define RFD3_PROD_IDX_SHFT 0
0127
0128
0129 #define NTPD_CONS_IDX_BMSK 0xffff0000
0130 #define NTPD_CONS_IDX_SHFT 16
0131
0132
0133 #define RFD0_CONS_IDX_BMSK 0xfff
0134 #define RFD0_CONS_IDX_SHFT 0
0135
0136
0137 #define H3TPD_PROD_IDX_BMSK 0xffff0000
0138 #define H3TPD_PROD_IDX_SHFT 16
0139
0140
0141 #define DATA_BYTE_SWAP 0x8
0142 #define MAX_BOUND 0x2
0143 #define MAX_BTYPE 0x1
0144
0145
0146 #define H3TPD_CONS_IDX_BMSK 0xffff0000
0147 #define H3TPD_CONS_IDX_SHFT 16
0148
0149
0150 #define H2TPD_PROD_IDX_BMSK 0xffff
0151 #define H2TPD_PROD_IDX_SHFT 0
0152
0153
0154 #define H1TPD_CONS_IDX_BMSK 0xffff0000
0155 #define H1TPD_CONS_IDX_SHFT 16
0156 #define H2TPD_CONS_IDX_BMSK 0xffff
0157 #define H2TPD_CONS_IDX_SHFT 0
0158
0159
0160 #define HEADER_CNT_EN 0x2
0161 #define HEADER_ENABLE 0x1
0162
0163
0164 #define RFD0_PROC_IDX_BMSK 0xfff0000
0165 #define RFD0_PROC_IDX_SHFT 16
0166 #define RFD0_PROD_IDX_BMSK 0xfff
0167 #define RFD0_PROD_IDX_SHFT 0
0168
0169
0170 #define RFD1_PROC_IDX_BMSK 0xfff0000
0171 #define RFD1_PROC_IDX_SHFT 16
0172 #define RFD1_PROD_IDX_BMSK 0xfff
0173 #define RFD1_PROD_IDX_SHFT 0
0174
0175
0176 #define RX_UNCPL_INT_EN 0x1
0177
0178
0179 #define RFD2_CONS_IDX_BMSK 0xfff0000
0180 #define RFD2_CONS_IDX_SHFT 16
0181 #define RFD1_CONS_IDX_BMSK 0xfff
0182 #define RFD1_CONS_IDX_SHFT 0
0183
0184
0185 #define RFD3_CONS_IDX_BMSK 0xfff
0186 #define RFD3_CONS_IDX_SHFT 0
0187
0188
0189 #define NTPD_PROD_IDX_BMSK 0xffff
0190 #define NTPD_PROD_IDX_SHFT 0
0191
0192
0193 #define H1TPD_PROD_IDX_BMSK 0xffff
0194 #define H1TPD_PROD_IDX_SHFT 0
0195
0196 #define RXQ0_RSS_HSTYP_IPV6_TCP_EN 0x20
0197 #define RXQ0_RSS_HSTYP_IPV6_EN 0x10
0198 #define RXQ0_RSS_HSTYP_IPV4_TCP_EN 0x8
0199 #define RXQ0_RSS_HSTYP_IPV4_EN 0x4
0200
0201
0202 #define EMAC_WRAPPER_TX_TS_EMPTY BIT(31)
0203 #define EMAC_WRAPPER_TX_TS_INX_BMSK 0xffff
0204
0205 struct emac_skb_cb {
0206 u32 tpd_idx;
0207 unsigned long jiffies;
0208 };
0209
0210 #define EMAC_SKB_CB(skb) ((struct emac_skb_cb *)(skb)->cb)
0211 #define EMAC_RSS_IDT_SIZE 256
0212 #define JUMBO_1KAH 0x4
0213 #define RXD_TH 0x100
0214 #define EMAC_TPD_LAST_FRAGMENT 0x80000000
0215 #define EMAC_TPD_TSTAMP_SAVE 0x80000000
0216
0217
0218 #define EMAC_RRD_L4F BIT(14)
0219 #define EMAC_RRD_IPF BIT(15)
0220 #define EMAC_RRD_CRC BIT(21)
0221 #define EMAC_RRD_FAE BIT(22)
0222 #define EMAC_RRD_TRN BIT(23)
0223 #define EMAC_RRD_RNT BIT(24)
0224 #define EMAC_RRD_INC BIT(25)
0225 #define EMAC_RRD_FOV BIT(29)
0226 #define EMAC_RRD_LEN BIT(30)
0227
0228
0229 #define EMAC_RRD_ERROR (EMAC_RRD_IPF | EMAC_RRD_CRC | EMAC_RRD_FAE | \
0230 EMAC_RRD_TRN | EMAC_RRD_RNT | EMAC_RRD_INC | \
0231 EMAC_RRD_FOV | EMAC_RRD_LEN)
0232 #define EMAC_RRD_STATS_DW_IDX 3
0233
0234 #define EMAC_RRD(RXQ, SIZE, IDX) ((RXQ)->rrd.v_addr + (SIZE * (IDX)))
0235 #define EMAC_RFD(RXQ, SIZE, IDX) ((RXQ)->rfd.v_addr + (SIZE * (IDX)))
0236 #define EMAC_TPD(TXQ, SIZE, IDX) ((TXQ)->tpd.v_addr + (SIZE * (IDX)))
0237
0238 #define GET_RFD_BUFFER(RXQ, IDX) (&((RXQ)->rfd.rfbuff[(IDX)]))
0239 #define GET_TPD_BUFFER(RTQ, IDX) (&((RTQ)->tpd.tpbuff[(IDX)]))
0240
0241 #define EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD 8
0242
0243 #define ISR_RX_PKT (\
0244 RX_PKT_INT0 |\
0245 RX_PKT_INT1 |\
0246 RX_PKT_INT2 |\
0247 RX_PKT_INT3)
0248
0249 void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr)
0250 {
0251 u32 crc32, bit, reg, mta;
0252
0253
0254 crc32 = ether_crc(ETH_ALEN, addr);
0255
0256
0257
0258
0259
0260 reg = (crc32 >> 31) & 0x1;
0261 bit = (crc32 >> 26) & 0x1F;
0262
0263 mta = readl(adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
0264 mta |= BIT(bit);
0265 writel(mta, adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
0266 }
0267
/* Clear the entire 64-bit multicast hash filter. */
void emac_mac_multicast_addr_clear(struct emac_adapter *adpt)
{
	writel(0, adpt->base + EMAC_HASH_TAB_REG0);
	writel(0, adpt->base + EMAC_HASH_TAB_REG1);
}
0273
0274
0275 #define EMAC_RSS_KEY(_i, _type) \
0276 (EMAC_RSS_KEY0 + ((_i) * sizeof(_type)))
0277 #define EMAC_RSS_TBL(_i, _type) \
0278 (EMAC_IDT_TABLE0 + ((_i) * sizeof(_type)))
0279
0280
0281 void emac_mac_mode_config(struct emac_adapter *adpt)
0282 {
0283 struct net_device *netdev = adpt->netdev;
0284 u32 mac;
0285
0286 mac = readl(adpt->base + EMAC_MAC_CTRL);
0287 mac &= ~(VLAN_STRIP | PROM_MODE | MULTI_ALL | MAC_LP_EN);
0288
0289 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
0290 mac |= VLAN_STRIP;
0291
0292 if (netdev->flags & IFF_PROMISC)
0293 mac |= PROM_MODE;
0294
0295 if (netdev->flags & IFF_ALLMULTI)
0296 mac |= MULTI_ALL;
0297
0298 writel(mac, adpt->base + EMAC_MAC_CTRL);
0299 }
0300
0301
/* Program the descriptor-ring base addresses and sizes into the MAC.
 * The TPD/RRD/RFD rings were carved out of one coherent allocation by
 * emac_mac_rx_tx_rings_alloc_all(); only their addresses are written here.
 */
static void emac_mac_dma_rings_config(struct emac_adapter *adpt)
{
	/* TPD (transmit packet descriptor) ring */
	writel(upper_32_bits(adpt->tx_q.tpd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_1);

	writel(lower_32_bits(adpt->tx_q.tpd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_8);

	writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_9);

	/* RFD (receive free) and RRD (receive return) rings: only one
	 * upper-32-bits register is provided for both.
	 */
	writel(upper_32_bits(adpt->rx_q.rfd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_0);

	writel(lower_32_bits(adpt->rx_q.rfd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_2);
	writel(lower_32_bits(adpt->rx_q.rrd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_5);

	writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_3);
	writel(adpt->rx_q.rrd.count & RRD_RING_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_6);

	writel(adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_4);

	writel(0, adpt->base + EMAC_DESC_CTRL_11);

	/* Write 1 to trigger loading of the addresses programmed above
	 * into the hardware.
	 */
	writel(1, adpt->base + EMAC_INTER_SRAM_PART9);
}
0338
0339
/* Configure the transmit queue: task-offload threshold, burst prefetch
 * counts, and TX FIFO watermarks.
 */
static void emac_mac_tx_config(struct emac_adapter *adpt)
{
	u32 val;

	/* Offload threshold is programmed in units of 8 bytes. */
	writel((EMAC_MAX_TX_OFFLOAD_THRESH >> 3) &
	       JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK, adpt->base + EMAC_TXQ_CTRL_1);

	val = (adpt->tpd_burst << NUM_TPD_BURST_PREF_SHFT) &
	       NUM_TPD_BURST_PREF_BMSK;

	val |= TXQ_MODE | LS_8023_SP;
	/* 0x0100 TX-FIFO burst prefetch count — fixed hardware tuning value */
	val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) &
		NUM_TXF_BURST_PREF_BMSK;

	writel(val, adpt->base + EMAC_TXQ_CTRL_0);
	/* Clear the TX FIFO high/low watermark fields. */
	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_2,
			  (TXF_HWM_BMSK | TXF_LWM_BMSK), 0);
}
0358
0359
/* Configure the receive queue: RFD burst prefetch, jumbo 1K-align
 * handling, RFD prefetch thresholds, RX FIFO overflow/underflow
 * thresholds, and the RX descriptor timer/threshold.
 */
static void emac_mac_rx_config(struct emac_adapter *adpt)
{
	u32 val;

	val = (adpt->rfd_burst << NUM_RFD_BURST_PREF_SHFT) &
	       NUM_RFD_BURST_PREF_BMSK;
	val |= (SP_IPV6 | CUT_THRU_EN);

	writel(val, adpt->base + EMAC_RXQ_CTRL_0);

	val = readl(adpt->base + EMAC_RXQ_CTRL_1);
	val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK |
		 RFD_PREF_UP_THRESHOLD_BMSK);
	val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) |
		(RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) |
		(RFD_PREF_UP_TH << RFD_PREF_UP_THRESHOLD_SHFT);
	writel(val, adpt->base + EMAC_RXQ_CTRL_1);

	val = readl(adpt->base + EMAC_RXQ_CTRL_2);
	val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK);
	val |= (RXF_DOF_THRESFHOLD << RXF_DOF_THRESHOLD_SHFT) |
		(RXF_UOF_THRESFHOLD << RXF_UOF_THRESHOLD_SHFT);
	writel(val, adpt->base + EMAC_RXQ_CTRL_2);

	val = readl(adpt->base + EMAC_RXQ_CTRL_3);
	val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK);
	/* The RXD timer field is deliberately left cleared (0). */
	val |= RXD_TH << RXD_THRESHOLD_SHFT;
	writel(val, adpt->base + EMAC_RXQ_CTRL_3);
}
0389
0390
0391 static void emac_mac_dma_config(struct emac_adapter *adpt)
0392 {
0393 u32 dma_ctrl = DMAR_REQ_PRI;
0394
0395 switch (adpt->dma_order) {
0396 case emac_dma_ord_in:
0397 dma_ctrl |= IN_ORDER_MODE;
0398 break;
0399 case emac_dma_ord_enh:
0400 dma_ctrl |= ENH_ORDER_MODE;
0401 break;
0402 case emac_dma_ord_out:
0403 dma_ctrl |= OUT_ORDER_MODE;
0404 break;
0405 default:
0406 break;
0407 }
0408
0409 dma_ctrl |= (((u32)adpt->dmar_block) << REGRDBLEN_SHFT) &
0410 REGRDBLEN_BMSK;
0411 dma_ctrl |= (((u32)adpt->dmaw_block) << REGWRBLEN_SHFT) &
0412 REGWRBLEN_BMSK;
0413 dma_ctrl |= (((u32)adpt->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) &
0414 DMAR_DLY_CNT_BMSK;
0415 dma_ctrl |= (((u32)adpt->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) &
0416 DMAW_DLY_CNT_BMSK;
0417
0418
0419 writel(dma_ctrl, adpt->base + EMAC_DMA_CTRL);
0420 }
0421
0422
0423 static void emac_set_mac_address(struct emac_adapter *adpt, const u8 *addr)
0424 {
0425 u32 sta;
0426
0427
0428
0429
0430
0431
0432 sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) |
0433 (((u32)addr[4]) << 8) | (((u32)addr[5]));
0434 writel(sta, adpt->base + EMAC_MAC_STA_ADDR0);
0435
0436
0437 sta = (((u32)addr[0]) << 8) | (u32)addr[1];
0438 writel(sta, adpt->base + EMAC_MAC_STA_ADDR1);
0439 }
0440
/* One-shot MAC configuration on the way up: station address, RX buffer
 * sizing, ring/queue/DMA programming, and AXI/clock/misc control bits.
 */
static void emac_mac_config(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	unsigned int max_frame;
	u32 val;

	emac_set_mac_address(adpt, netdev->dev_addr);

	/* RX buffers must hold a full frame (header + VLAN + FCS); for
	 * small MTUs the default buffer size is used instead.
	 */
	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	adpt->rxbuf_size = netdev->mtu > EMAC_DEF_RX_BUF_SIZE ?
		ALIGN(max_frame, 8) : EMAC_DEF_RX_BUF_SIZE;

	emac_mac_dma_rings_config(adpt);

	writel(netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
	       adpt->base + EMAC_MAX_FRAM_LEN_CTRL);

	emac_mac_tx_config(adpt);
	emac_mac_rx_config(adpt);
	emac_mac_dma_config(adpt);

	val = readl(adpt->base + EMAC_AXI_MAST_CTRL);
	val &= ~(DATA_BYTE_SWAP | MAX_BOUND);
	val |= MAX_BTYPE;
	writel(val, adpt->base + EMAC_AXI_MAST_CTRL);
	/* disable clock gating; enable the RX-uncompleted interrupt */
	writel(0, adpt->base + EMAC_CLK_GATE_CTRL);
	writel(RX_UNCPL_INT_EN, adpt->base + EMAC_MISC_CTRL);
}
0469
/* Soft-reset the MAC. The controller is stopped first; a short delay
 * lets the reset settle before interrupt clear-on-read is re-enabled.
 */
void emac_mac_reset(struct emac_adapter *adpt)
{
	emac_mac_stop(adpt);

	emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, SOFT_RST);
	usleep_range(100, 150); /* reset settle time — presumably per HW spec; confirm */

	/* interrupt clear-on-read */
	emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
}
0480
/* Enable the MAC and both queues, programming speed, duplex and flow
 * control to match the PHY's resolved link parameters. Called from
 * emac_adjust_link() when the link comes up.
 */
static void emac_mac_start(struct emac_adapter *adpt)
{
	struct phy_device *phydev = adpt->phydev;
	u32 mac, csr1;

	/* enable tx queue */
	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, 0, TXQ_EN);

	/* enable rx queue */
	emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, 0, RXQ_EN);

	/* enable mac control */
	mac = readl(adpt->base + EMAC_MAC_CTRL);
	csr1 = readl(adpt->csr + EMAC_EMAC_WRAPPER_CSR1);

	mac |= TXEN | RXEN;	/* enable RX/TX */

	/* Flow control: start from both directions off. */
	mac &= ~(RXFC | TXFC);

	if (adpt->automatic) {
		/* In autoneg mode the PHY's resolved pause/asym_pause
		 * results override the manual flow-control settings.
		 */
		adpt->rx_flow_control = phydev->pause;
		adpt->tx_flow_control = phydev->pause != phydev->asym_pause;
	}
	mac |= adpt->rx_flow_control ? RXFC : 0;
	mac |= adpt->tx_flow_control ? TXFC : 0;

	/* setup link speed: SPEED(2) = gigabit, SPEED(1) = 10/100 */
	mac &= ~SPEED_MASK;
	if (phydev->speed == SPEED_1000) {
		mac |= SPEED(2);
		csr1 |= FREQ_MODE;
	} else {
		mac |= SPEED(1);
		csr1 &= ~FREQ_MODE;
	}

	if (phydev->duplex == DUPLEX_FULL)
		mac |= FULLD;
	else
		mac &= ~FULLD;

	/* other parameters: CRC generation/stripping, preamble length,
	 * broadcast reception, frame-length checking; RX checksum offload
	 * and the remaining feature bits are disabled here.
	 */
	mac |= (CRCE | PCRCE);
	mac |= ((adpt->preamble << PRLEN_SHFT) & PRLEN_BMSK);
	mac |= BROAD_EN;
	mac |= FLCHK;
	mac &= ~RX_CHKSUM_EN;
	mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
		 DEBUG_MODE | SINGLE_PAUSE_MODE);

	/* Optionally re-enable single-pause mode per the adapter config.
	 * NOTE(review): presumably a workaround for pause-frame behavior
	 * on some platforms — confirm against hardware errata.
	 */
	mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;

	writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);

	writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);

	/* Program interrupt moderation and enable clear-on-read,
	 * low-power mode and both moderators.
	 */
	writel_relaxed(adpt->irq_mod, adpt->base + EMAC_IRQ_MOD_TIM_INIT);
	writel_relaxed(INT_RD_CLR_EN | LPW_MODE | IRQ_MODERATOR_EN |
			IRQ_MODERATOR2_EN, adpt->base + EMAC_DMA_MAS_CTRL);

	emac_mac_mode_config(adpt);

	/* disable the Atheros header feature */
	emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL,
			  (HEADER_ENABLE | HEADER_CNT_EN), 0);
}
0575
/* Disable both queues and RX/TX at the MAC, then wait for in-flight
 * work to drain before the caller touches the rings.
 */
void emac_mac_stop(struct emac_adapter *adpt)
{
	emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, RXQ_EN, 0);
	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, TXQ_EN, 0);
	emac_reg_update32(adpt->base + EMAC_MAC_CTRL, TXEN | RXEN, 0);
	usleep_range(1000, 1050); /* ~1ms drain time — presumably per HW spec; confirm */
}
0583
0584
0585 static void emac_tx_q_descs_free(struct emac_adapter *adpt)
0586 {
0587 struct emac_tx_queue *tx_q = &adpt->tx_q;
0588 unsigned int i;
0589 size_t size;
0590
0591
0592 if (!tx_q->tpd.tpbuff)
0593 return;
0594
0595 for (i = 0; i < tx_q->tpd.count; i++) {
0596 struct emac_buffer *tpbuf = GET_TPD_BUFFER(tx_q, i);
0597
0598 if (tpbuf->dma_addr) {
0599 dma_unmap_single(adpt->netdev->dev.parent,
0600 tpbuf->dma_addr, tpbuf->length,
0601 DMA_TO_DEVICE);
0602 tpbuf->dma_addr = 0;
0603 }
0604 if (tpbuf->skb) {
0605 dev_kfree_skb_any(tpbuf->skb);
0606 tpbuf->skb = NULL;
0607 }
0608 }
0609
0610 size = sizeof(struct emac_buffer) * tx_q->tpd.count;
0611 memset(tx_q->tpd.tpbuff, 0, size);
0612
0613
0614 memset(tx_q->tpd.v_addr, 0, tx_q->tpd.size);
0615
0616 tx_q->tpd.consume_idx = 0;
0617 tx_q->tpd.produce_idx = 0;
0618 }
0619
0620
0621 static void emac_rx_q_free_descs(struct emac_adapter *adpt)
0622 {
0623 struct device *dev = adpt->netdev->dev.parent;
0624 struct emac_rx_queue *rx_q = &adpt->rx_q;
0625 unsigned int i;
0626 size_t size;
0627
0628
0629 if (!rx_q->rfd.rfbuff)
0630 return;
0631
0632 for (i = 0; i < rx_q->rfd.count; i++) {
0633 struct emac_buffer *rfbuf = GET_RFD_BUFFER(rx_q, i);
0634
0635 if (rfbuf->dma_addr) {
0636 dma_unmap_single(dev, rfbuf->dma_addr, rfbuf->length,
0637 DMA_FROM_DEVICE);
0638 rfbuf->dma_addr = 0;
0639 }
0640 if (rfbuf->skb) {
0641 dev_kfree_skb(rfbuf->skb);
0642 rfbuf->skb = NULL;
0643 }
0644 }
0645
0646 size = sizeof(struct emac_buffer) * rx_q->rfd.count;
0647 memset(rx_q->rfd.rfbuff, 0, size);
0648
0649
0650 memset(rx_q->rrd.v_addr, 0, rx_q->rrd.size);
0651 rx_q->rrd.produce_idx = 0;
0652 rx_q->rrd.consume_idx = 0;
0653
0654 memset(rx_q->rfd.v_addr, 0, rx_q->rfd.size);
0655 rx_q->rfd.produce_idx = 0;
0656 rx_q->rfd.consume_idx = 0;
0657 }
0658
0659
/* Free the TX buffer tracking array and forget the carved-out ring
 * addresses; the backing coherent block is freed separately by
 * emac_mac_rx_tx_rings_free_all().
 */
static void emac_tx_q_bufs_free(struct emac_adapter *adpt)
{
	struct emac_tx_queue *tx_q = &adpt->tx_q;

	emac_tx_q_descs_free(adpt);

	kfree(tx_q->tpd.tpbuff);
	tx_q->tpd.tpbuff = NULL;
	tx_q->tpd.v_addr = NULL;
	tx_q->tpd.dma_addr = 0;
	tx_q->tpd.size = 0;
}
0672
0673
0674 static int emac_tx_q_desc_alloc(struct emac_adapter *adpt,
0675 struct emac_tx_queue *tx_q)
0676 {
0677 struct emac_ring_header *ring_header = &adpt->ring_header;
0678 int node = dev_to_node(adpt->netdev->dev.parent);
0679 size_t size;
0680
0681 size = sizeof(struct emac_buffer) * tx_q->tpd.count;
0682 tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node);
0683 if (!tx_q->tpd.tpbuff)
0684 return -ENOMEM;
0685
0686 tx_q->tpd.size = tx_q->tpd.count * (adpt->tpd_size * 4);
0687 tx_q->tpd.dma_addr = ring_header->dma_addr + ring_header->used;
0688 tx_q->tpd.v_addr = ring_header->v_addr + ring_header->used;
0689 ring_header->used += ALIGN(tx_q->tpd.size, 8);
0690 tx_q->tpd.produce_idx = 0;
0691 tx_q->tpd.consume_idx = 0;
0692
0693 return 0;
0694 }
0695
0696
/* Free the RX buffer tracking array and forget the carved-out RRD/RFD
 * ring addresses; the backing coherent block is freed separately by
 * emac_mac_rx_tx_rings_free_all().
 */
static void emac_rx_q_bufs_free(struct emac_adapter *adpt)
{
	struct emac_rx_queue *rx_q = &adpt->rx_q;

	emac_rx_q_free_descs(adpt);

	kfree(rx_q->rfd.rfbuff);
	rx_q->rfd.rfbuff = NULL;

	rx_q->rfd.v_addr = NULL;
	rx_q->rfd.dma_addr = 0;
	rx_q->rfd.size = 0;

	rx_q->rrd.v_addr = NULL;
	rx_q->rrd.dma_addr = 0;
	rx_q->rrd.size = 0;
}
0714
0715
0716 static int emac_rx_descs_alloc(struct emac_adapter *adpt)
0717 {
0718 struct emac_ring_header *ring_header = &adpt->ring_header;
0719 int node = dev_to_node(adpt->netdev->dev.parent);
0720 struct emac_rx_queue *rx_q = &adpt->rx_q;
0721 size_t size;
0722
0723 size = sizeof(struct emac_buffer) * rx_q->rfd.count;
0724 rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node);
0725 if (!rx_q->rfd.rfbuff)
0726 return -ENOMEM;
0727
0728 rx_q->rrd.size = rx_q->rrd.count * (adpt->rrd_size * 4);
0729 rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4);
0730
0731 rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used;
0732 rx_q->rrd.v_addr = ring_header->v_addr + ring_header->used;
0733 ring_header->used += ALIGN(rx_q->rrd.size, 8);
0734
0735 rx_q->rfd.dma_addr = ring_header->dma_addr + ring_header->used;
0736 rx_q->rfd.v_addr = ring_header->v_addr + ring_header->used;
0737 ring_header->used += ALIGN(rx_q->rfd.size, 8);
0738
0739 rx_q->rrd.produce_idx = 0;
0740 rx_q->rrd.consume_idx = 0;
0741
0742 rx_q->rfd.produce_idx = 0;
0743 rx_q->rfd.consume_idx = 0;
0744
0745 return 0;
0746 }
0747
0748
/* Allocate one coherent DMA block and carve the TPD, RRD and RFD rings
 * out of it. Returns 0 or a negative errno; on failure everything
 * allocated so far is unwound.
 */
int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	struct device *dev = adpt->netdev->dev.parent;
	unsigned int num_tx_descs = adpt->tx_desc_cnt;
	unsigned int num_rx_descs = adpt->rx_desc_cnt;
	int ret;

	adpt->tx_q.tpd.count = adpt->tx_desc_cnt;

	adpt->rx_q.rrd.count = adpt->rx_desc_cnt;
	adpt->rx_q.rfd.count = adpt->rx_desc_cnt;

	/* Total ring block: TPD + RFD + RRD descriptors, in 32-bit-word
	 * units, plus 8 bytes of alignment slack per ring (one TX ring,
	 * two RX rings).
	 */
	ring_header->size = num_tx_descs * (adpt->tpd_size * 4) +
			    num_rx_descs * (adpt->rfd_size * 4) +
			    num_rx_descs * (adpt->rrd_size * 4) +
			    8 + 2 * 8;

	ring_header->used = 0;
	ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
						 &ring_header->dma_addr,
						 GFP_KERNEL);
	if (!ring_header->v_addr)
		return -ENOMEM;

	/* Start carving at the first 8-byte-aligned offset. */
	ring_header->used = ALIGN(ring_header->dma_addr, 8) -
			    ring_header->dma_addr;

	ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q);
	if (ret) {
		netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n");
		goto err_alloc_tx;
	}

	ret = emac_rx_descs_alloc(adpt);
	if (ret) {
		netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n");
		goto err_alloc_rx;
	}

	return 0;

err_alloc_rx:
	emac_tx_q_bufs_free(adpt);
err_alloc_tx:
	dma_free_coherent(dev, ring_header->size,
			  ring_header->v_addr, ring_header->dma_addr);

	ring_header->v_addr = NULL;
	ring_header->dma_addr = 0;
	ring_header->size = 0;
	ring_header->used = 0;

	return ret;
}
0807
0808
/* Free all TX/RX ring buffers and the shared coherent DMA block. */
void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	struct device *dev = adpt->netdev->dev.parent;

	emac_tx_q_bufs_free(adpt);
	emac_rx_q_bufs_free(adpt);

	dma_free_coherent(dev, ring_header->size,
			  ring_header->v_addr, ring_header->dma_addr);

	ring_header->v_addr = NULL;
	ring_header->dma_addr = 0;
	ring_header->size = 0;
	ring_header->used = 0;
}
0825
0826
0827 static void emac_mac_rx_tx_ring_reset_all(struct emac_adapter *adpt)
0828 {
0829 unsigned int i;
0830
0831 adpt->tx_q.tpd.produce_idx = 0;
0832 adpt->tx_q.tpd.consume_idx = 0;
0833 for (i = 0; i < adpt->tx_q.tpd.count; i++)
0834 adpt->tx_q.tpd.tpbuff[i].dma_addr = 0;
0835
0836 adpt->rx_q.rrd.produce_idx = 0;
0837 adpt->rx_q.rrd.consume_idx = 0;
0838 adpt->rx_q.rfd.produce_idx = 0;
0839 adpt->rx_q.rfd.consume_idx = 0;
0840 for (i = 0; i < adpt->rx_q.rfd.count; i++)
0841 adpt->rx_q.rfd.rfbuff[i].dma_addr = 0;
0842 }
0843
0844
0845 static void emac_mac_rx_rfd_create(struct emac_adapter *adpt,
0846 struct emac_rx_queue *rx_q,
0847 dma_addr_t addr)
0848 {
0849 u32 *hw_rfd = EMAC_RFD(rx_q, adpt->rfd_size, rx_q->rfd.produce_idx);
0850
0851 *(hw_rfd++) = lower_32_bits(addr);
0852 *hw_rfd = upper_32_bits(addr);
0853
0854 if (++rx_q->rfd.produce_idx == rx_q->rfd.count)
0855 rx_q->rfd.produce_idx = 0;
0856 }
0857
0858
/* Refill the RX free-descriptor ring with freshly allocated, DMA-mapped
 * skbs until the ring is full (one slot ahead of the producer must stay
 * empty) or an allocation/mapping fails, then publish the new producer
 * index to hardware.
 */
static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
				     struct emac_rx_queue *rx_q)
{
	struct emac_buffer *curr_rxbuf;
	struct emac_buffer *next_rxbuf;
	unsigned int count = 0;
	u32 next_produce_idx;

	next_produce_idx = rx_q->rfd.produce_idx + 1;
	if (next_produce_idx == rx_q->rfd.count)
		next_produce_idx = 0;

	curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
	next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);

	/* Keep filling while the slot after the producer is still free. */
	while (!next_rxbuf->dma_addr) {
		struct sk_buff *skb;
		int ret;

		skb = netdev_alloc_skb_ip_align(adpt->netdev, adpt->rxbuf_size);
		if (!skb)
			break;

		curr_rxbuf->dma_addr =
			dma_map_single(adpt->netdev->dev.parent, skb->data,
				       adpt->rxbuf_size, DMA_FROM_DEVICE);

		ret = dma_mapping_error(adpt->netdev->dev.parent,
					curr_rxbuf->dma_addr);
		if (ret) {
			dev_kfree_skb(skb);
			break;
		}
		curr_rxbuf->skb = skb;
		curr_rxbuf->length = adpt->rxbuf_size;

		/* Publish the buffer into the RFD ring (advances produce_idx). */
		emac_mac_rx_rfd_create(adpt, rx_q, curr_rxbuf->dma_addr);
		next_produce_idx = rx_q->rfd.produce_idx + 1;
		if (next_produce_idx == rx_q->rfd.count)
			next_produce_idx = 0;

		curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
		next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);
		count++;
	}

	if (count) {
		/* Tell hardware where the producer now stands. */
		u32 prod_idx = (rx_q->rfd.produce_idx << rx_q->produce_shift) &
				rx_q->produce_mask;
		emac_reg_update32(adpt->base + rx_q->produce_reg,
				  rx_q->produce_mask, prod_idx);
	}
}
0913
/* phylib adjust_link callback: follow the PHY's link state by starting
 * or stopping the MAC and notifying the internal SGMII block. Note the
 * ordering: MAC up before SGMII up, SGMII down before MAC down.
 */
static void emac_adjust_link(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link) {
		emac_mac_start(adpt);
		emac_sgmii_link_change(adpt, true);
	} else {
		emac_sgmii_link_change(adpt, false);
		emac_mac_stop(adpt);
	}

	phy_print_status(phydev);
}
0929
0930
/* Bring the interface up: reset ring state, program the MAC, pre-fill
 * the RX ring, connect and start the PHY, then enable interrupts, NAPI
 * and the TX queue. Returns 0 or a negative errno from
 * phy_connect_direct().
 */
int emac_mac_up(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	int ret;

	emac_mac_rx_tx_ring_reset_all(adpt);
	emac_mac_config(adpt);
	emac_mac_rx_descs_refill(adpt, &adpt->rx_q);

	adpt->phydev->irq = PHY_POLL;
	ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
				 PHY_INTERFACE_MODE_SGMII);
	if (ret) {
		netdev_err(adpt->netdev, "could not connect phy\n");
		return ret;
	}

	phy_attached_print(adpt->phydev, NULL);

	/* enable mac irq: ack any stale status, then unmask */
	writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
	writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);

	phy_start(adpt->phydev);

	napi_enable(&adpt->rx_q.napi);
	netif_start_queue(netdev);

	return 0;
}
0961
0962
/* Take the interface down: stop the queue and NAPI, quiesce the PHY,
 * mask interrupts, disconnect the PHY, reset the MAC and release all
 * ring descriptors.
 */
void emac_mac_down(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;

	netif_stop_queue(netdev);
	napi_disable(&adpt->rx_q.napi);

	phy_stop(adpt->phydev);

	/* Interrupts are masked and synchronized before phy_disconnect(),
	 * presumably so no ISR/adjust_link work can race the disconnect —
	 * confirm intent before reordering.
	 */
	writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
	writel(0, adpt->base + EMAC_INT_MASK);
	synchronize_irq(adpt->irq.irq);

	phy_disconnect(adpt->phydev);

	emac_mac_reset(adpt);

	emac_tx_q_descs_free(adpt);
	netdev_reset_queue(adpt->netdev);
	emac_rx_q_free_descs(adpt);
}
0988
0989
/* Read one RRD from the ring into @rrd. Returns false if hardware has
 * not yet written back the descriptor at the current consumer index.
 */
static bool emac_rx_process_rrd(struct emac_adapter *adpt,
				struct emac_rx_queue *rx_q,
				struct emac_rrd *rrd)
{
	u32 *hw_rrd = EMAC_RRD(rx_q, adpt->rrd_size, rx_q->rrd.consume_idx);

	/* Word 3 carries the UPDT (ownership) bit; check it first. */
	rrd->word[3] = *(hw_rrd + 3);

	if (!RRD_UPDT(rrd))
		return false;

	rrd->word[4] = 0;
	rrd->word[5] = 0;

	/* Copy words 0-2; after these increments hw_rrd points at word 3,
	 * which is reused for the write-back below.
	 */
	rrd->word[0] = *(hw_rrd++);
	rrd->word[1] = *(hw_rrd++);
	rrd->word[2] = *(hw_rrd++);

	if (unlikely(RRD_NOR(rrd) != 1)) {
		netdev_err(adpt->netdev,
			   "error: multi-RFD not support yet! nor:%lu\n",
			   RRD_NOR(rrd));
	}

	/* Clear UPDT and write word 3 back so hardware can reuse the slot. */
	RRD_UPDT_SET(rrd, 0);
	*hw_rrd = rrd->word[3];

	if (++rx_q->rrd.consume_idx == rx_q->rrd.count)
		rx_q->rrd.consume_idx = 0;

	return true;
}
1023
1024
1025 static void emac_tx_tpd_create(struct emac_adapter *adpt,
1026 struct emac_tx_queue *tx_q, struct emac_tpd *tpd)
1027 {
1028 u32 *hw_tpd;
1029
1030 tx_q->tpd.last_produce_idx = tx_q->tpd.produce_idx;
1031 hw_tpd = EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.produce_idx);
1032
1033 if (++tx_q->tpd.produce_idx == tx_q->tpd.count)
1034 tx_q->tpd.produce_idx = 0;
1035
1036 *(hw_tpd++) = tpd->word[0];
1037 *(hw_tpd++) = tpd->word[1];
1038 *(hw_tpd++) = tpd->word[2];
1039 *hw_tpd = tpd->word[3];
1040 }
1041
1042
1043 static void emac_tx_tpd_mark_last(struct emac_adapter *adpt,
1044 struct emac_tx_queue *tx_q)
1045 {
1046 u32 *hw_tpd =
1047 EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.last_produce_idx);
1048 u32 tmp_tpd;
1049
1050 tmp_tpd = *(hw_tpd + 1);
1051 tmp_tpd |= EMAC_TPD_LAST_FRAGMENT;
1052 *(hw_tpd + 1) = tmp_tpd;
1053 }
1054
1055 static void emac_rx_rfd_clean(struct emac_rx_queue *rx_q, struct emac_rrd *rrd)
1056 {
1057 struct emac_buffer *rfbuf = rx_q->rfd.rfbuff;
1058 u32 consume_idx = RRD_SI(rrd);
1059 unsigned int i;
1060
1061 for (i = 0; i < RRD_NOR(rrd); i++) {
1062 rfbuf[consume_idx].skb = NULL;
1063 if (++consume_idx == rx_q->rfd.count)
1064 consume_idx = 0;
1065 }
1066
1067 rx_q->rfd.consume_idx = consume_idx;
1068 rx_q->rfd.process_idx = consume_idx;
1069 }
1070
1071
1072 static void emac_receive_skb(struct emac_rx_queue *rx_q,
1073 struct sk_buff *skb,
1074 u16 vlan_tag, bool vlan_flag)
1075 {
1076 if (vlan_flag) {
1077 u16 vlan;
1078
1079 EMAC_TAG_TO_VLAN(vlan_tag, vlan);
1080 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
1081 }
1082
1083 napi_gro_receive(&rx_q->napi, skb);
1084 }
1085
1086
/* NAPI receive path: process up to (max_pkts - *num_pkts) completed
 * RRDs from @rx_q, handing good frames to the stack via GRO and
 * dropping frames whose RRD carries an error status, then acknowledge
 * the processed RFDs to hardware and refill the ring.
 */
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
			 int *num_pkts, int max_pkts)
{
	u32 proc_idx, hw_consume_idx, num_consume_pkts;
	struct net_device *netdev = adpt->netdev;
	struct emac_buffer *rfbuf;
	unsigned int count = 0;
	struct emac_rrd rrd;
	struct sk_buff *skb;
	u32 reg;

	reg = readl_relaxed(adpt->base + rx_q->consume_reg);

	/* Number of RRDs hardware has completed since last poll,
	 * accounting for ring wrap-around.
	 */
	hw_consume_idx = (reg & rx_q->consume_mask) >> rx_q->consume_shift;
	num_consume_pkts = (hw_consume_idx >= rx_q->rrd.consume_idx) ?
		(hw_consume_idx - rx_q->rrd.consume_idx) :
		(hw_consume_idx + rx_q->rrd.count - rx_q->rrd.consume_idx);

	do {
		if (!num_consume_pkts)
			break;

		if (!emac_rx_process_rrd(adpt, rx_q, &rrd))
			break;

		if (likely(RRD_NOR(&rrd) == 1)) {
			/* good receive: one RFD per RRD */
			rfbuf = GET_RFD_BUFFER(rx_q, RRD_SI(&rrd));
			dma_unmap_single(adpt->netdev->dev.parent,
					 rfbuf->dma_addr, rfbuf->length,
					 DMA_FROM_DEVICE);
			rfbuf->dma_addr = 0;
			skb = rfbuf->skb;
		} else {
			netdev_err(adpt->netdev,
				   "error: multi-RFD not support yet!\n");
			break;
		}
		emac_rx_rfd_clean(rx_q, &rrd);
		num_consume_pkts--;
		count++;

		/* Drop packets whose RRD status word reports any of the
		 * EMAC_RRD_ERROR conditions (checksum, CRC, truncation,
		 * runt, overflow, length).
		 */
		if (rrd.word[EMAC_RRD_STATS_DW_IDX] & EMAC_RRD_ERROR) {
			netif_dbg(adpt, rx_status, adpt->netdev,
				  "Drop error packet[RRD: 0x%x:0x%x:0x%x:0x%x]\n",
				  rrd.word[0], rrd.word[1],
				  rrd.word[2], rrd.word[3]);

			dev_kfree_skb(skb);
			continue;
		}

		/* Trim the FCS before delivery. */
		skb_put(skb, RRD_PKT_SIZE(&rrd) - ETH_FCS_LEN);
		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (netdev->features & NETIF_F_RXCSUM)
			skb->ip_summed = RRD_L4F(&rrd) ?
				CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
				 (bool)RRD_CVTAG(&rrd));

		(*num_pkts)++;
	} while (*num_pkts < max_pkts);

	if (count) {
		/* Publish the process index and replenish the RFD ring. */
		proc_idx = (rx_q->rfd.process_idx << rx_q->process_shft) &
				rx_q->process_mask;
		emac_reg_update32(adpt->base + rx_q->process_reg,
				  rx_q->process_mask, proc_idx);
		emac_mac_rx_descs_refill(adpt, rx_q);
	}
}
1166
1167
1168 static unsigned int emac_tpd_num_free_descs(struct emac_tx_queue *tx_q)
1169 {
1170 u32 produce_idx = tx_q->tpd.produce_idx;
1171 u32 consume_idx = tx_q->tpd.consume_idx;
1172
1173 return (consume_idx > produce_idx) ?
1174 (consume_idx - produce_idx - 1) :
1175 (tx_q->tpd.count + consume_idx - produce_idx - 1);
1176 }
1177
1178
/* Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap buffers, free skbs, report completed packets/bytes to the BQL
 * layer, and wake the queue once enough descriptors are free again.
 */
void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
{
	u32 reg = readl_relaxed(adpt->base + tx_q->consume_reg);
	u32 hw_consume_idx, pkts_compl = 0, bytes_compl = 0;
	struct emac_buffer *tpbuf;

	hw_consume_idx = (reg & tx_q->consume_mask) >> tx_q->consume_shift;

	while (tx_q->tpd.consume_idx != hw_consume_idx) {
		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
		if (tpbuf->dma_addr) {
			dma_unmap_page(adpt->netdev->dev.parent,
				       tpbuf->dma_addr, tpbuf->length,
				       DMA_TO_DEVICE);
			tpbuf->dma_addr = 0;
		}

		/* Only the descriptor holding the skb counts as a packet;
		 * fragment descriptors carry no skb.
		 */
		if (tpbuf->skb) {
			pkts_compl++;
			bytes_compl += tpbuf->skb->len;
			dev_consume_skb_irq(tpbuf->skb);
			tpbuf->skb = NULL;
		}

		if (++tx_q->tpd.consume_idx == tx_q->tpd.count)
			tx_q->tpd.consume_idx = 0;
	}

	netdev_completed_queue(adpt->netdev, pkts_compl, bytes_compl);

	/* Wake the queue once a maximally fragmented skb would fit. */
	if (netif_queue_stopped(adpt->netdev))
		if (emac_tpd_num_free_descs(tx_q) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(adpt->netdev);
}
1213
1214
/* Bind the RX and TX queue software state to the hardware mailbox registers
 * and bit fields used for the producer/process/consumer ring indices.
 */
void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
				  struct emac_adapter *adpt)
{
	adpt->rx_q.netdev = adpt->netdev;

	/* RFD producer index (software tells hardware about new RX buffers) */
	adpt->rx_q.produce_reg  = EMAC_MAILBOX_0;
	adpt->rx_q.produce_mask = RFD0_PROD_IDX_BMSK;
	adpt->rx_q.produce_shift = RFD0_PROD_IDX_SHFT;

	/* RFD process index (software acknowledges processed RX buffers) */
	adpt->rx_q.process_reg  = EMAC_MAILBOX_0;
	adpt->rx_q.process_mask = RFD0_PROC_IDX_BMSK;
	adpt->rx_q.process_shft = RFD0_PROC_IDX_SHFT;

	/* RFD consumer index (hardware reports consumed RX buffers) */
	adpt->rx_q.consume_reg  = EMAC_MAILBOX_3;
	adpt->rx_q.consume_mask = RFD0_CONS_IDX_BMSK;
	adpt->rx_q.consume_shift = RFD0_CONS_IDX_SHFT;

	adpt->rx_q.irq   = &adpt->irq;
	adpt->rx_q.intr  = adpt->irq.mask & ISR_RX_PKT;

	/* TPD producer index (software posts new TX descriptors) */
	adpt->tx_q.produce_reg  = EMAC_MAILBOX_15;
	adpt->tx_q.produce_mask = NTPD_PROD_IDX_BMSK;
	adpt->tx_q.produce_shift = NTPD_PROD_IDX_SHFT;

	/* TPD consumer index (hardware reports completed TX descriptors) */
	adpt->tx_q.consume_reg  = EMAC_MAILBOX_2;
	adpt->tx_q.consume_mask = NTPD_CONS_IDX_BMSK;
	adpt->tx_q.consume_shift = NTPD_CONS_IDX_SHFT;
}
1243
1244
1245 static int emac_tso_csum(struct emac_adapter *adpt,
1246 struct emac_tx_queue *tx_q,
1247 struct sk_buff *skb,
1248 struct emac_tpd *tpd)
1249 {
1250 unsigned int hdr_len;
1251 int ret;
1252
1253 if (skb_is_gso(skb)) {
1254 if (skb_header_cloned(skb)) {
1255 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1256 if (unlikely(ret))
1257 return ret;
1258 }
1259
1260 if (skb->protocol == htons(ETH_P_IP)) {
1261 u32 pkt_len = ((unsigned char *)ip_hdr(skb) - skb->data)
1262 + ntohs(ip_hdr(skb)->tot_len);
1263 if (skb->len > pkt_len)
1264 pskb_trim(skb, pkt_len);
1265 }
1266
1267 hdr_len = skb_tcp_all_headers(skb);
1268 if (unlikely(skb->len == hdr_len)) {
1269
1270 netif_warn(adpt, tx_err, adpt->netdev,
1271 "tso not needed for packet with 0 data\n");
1272 goto do_csum;
1273 }
1274
1275 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
1276 ip_hdr(skb)->check = 0;
1277 tcp_hdr(skb)->check =
1278 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1279 ip_hdr(skb)->daddr,
1280 0, IPPROTO_TCP, 0);
1281 TPD_IPV4_SET(tpd, 1);
1282 }
1283
1284 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
1285
1286 struct emac_tpd extra_tpd;
1287
1288 memset(tpd, 0, sizeof(*tpd));
1289 memset(&extra_tpd, 0, sizeof(extra_tpd));
1290
1291 tcp_v6_gso_csum_prep(skb);
1292
1293 TPD_PKT_LEN_SET(&extra_tpd, skb->len);
1294 TPD_LSO_SET(&extra_tpd, 1);
1295 TPD_LSOV_SET(&extra_tpd, 1);
1296 emac_tx_tpd_create(adpt, tx_q, &extra_tpd);
1297 TPD_LSOV_SET(tpd, 1);
1298 }
1299
1300 TPD_LSO_SET(tpd, 1);
1301 TPD_TCPHDR_OFFSET_SET(tpd, skb_transport_offset(skb));
1302 TPD_MSS_SET(tpd, skb_shinfo(skb)->gso_size);
1303 return 0;
1304 }
1305
1306 do_csum:
1307 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1308 unsigned int css, cso;
1309
1310 cso = skb_transport_offset(skb);
1311 if (unlikely(cso & 0x1)) {
1312 netdev_err(adpt->netdev,
1313 "error: payload offset should be even\n");
1314 return -EINVAL;
1315 }
1316 css = cso + skb->csum_offset;
1317
1318 TPD_PAYLOAD_OFFSET_SET(tpd, cso >> 1);
1319 TPD_CXSUM_OFFSET_SET(tpd, css >> 1);
1320 TPD_CSX_SET(tpd, 1);
1321 }
1322
1323 return 0;
1324 }
1325
1326
/* DMA-map every piece of @skb (headers, linear remainder, page frags) and
 * emit one TPD per mapping.  On any mapping failure the already-created
 * descriptors are unwound and the skb is freed.  On success the skb is
 * attached to the LAST buffer so TX completion frees it exactly once.
 */
static void emac_tx_fill_tpd(struct emac_adapter *adpt,
			     struct emac_tx_queue *tx_q, struct sk_buff *skb,
			     struct emac_tpd *tpd)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int first = tx_q->tpd.produce_idx; /* for error unwind */
	unsigned int len = skb_headlen(skb);
	struct emac_buffer *tpbuf = NULL;
	unsigned int mapped_len = 0;
	unsigned int i;
	int count = 0;	/* number of TPDs created so far */
	int ret;

	/* For LSO the protocol headers get their own descriptor. */
	if (TPD_LSO(tpd)) {
		mapped_len = skb_tcp_all_headers(skb);

		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
		tpbuf->length = mapped_len;
		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
					       virt_to_page(skb->data),
					       offset_in_page(skb->data),
					       tpbuf->length,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(adpt->netdev->dev.parent,
					tpbuf->dma_addr);
		if (ret)
			goto error;

		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
		TPD_BUF_LEN_SET(tpd, tpbuf->length);
		emac_tx_tpd_create(adpt, tx_q, tpd);
		count++;
	}

	/* Remainder of the linear area (all of it in the non-LSO case). */
	if (mapped_len < len) {
		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
		tpbuf->length = len - mapped_len;
		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
					       virt_to_page(skb->data +
							    mapped_len),
					       offset_in_page(skb->data +
							      mapped_len),
					       tpbuf->length, DMA_TO_DEVICE);
		ret = dma_mapping_error(adpt->netdev->dev.parent,
					tpbuf->dma_addr);
		if (ret)
			goto error;

		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
		TPD_BUF_LEN_SET(tpd, tpbuf->length);
		emac_tx_tpd_create(adpt, tx_q, tpd);
		count++;
	}

	/* One descriptor per page fragment. */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
		tpbuf->length = skb_frag_size(frag);
		tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent,
						   frag, 0, tpbuf->length,
						   DMA_TO_DEVICE);
		ret = dma_mapping_error(adpt->netdev->dev.parent,
					tpbuf->dma_addr);
		if (ret)
			goto error;

		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
		TPD_BUF_LEN_SET(tpd, tpbuf->length);
		emac_tx_tpd_create(adpt, tx_q, tpd);
		count++;
	}

	/* All descriptors must be visible before the last one is flagged. */
	wmb();
	emac_tx_tpd_mark_last(adpt, tx_q);

	/* Attach the skb to the last buffer so completion frees it once.
	 * NOTE(review): tpbuf is NULL here if no descriptor was created
	 * (zero-length linear area, no frags, no LSO) -- presumably callers
	 * never pass such an skb; confirm upstream.
	 */
	tpbuf->skb = skb;

	return;

error:
	/* A mapping failed: rewind the producer index and unmap everything
	 * created so far, then drop the packet.
	 */
	tx_q->tpd.produce_idx = first;

	while (count--) {
		tpbuf = GET_TPD_BUFFER(tx_q, first);
		dma_unmap_page(adpt->netdev->dev.parent, tpbuf->dma_addr,
			       tpbuf->length, DMA_TO_DEVICE);
		tpbuf->dma_addr = 0;
		tpbuf->length = 0;

		if (++first == tx_q->tpd.count)
			first = 0;
	}

	dev_kfree_skb(skb);
}
1432
1433
1434 netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
1435 struct emac_tx_queue *tx_q,
1436 struct sk_buff *skb)
1437 {
1438 struct emac_tpd tpd;
1439 u32 prod_idx;
1440 int len;
1441
1442 memset(&tpd, 0, sizeof(tpd));
1443
1444 if (emac_tso_csum(adpt, tx_q, skb, &tpd) != 0) {
1445 dev_kfree_skb_any(skb);
1446 return NETDEV_TX_OK;
1447 }
1448
1449 if (skb_vlan_tag_present(skb)) {
1450 u16 tag;
1451
1452 EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag);
1453 TPD_CVLAN_TAG_SET(&tpd, tag);
1454 TPD_INSTC_SET(&tpd, 1);
1455 }
1456
1457 if (skb_network_offset(skb) != ETH_HLEN)
1458 TPD_TYP_SET(&tpd, 1);
1459
1460 len = skb->len;
1461 emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
1462
1463 netdev_sent_queue(adpt->netdev, len);
1464
1465
1466
1467
1468
1469
1470 if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3))
1471 netif_stop_queue(adpt->netdev);
1472
1473
1474 prod_idx = (tx_q->tpd.produce_idx << tx_q->produce_shift) &
1475 tx_q->produce_mask;
1476 emac_reg_update32(adpt->base + tx_q->produce_reg,
1477 tx_q->produce_mask, prod_idx);
1478
1479 return NETDEV_TX_OK;
1480 }