0001
0002
0003
0004
0005
0006
0007 #include <linux/module.h>
0008 #include <linux/etherdevice.h>
0009 #include <linux/platform_device.h>
0010 #include <linux/interrupt.h>
0011 #include <linux/ktime.h>
0012 #include <linux/of_address.h>
0013 #include <linux/phy.h>
0014 #include <linux/of_mdio.h>
0015 #include <linux/of_net.h>
0016 #include <linux/mfd/syscon.h>
0017 #include <linux/regmap.h>
0018
/* system controller: write RESET_DREQ_ALL here to de-assert the PPE reset
 * (used by hip04_reset_dreq(), HI13X1 only)
 */
#define SC_PPE_RESET_DREQ 0x026C

/* per-port PPE registers, accessed through the "port-handle" syscon regmap */
#define PPE_CFG_RX_ADDR 0x100
#define PPE_CFG_POOL_GRP 0x300
#define PPE_CFG_RX_BUF_SIZE 0x400
#define PPE_CFG_RX_FIFO_SIZE 0x500
#define PPE_CURR_BUF_CNT 0xa200

/* GMAC registers (offsets from priv->base) */
#define GE_DUPLEX_TYPE 0x08
#define GE_MAX_FRM_SIZE_REG 0x3c
#define GE_PORT_MODE 0x40
#define GE_PORT_EN 0x44
#define GE_SHORT_RUNTS_THR_REG 0x50
#define GE_TX_LOCAL_PAGE_REG 0x5c
#define GE_TRANSMIT_CONTROL_REG 0x60
#define GE_CF_CRC_STRIP_REG 0x1b0
#define GE_MODE_CHANGE_REG 0x1b4
#define GE_RECV_CONTROL_REG 0x1e0
#define GE_STATION_MAC_ADDRESS 0x210

#define PPE_CFG_BUS_CTRL_REG 0x424
#define PPE_CFG_RX_CTRL_REG 0x428

/* several PPE registers moved between the HI13X1 GMAC and other SoCs */
#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_CPU_ADD_ADDR 0x6D0
#define PPE_CFG_MAX_FRAME_LEN_REG 0x500
#define PPE_CFG_RX_PKT_MODE_REG 0x504
#define PPE_CFG_QOS_VMID_GEN 0x520
#define PPE_CFG_RX_PKT_INT 0x740
#define PPE_INTEN 0x700
#define PPE_INTSTS 0x708
#define PPE_RINT 0x704
#define PPE_CFG_STS_MODE 0x880
#else
#define PPE_CFG_CPU_ADD_ADDR 0x580
#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
#define PPE_CFG_RX_PKT_MODE_REG 0x438
#define PPE_CFG_QOS_VMID_GEN 0x500
#define PPE_CFG_RX_PKT_INT 0x538
#define PPE_INTEN 0x600
#define PPE_INTSTS 0x608
#define PPE_RINT 0x604
#define PPE_CFG_STS_MODE 0x700
#endif

#define PPE_HIS_RX_PKT_CNT 0x804

#define RESET_DREQ_ALL 0xffffffff

/* bits shared by PPE_INTEN / PPE_INTSTS / PPE_RINT */
#define RCV_INT BIT(10)
#define RCV_NOBUF BIT(8)
#define RCV_DROP BIT(7)
#define TX_DROP BIT(6)
#define DEF_INT_ERR (RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK (RCV_INT | DEF_INT_ERR)

/* TX descriptor cfg bits (positions differ on HI13X1) */
#define TX_FREE_MEM BIT(0)
#define TX_READ_ALLOC_L3 BIT(1)
#if defined(CONFIG_HI13X1_GMAC)
#define TX_CLEAR_WB BIT(7)
#define TX_RELEASE_TO_PPE BIT(4)
#define TX_FINISH_CACHE_INV BIT(6)
#define TX_POOL_SHIFT 16
#else
#define TX_CLEAR_WB BIT(4)
#define TX_FINISH_CACHE_INV BIT(2)
#endif
#define TX_L3_CHECKSUM BIT(5)
#define TX_LOOP_BACK BIT(11)

/* RX descriptor pkt_err bits */
#define RX_PKT_DROP BIT(0)
#define RX_L2_ERR BIT(1)
#define RX_PKT_ERR (RX_PKT_DROP | RX_L2_ERR)

/* GE_PORT_MODE speed encodings, per PHY interface type */
#define SGMII_SPEED_1000 0x08
#define SGMII_SPEED_100 0x07
#define SGMII_SPEED_10 0x06
#define MII_SPEED_100 0x01
#define MII_SPEED_10 0x00

#define GE_DUPLEX_FULL BIT(0)
#define GE_DUPLEX_HALF 0x00
#define GE_MODE_CHANGE_EN BIT(0)

#define GE_TX_AUTO_NEG BIT(5)
#define GE_TX_ADD_CRC BIT(6)
#define GE_TX_SHORT_PAD_THROUGH BIT(7)

#define GE_RX_STRIP_CRC BIT(0)
#define GE_RX_STRIP_PAD BIT(3)
#define GE_RX_PAD_EN BIT(4)

#define GE_AUTO_NEG_CTL BIT(0)

/* RX interrupt coalescing written to PPE_CFG_RX_PKT_INT */
#define GE_RX_INT_THRESHOLD BIT(6)
#define GE_RX_TIMEOUT 0x04

#define GE_RX_PORT_EN BIT(1)
#define GE_TX_PORT_EN BIT(2)

#define PPE_CFG_RX_PKT_ALIGN BIT(18)

#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_QOS_VMID_GRP_SHIFT 4
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 7
#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(0)
#define PPE_CFG_QOS_VMID_MODE BIT(15)
#define PPE_CFG_BUS_LOCAL_REL (BIT(9) | BIT(15) | BIT(19) | BIT(23))

/* HI13X1 passes buffer addresses shifted by the 64-byte cache line */
#define PPE_BUF_SIZE_SHIFT 6
#define PPE_TX_BUF_HOLD BIT(31)
#define SOC_CACHE_LINE_MASK 0x3F
#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
#define PPE_CFG_QOS_VMID_MODE BIT(14)
#define PPE_CFG_BUS_LOCAL_REL BIT(14)

/* other SoCs take the unshifted address and have no TX hold bit */
#define PPE_BUF_SIZE_SHIFT 0
#define PPE_TX_BUF_HOLD 0
#endif

#define PPE_CFG_RX_FIFO_FSFU BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT 16
#define PPE_CFG_RX_START_SHIFT 0

#define PPE_CFG_BUS_BIG_ENDIEN BIT(0)

/* ring sizes must stay powers of two: TX_NEXT/RX_NEXT wrap by masking */
#define RX_DESC_NUM 128
#define TX_DESC_NUM 256
#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM-1))
#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM-1))

#define GMAC_PPE_RX_PKT_MAX_LEN 379
#define GMAC_MAX_PKT_LEN 1516
#define GMAC_MIN_PKT_LEN 31
#define RX_BUF_SIZE 1600
#define RESET_TIMEOUT 1000
#define TX_TIMEOUT (6 * HZ)

#define DRV_NAME "hip04-ether"
#define DRV_VERSION "v1.0"

/* bounds enforced by hip04_set_coalesce() */
#define HIP04_MAX_TX_COALESCE_USECS 200
#define HIP04_MIN_TX_COALESCE_USECS 100
#define HIP04_MAX_TX_COALESCE_FRAMES 200
#define HIP04_MIN_TX_COALESCE_FRAMES 100
0172
/* Hardware TX descriptor. All fields are written big-endian via
 * cpu_to_be32 in hip04_mac_start_xmit(); layout differs on HI13X1.
 */
struct tx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[2];
	u32 send_addr;		/* cache-line-aligned DMA address of the data */
	u16 send_size;		/* packet length in bytes */
	u16 data_offset;	/* offset of data within the cache line */
	u32 reserved2[7];
	u32 cfg;		/* TX_* control bits */
	u32 wb_addr;		/* write-back address: hw clears send_addr here */
	u32 reserved3[3];
#else
	u32 send_addr;		/* DMA address of the packet data */
	u32 send_size;		/* packet length in bytes */
	u32 next_addr;
	u32 cfg;		/* TX_* control bits */
	u32 wb_addr;		/* write-back address for completion */
#endif
} __aligned(64);
0191
/* Hardware RX descriptor, placed by the PPE at the start of each RX
 * buffer; fields are big-endian (see hip04_rx_poll()).
 */
struct rx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[3];
	u16 pkt_len;		/* received packet length; 0 marks "no packet" */
	u16 reserved_16;
	u32 reserved2[6];
	u32 pkt_err;		/* RX_PKT_ERR bits on receive error */
	u32 reserved3[5];
#else
	u16 reserved_16;
	u16 pkt_len;		/* received packet length; 0 marks "no packet" */
	u32 reserve1[3];
	u32 pkt_err;		/* RX_PKT_ERR bits on receive error */
	u32 reserve2[4];
#endif
};
0208
/* Per-device driver state, embedded in the net_device private area. */
struct hip04_priv {
	void __iomem *base;		/* GMAC/PPE register window */
#if defined(CONFIG_HI13X1_GMAC)
	void __iomem *sysctrl_base;	/* system controller (reset de-assert) */
#endif
	phy_interface_t phy_mode;
	int chan;			/* RX FIFO start, from "port-handle" */
	unsigned int port;		/* PPE port index into the regmap */
	unsigned int group;		/* buffer pool group */
	unsigned int speed;		/* last speed programmed into the MAC */
	unsigned int duplex;		/* last duplex programmed into the MAC */
	unsigned int reg_inten;		/* shadow of PPE_INTEN */

	struct napi_struct napi;
	struct device *dev;
	struct net_device *ndev;

	struct tx_desc *tx_desc;	/* coherent TX descriptor ring */
	dma_addr_t tx_desc_dma;
	struct sk_buff *tx_skb[TX_DESC_NUM];
	dma_addr_t tx_phys[TX_DESC_NUM];
	unsigned int tx_head;		/* producer index, xmit path only */

	int tx_coalesce_frames;
	int tx_coalesce_usecs;
	struct hrtimer tx_coalesce_timer;	/* kicks NAPI for TX reclaim */

	unsigned char *rx_buf[RX_DESC_NUM];	/* page-frag RX buffers */
	dma_addr_t rx_phys[RX_DESC_NUM];
	unsigned int rx_head;
	unsigned int rx_buf_size;
	unsigned int rx_cnt_remaining;	/* packets reported but not yet polled */

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;		/* PPE syscon regmap */
	struct work_struct tx_timeout_task;

	/* consumer index, written by the reclaim path; kept on its own
	 * cache line to avoid bouncing with tx_head
	 */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};
0250
0251 static inline unsigned int tx_count(unsigned int head, unsigned int tail)
0252 {
0253 return (head - tail) % TX_DESC_NUM;
0254 }
0255
0256 static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
0257 {
0258 struct hip04_priv *priv = netdev_priv(ndev);
0259 u32 val;
0260
0261 priv->speed = speed;
0262 priv->duplex = duplex;
0263
0264 switch (priv->phy_mode) {
0265 case PHY_INTERFACE_MODE_SGMII:
0266 if (speed == SPEED_1000)
0267 val = SGMII_SPEED_1000;
0268 else if (speed == SPEED_100)
0269 val = SGMII_SPEED_100;
0270 else
0271 val = SGMII_SPEED_10;
0272 break;
0273 case PHY_INTERFACE_MODE_MII:
0274 if (speed == SPEED_100)
0275 val = MII_SPEED_100;
0276 else
0277 val = MII_SPEED_10;
0278 break;
0279 default:
0280 netdev_warn(ndev, "not supported mode\n");
0281 val = MII_SPEED_10;
0282 break;
0283 }
0284 writel_relaxed(val, priv->base + GE_PORT_MODE);
0285
0286 val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
0287 writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
0288
0289 val = GE_MODE_CHANGE_EN;
0290 writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
0291 }
0292
/* De-assert the PPE reset through the system controller.
 * Only HI13X1 exposes this register; on other SoCs this is a no-op.
 */
static void hip04_reset_dreq(struct hip04_priv *priv)
{
#if defined(CONFIG_HI13X1_GMAC)
	writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
#endif
}
0299
0300 static void hip04_reset_ppe(struct hip04_priv *priv)
0301 {
0302 u32 val, tmp, timeout = 0;
0303
0304 do {
0305 regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
0306 regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
0307 if (timeout++ > RESET_TIMEOUT)
0308 break;
0309 } while (val & 0xfff);
0310 }
0311
/* One-time PPE/GMAC datapath configuration: buffer pool, RX FIFO,
 * alignment, frame-size limits and CRC/padding handling.
 * Register write order follows the hardware bring-up sequence —
 * do not reorder.
 */
static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	/* make the RX packet counter clear-on-read (see hip04_recv_cnt()) */
	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	/* assign this port to its buffer pool group */
	val = BIT(priv->group);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	/* RX FIFO: depth and start channel for this port */
	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	/* align the IP header (NET_IP_ALIGN) in received buffers */
	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	/* TX: autoneg, append CRC, pass short frames with padding */
	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	/* RX: strip CRC and padding before handing frames up */
	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

#ifndef CONFIG_HI13X1_GMAC
	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
#endif
}
0369
0370 static void hip04_mac_enable(struct net_device *ndev)
0371 {
0372 struct hip04_priv *priv = netdev_priv(ndev);
0373 u32 val;
0374
0375
0376 val = readl_relaxed(priv->base + GE_PORT_EN);
0377 val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
0378 writel_relaxed(val, priv->base + GE_PORT_EN);
0379
0380
0381 val = RCV_INT;
0382 writel_relaxed(val, priv->base + PPE_RINT);
0383
0384
0385 val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
0386 writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
0387
0388
0389 priv->reg_inten = DEF_INT_MASK;
0390 writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
0391 }
0392
0393 static void hip04_mac_disable(struct net_device *ndev)
0394 {
0395 struct hip04_priv *priv = netdev_priv(ndev);
0396 u32 val;
0397
0398
0399 priv->reg_inten &= ~(DEF_INT_MASK);
0400 writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
0401
0402
0403 val = readl_relaxed(priv->base + GE_PORT_EN);
0404 val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
0405 writel_relaxed(val, priv->base + GE_PORT_EN);
0406 }
0407
0408 static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
0409 {
0410 u32 val;
0411
0412 val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
0413 writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
0414 }
0415
0416 static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
0417 {
0418 u32 val;
0419
0420 val = phys >> PPE_BUF_SIZE_SHIFT;
0421 regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
0422 }
0423
/* Number of packets received since the last read. The counter is
 * clear-on-read (PPE_CFG_STS_RX_PKT_CNT_RC set in hip04_config_fifo()).
 */
static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}
0428
0429 static void hip04_update_mac_address(struct net_device *ndev)
0430 {
0431 struct hip04_priv *priv = netdev_priv(ndev);
0432
0433 writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
0434 priv->base + GE_STATION_MAC_ADDRESS);
0435 writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
0436 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
0437 priv->base + GE_STATION_MAC_ADDRESS + 4);
0438 }
0439
/* ndo_set_mac_address: validate and store the new address, then program
 * it into the hardware.
 *
 * Fix: the return value of eth_mac_addr() was ignored, so an invalid
 * (e.g. multicast) address would be reported as accepted and the MAC
 * registers rewritten anyway. Propagate the error instead.
 */
static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	hip04_update_mac_address(ndev);
	return 0;
}
0446
/* Reclaim completed TX descriptors between tx_tail and tx_head.
 * The hardware clears desc->send_addr through wb_addr when a packet has
 * been sent; @force reclaims regardless (used on shutdown).
 * Returns the number of descriptors still outstanding.
 */
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	/* pair with the smp_wmb() in hip04_mac_start_xmit() */
	smp_rmb();
	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			/* hardware has not written back yet */
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	/* publish the new tail before the xmit path re-reads it */
	smp_wmb();

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}
0495
0496 static void hip04_start_tx_timer(struct hip04_priv *priv)
0497 {
0498 unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;
0499
0500
0501 hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
0502 ns, HRTIMER_MODE_REL);
0503 }
0504
/* ndo_start_xmit: map the skb, fill the next TX descriptor (big-endian
 * fields) and kick the PPE. TX completions are reclaimed from NAPI,
 * triggered either by the frame-count threshold or the coalesce timer.
 */
static netdev_tx_t
hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	/* pair with the smp_wmb() in hip04_tx_reclaim() */
	smp_rmb();
	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;

	desc->send_size = (__force u32)cpu_to_be32(skb->len);
#if defined(CONFIG_HI13X1_GMAC)
	/* HI13X1: address is cache-line aligned, remainder in data_offset */
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
		| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
	desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
	desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
#else
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	desc->send_addr = (__force u32)cpu_to_be32(phys);
#endif
	/* hardware zeroes send_addr via wb_addr when transmission is done */
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = (__force u32)cpu_to_be32(phys +
		offsetof(struct tx_desc, send_addr));
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	count++;
	netdev_sent_queue(ndev, skb->len);
	priv->tx_head = TX_NEXT(tx_head);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* publish tx_head/skb state before reclaim can observe it */
	smp_wmb();

	/* enough in flight: reclaim now via NAPI; otherwise arm the timer */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* mask RX interrupt while NAPI polls */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}
0573
/* NAPI poll: reclaim finished TX descriptors, then receive up to
 * @budget packets. Each received buffer starts with a struct rx_desc
 * written by the PPE; a pkt_len of 0 means "no more packets".
 * rx_cnt_remaining tracks packets the clear-on-read hardware counter
 * has reported but we have not consumed yet.
 */
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	tx_remaining = hip04_tx_reclaim(ndev, false);
	priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	while (priv->rx_cnt_remaining && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			/* drop this buffer but still refill the slot */
			goto refill;
		}

		dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		/* descriptor fields are big-endian in the buffer head */
		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu((__force __be16)desc->pkt_len);
		err = be32_to_cpu((__force __be32)desc->pkt_err);

		if (0 == len) {
			/* zero length marks the end of this burst */
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		/* replace the consumed buffer and hand it back to the PPE */
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(priv->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget) {
			/* budget exhausted: stay scheduled, keep IRQ masked */
			--priv->rx_cnt_remaining;
			goto done;
		}

		if (--priv->rx_cnt_remaining == 0)
			priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	}

	/* ring drained: re-enable the receive interrupt and finish polling */
	if (!(priv->reg_inten & RCV_INT)) {
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete_done(napi, rx);
done:
	/* re-arm the coalesce timer if TX work is still outstanding */
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}
0660
/* Interrupt handler: acknowledge all events, account error interrupts,
 * and on RCV_INT mask the receive interrupt and schedule NAPI.
 */
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;

	/* ack everything we are about to handle */
	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt while NAPI polls */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
0695
0696 static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
0697 {
0698 struct hip04_priv *priv;
0699
0700 priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
0701
0702 if (napi_schedule_prep(&priv->napi)) {
0703
0704 priv->reg_inten &= ~(RCV_INT);
0705 writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
0706 __napi_schedule(&priv->napi);
0707 }
0708
0709 return HRTIMER_NORESTART;
0710 }
0711
0712 static void hip04_adjust_link(struct net_device *ndev)
0713 {
0714 struct hip04_priv *priv = netdev_priv(ndev);
0715 struct phy_device *phy = priv->phy;
0716
0717 if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
0718 hip04_config_port(ndev, phy->speed, phy->duplex);
0719 phy_print_status(phy);
0720 }
0721 }
0722
0723 static int hip04_mac_open(struct net_device *ndev)
0724 {
0725 struct hip04_priv *priv = netdev_priv(ndev);
0726 int i;
0727
0728 priv->rx_head = 0;
0729 priv->rx_cnt_remaining = 0;
0730 priv->tx_head = 0;
0731 priv->tx_tail = 0;
0732 hip04_reset_ppe(priv);
0733
0734 for (i = 0; i < RX_DESC_NUM; i++) {
0735 dma_addr_t phys;
0736
0737 phys = dma_map_single(priv->dev, priv->rx_buf[i],
0738 RX_BUF_SIZE, DMA_FROM_DEVICE);
0739 if (dma_mapping_error(priv->dev, phys))
0740 return -EIO;
0741
0742 priv->rx_phys[i] = phys;
0743 hip04_set_recv_desc(priv, phys);
0744 }
0745
0746 if (priv->phy)
0747 phy_start(priv->phy);
0748
0749 netdev_reset_queue(ndev);
0750 netif_start_queue(ndev);
0751 hip04_mac_enable(ndev);
0752 napi_enable(&priv->napi);
0753
0754 return 0;
0755 }
0756
/* ndo_stop: quiesce NAPI and the queue, disable the MAC, force-reclaim
 * all TX descriptors, drain the PPE pool, stop the PHY and unmap every
 * RX buffer (the buffers themselves are freed in hip04_free_ring()).
 */
static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(priv->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}
0781
/* ndo_tx_timeout: runs in softirq context, so defer the stop/open
 * recovery cycle to process context.
 */
static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
0788
/* Deferred TX-timeout recovery: bounce the interface to reset the
 * datapath (see hip04_timeout()).
 */
static void hip04_tx_timeout_task(struct work_struct *work)
{
	struct hip04_priv *priv;

	priv = container_of(work, struct hip04_priv, tx_timeout_task);
	hip04_mac_stop(priv->ndev);
	hip04_mac_open(priv->ndev);
}
0797
/* ethtool get_coalesce: report the current TX coalescing parameters. */
static int hip04_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

	return 0;
}
0810
0811 static int hip04_set_coalesce(struct net_device *netdev,
0812 struct ethtool_coalesce *ec,
0813 struct kernel_ethtool_coalesce *kernel_coal,
0814 struct netlink_ext_ack *extack)
0815 {
0816 struct hip04_priv *priv = netdev_priv(netdev);
0817
0818 if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
0819 ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
0820 (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
0821 ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
0822 return -EINVAL;
0823
0824 priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
0825 priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
0826
0827 return 0;
0828 }
0829
/* ethtool get_drvinfo: report driver name and version. */
static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
0836
/* Only TX usecs/frames coalescing is tunable on this hardware. */
static const struct ethtool_ops hip04_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};
0844
/* Standard netdev operations for the HIP04 MAC. */
static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout		= hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};
0853
0854 static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
0855 {
0856 struct hip04_priv *priv = netdev_priv(ndev);
0857 int i;
0858
0859 priv->tx_desc = dma_alloc_coherent(d,
0860 TX_DESC_NUM * sizeof(struct tx_desc),
0861 &priv->tx_desc_dma, GFP_KERNEL);
0862 if (!priv->tx_desc)
0863 return -ENOMEM;
0864
0865 priv->rx_buf_size = RX_BUF_SIZE +
0866 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
0867 for (i = 0; i < RX_DESC_NUM; i++) {
0868 priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
0869 if (!priv->rx_buf[i])
0870 return -ENOMEM;
0871 }
0872
0873 return 0;
0874 }
0875
0876 static void hip04_free_ring(struct net_device *ndev, struct device *d)
0877 {
0878 struct hip04_priv *priv = netdev_priv(ndev);
0879 int i;
0880
0881 for (i = 0; i < RX_DESC_NUM; i++)
0882 if (priv->rx_buf[i])
0883 skb_free_frag(priv->rx_buf[i]);
0884
0885 for (i = 0; i < TX_DESC_NUM; i++)
0886 if (priv->tx_skb[i])
0887 dev_kfree_skb_any(priv->tx_skb[i]);
0888
0889 dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
0890 priv->tx_desc, priv->tx_desc_dma);
0891 }
0892
/* Platform probe: map registers, parse DT resources ("port-handle",
 * phy-mode, phy-handle), set up the coalesce timer, request the IRQ,
 * connect the PHY, initialise the hardware and register the netdev.
 */
static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = d;
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

#if defined(CONFIG_HI13X1_GMAC)
	/* second resource: system controller for the PPE reset */
	priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->sysctrl_base)) {
		ret = PTR_ERR(priv->sysctrl_base);
		goto init_fail;
	}
#endif

	/* "port-handle" carries <ppe-phandle port channel group> */
	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;
	priv->group = arg.args[2];

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* TX completion defaults: reclaim via NAPI once 3/4 of the ring
	 * is in flight, or after 200us from the coalesce timer,
	 * whichever happens first (see hip04_mac_start_xmit()).
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	ret = of_get_phy_mode(node, &priv->phy_mode);
	if (ret) {
		dev_warn(d, "not find phy-mode\n");
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			/* the PHY driver may simply not be bound yet */
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);

	hip04_reset_dreq(priv);
	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	eth_hw_addr_random(ndev);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "alloc ring fail\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret)
		goto alloc_fail;

	return 0;

alloc_fail:
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);
	free_netdev(ndev);
	return ret;
}
1023
1024 static int hip04_remove(struct platform_device *pdev)
1025 {
1026 struct net_device *ndev = platform_get_drvdata(pdev);
1027 struct hip04_priv *priv = netdev_priv(ndev);
1028 struct device *d = &pdev->dev;
1029
1030 if (priv->phy)
1031 phy_disconnect(priv->phy);
1032
1033 hip04_free_ring(ndev, d);
1034 unregister_netdev(ndev);
1035 of_node_put(priv->phy_node);
1036 cancel_work_sync(&priv->tx_timeout_task);
1037 free_netdev(ndev);
1038
1039 return 0;
1040 }
1041
/* Device-tree match table for the HIP04 MAC. */
static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);

static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= hip04_mac_match,
	},
};
module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");