Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /* Applied Micro X-Gene SoC Ethernet Driver
0003  *
0004  * Copyright (c) 2014, Applied Micro Circuits Corporation
0005  * Authors: Iyappan Subramanian <isubramanian@apm.com>
0006  *      Ravi Patel <rapatel@apm.com>
0007  *      Keyur Chudgar <kchudgar@apm.com>
0008  */
0009 
0010 #include "xgene_enet_main.h"
0011 #include "xgene_enet_hw.h"
0012 
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
    /* Build the ring's cached state words: threshold select, error
     * acceptance and coherency flags, the DMA base address (split
     * across state words 2 and 3) and the encoded ring size.
     */
    u32 *ring_cfg = ring->state;
    u64 addr = ring->dma;
    enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

    ring_cfg[4] |= (1 << SELTHRSH_POS) &
            CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
    ring_cfg[3] |= ACCEPTLERR;
    ring_cfg[2] |= QCOHERENT;

    /* The hardware takes the DMA address with the low 8 bits dropped
     * (assumes a 256-byte-aligned ring base -- TODO confirm against
     * the ring allocation path).
     */
    addr >>= 8;
    ring_cfg[2] |= (addr << RINGADDRL_POS) &
            CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
    addr >>= RINGADDRL_LEN;
    ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
    ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
            CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}
0032 
0033 static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
0034 {
0035     u32 *ring_cfg = ring->state;
0036     bool is_bufpool;
0037     u32 val;
0038 
0039     is_bufpool = xgene_enet_is_bufpool(ring->id);
0040     val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
0041     ring_cfg[4] |= (val << RINGTYPE_POS) &
0042             CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);
0043 
0044     if (is_bufpool) {
0045         ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
0046                 CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
0047     }
0048 }
0049 
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
    u32 *ring_cfg = ring->state;

    /* Enable recombination buffering and program its timeout: the low
     * part (0xf) goes into state word 3, the high part (0x7) into
     * word 4.
     */
    ring_cfg[3] |= RECOMBBUF;
    ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
            CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
    /* NOTE(review): 0x7 is masked but not shifted to RECOMTIMEOUTH_POS;
     * this is only correct if the field starts at bit 0 -- verify
     * against the register layout before changing.
     */
    ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}
0059 
0060 static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
0061                  u32 offset, u32 data)
0062 {
0063     struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
0064 
0065     iowrite32(data, pdata->ring_csr_addr + offset);
0066 }
0067 
0068 static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
0069                  u32 offset, u32 *data)
0070 {
0071     struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
0072 
0073     *data = ioread32(pdata->ring_csr_addr + offset);
0074 }
0075 
0076 static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
0077 {
0078     struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
0079     int i;
0080 
0081     xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
0082     for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
0083         xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
0084                      ring->state[i]);
0085     }
0086 }
0087 
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
    /* Zero the cached state words and flush them to hardware. */
    memset(ring->state, 0, sizeof(ring->state));
    xgene_enet_write_ring_state(ring);
}
0093 
static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
    /* Build the ring's state words (type, recombination buffering for
     * ETH-owned rings, base configuration) and commit them to hardware.
     */
    xgene_enet_ring_set_type(ring);

    if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
        xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
        xgene_enet_ring_set_recombbuf(ring);

    xgene_enet_ring_init(ring);
    xgene_enet_write_ring_state(ring);
}
0105 
0106 static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
0107 {
0108     u32 ring_id_val, ring_id_buf;
0109     bool is_bufpool;
0110 
0111     is_bufpool = xgene_enet_is_bufpool(ring->id);
0112 
0113     ring_id_val = ring->id & GENMASK(9, 0);
0114     ring_id_val |= OVERWRITE;
0115 
0116     ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
0117     ring_id_buf |= PREFETCH_BUF_EN;
0118     if (is_bufpool)
0119         ring_id_buf |= IS_BUFFER_POOL;
0120 
0121     xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
0122     xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
0123 }
0124 
0125 static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
0126 {
0127     u32 ring_id;
0128 
0129     ring_id = ring->id | OVERWRITE;
0130     xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
0131     xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
0132 }
0133 
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
                    struct xgene_enet_desc_ring *ring)
{
    /* Program the ring into hardware and derive its slot count.
     * Returns the ring passed in.
     */
    u32 size = ring->size;
    u32 i, data;
    bool is_bufpool;

    xgene_enet_clr_ring_state(ring);
    xgene_enet_set_ring_state(ring);
    xgene_enet_set_ring_id(ring);

    ring->slots = xgene_enet_get_numslots(ring->id, size);

    /* Only CPU-owned, non-bufpool rings need the slot marking and
     * interrupt-mode setup below.
     */
    is_bufpool = xgene_enet_is_bufpool(ring->id);
    if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
        return ring;

    for (i = 0; i < ring->slots; i++)
        xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

    /* Set this ring's bit in the CSR_RING_NE_INT_MODE register
     * (read-modify-write; bit position derived from the buffer number).
     */
    xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
    data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
    xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

    return ring;
}
0160 
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
    /* Undo xgene_enet_setup_ring(): clear this ring's bit in
     * CSR_RING_NE_INT_MODE for CPU-owned non-bufpool rings, then
     * always tear down the ring id and state.
     */
    u32 data;
    bool is_bufpool;

    is_bufpool = xgene_enet_is_bufpool(ring->id);
    if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
        goto out;

    xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
    data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
    xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
    xgene_enet_clr_desc_ring_id(ring);
    xgene_enet_clr_ring_state(ring);
}
0178 
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
    /* Post a descriptor count to the ring's command register. */
    iowrite32(count, ring->cmd);
}
0183 
0184 static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
0185 {
0186     u32 __iomem *cmd_base = ring->cmd_base;
0187     u32 ring_state, num_msgs;
0188 
0189     ring_state = ioread32(&cmd_base[1]);
0190     num_msgs = GET_VAL(NUMMSGSINQ, ring_state);
0191 
0192     return num_msgs;
0193 }
0194 
0195 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
0196                 enum xgene_enet_err_code status)
0197 {
0198     switch (status) {
0199     case INGRESS_CRC:
0200         ring->rx_crc_errors++;
0201         break;
0202     case INGRESS_CHECKSUM:
0203     case INGRESS_CHECKSUM_COMPUTE:
0204         ring->rx_errors++;
0205         break;
0206     case INGRESS_TRUNC_FRAME:
0207         ring->rx_frame_errors++;
0208         break;
0209     case INGRESS_PKT_LEN:
0210         ring->rx_length_errors++;
0211         break;
0212     case INGRESS_PKT_UNDER:
0213         ring->rx_frame_errors++;
0214         break;
0215     case INGRESS_FIFO_OVERRUN:
0216         ring->rx_fifo_errors++;
0217         break;
0218     default:
0219         break;
0220     }
0221 }
0222 
0223 static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
0224                   u32 offset, u32 val)
0225 {
0226     void __iomem *addr = pdata->eth_csr_addr + offset;
0227 
0228     iowrite32(val, addr);
0229 }
0230 
0231 static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
0232                   u32 offset, u32 val)
0233 {
0234     void __iomem *addr = pdata->eth_ring_if_addr + offset;
0235 
0236     iowrite32(val, addr);
0237 }
0238 
0239 static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
0240                    u32 offset, u32 val)
0241 {
0242     void __iomem *addr = pdata->eth_diag_csr_addr + offset;
0243 
0244     iowrite32(val, addr);
0245 }
0246 
0247 static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
0248                   u32 offset, u32 val)
0249 {
0250     void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
0251 
0252     iowrite32(val, addr);
0253 }
0254 
void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data)
{
    /* Write an indirect MAC register. When an external MDIO driver
     * manages an RGMII PHY, the write is delegated to it instead.
     */
    void __iomem *addr, *wr, *cmd, *cmd_done;
    struct net_device *ndev = pdata->ndev;
    u8 wait = 10;
    u32 done;

    if (pdata->mdio_driver && ndev->phydev &&
        phy_interface_mode_is_rgmii(pdata->phy_mode)) {
        struct mii_bus *bus = ndev->phydev->mdio.bus;

        return xgene_mdio_wr_mac(bus->priv, wr_addr, wr_data);
    }

    addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
    wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
    cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
    cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

    /* Indirect write protocol under mac_lock: latch address and data,
     * issue the command, poll the done flag (bounded at ~10us), then
     * release the command register.
     */
    spin_lock(&pdata->mac_lock);
    iowrite32(wr_addr, addr);
    iowrite32(wr_data, wr);
    iowrite32(XGENE_ENET_WR_CMD, cmd);

    while (!(done = ioread32(cmd_done)) && wait--)
        udelay(1);

    if (!done)
        netdev_err(ndev, "mac write failed, addr: %04x data: %08x\n",
               wr_addr, wr_data);

    iowrite32(0, cmd);
    spin_unlock(&pdata->mac_lock);
}
0289 
0290 static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
0291                   u32 offset, u32 *val)
0292 {
0293     void __iomem *addr = pdata->eth_csr_addr + offset;
0294 
0295     *val = ioread32(addr);
0296 }
0297 
0298 static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
0299                    u32 offset, u32 *val)
0300 {
0301     void __iomem *addr = pdata->eth_diag_csr_addr + offset;
0302 
0303     *val = ioread32(addr);
0304 }
0305 
0306 static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
0307                   u32 offset, u32 *val)
0308 {
0309     void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
0310 
0311     *val = ioread32(addr);
0312 }
0313 
u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
    /* Read an indirect MAC register and return its value. When an
     * external MDIO driver manages an RGMII PHY, the read is delegated
     * to it instead.
     */
    void __iomem *addr, *rd, *cmd, *cmd_done;
    struct net_device *ndev = pdata->ndev;
    u32 done, rd_data;
    u8 wait = 10;

    if (pdata->mdio_driver && ndev->phydev &&
        phy_interface_mode_is_rgmii(pdata->phy_mode)) {
        struct mii_bus *bus = ndev->phydev->mdio.bus;

        return xgene_mdio_rd_mac(bus->priv, rd_addr);
    }

    addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
    rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
    cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
    cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

    /* Indirect read protocol under mac_lock: latch the address, issue
     * the command, poll the done flag (bounded at ~10us), fetch the
     * data, then release the command register. Note the data register
     * is read even when the poll timed out.
     */
    spin_lock(&pdata->mac_lock);
    iowrite32(rd_addr, addr);
    iowrite32(XGENE_ENET_RD_CMD, cmd);

    while (!(done = ioread32(cmd_done)) && wait--)
        udelay(1);

    if (!done)
        netdev_err(ndev, "mac read failed, addr: %04x\n", rd_addr);

    rd_data = ioread32(rd);
    iowrite32(0, cmd);
    spin_unlock(&pdata->mac_lock);

    return rd_data;
}
0349 
u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
    /* Read an indirect MAC statistics register and return its value.
     * Same indirect protocol as xgene_enet_rd_mac(), but against the
     * stats register block and under stats_lock.
     */
    void __iomem *addr, *rd, *cmd, *cmd_done;
    u32 done, rd_data;
    u8 wait = 10;

    addr = pdata->mcx_stats_addr + STAT_ADDR_REG_OFFSET;
    rd = pdata->mcx_stats_addr + STAT_READ_REG_OFFSET;
    cmd = pdata->mcx_stats_addr + STAT_COMMAND_REG_OFFSET;
    cmd_done = pdata->mcx_stats_addr + STAT_COMMAND_DONE_REG_OFFSET;

    spin_lock(&pdata->stats_lock);
    iowrite32(rd_addr, addr);
    iowrite32(XGENE_ENET_RD_CMD, cmd);

    /* Poll the done flag, bounded at ~10us. */
    while (!(done = ioread32(cmd_done)) && wait--)
        udelay(1);

    if (!done)
        netdev_err(pdata->ndev, "mac stats read failed, addr: %04x\n",
               rd_addr);

    rd_data = ioread32(rd);
    iowrite32(0, cmd);
    spin_unlock(&pdata->stats_lock);

    return rd_data;
}
0378 
0379 static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
0380 {
0381     const u8 *dev_addr = pdata->ndev->dev_addr;
0382     u32 addr0, addr1;
0383 
0384     addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
0385         (dev_addr[1] << 8) | dev_addr[0];
0386     addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
0387 
0388     xgene_enet_wr_mac(pdata, STATION_ADDR0_ADDR, addr0);
0389     xgene_enet_wr_mac(pdata, STATION_ADDR1_ADDR, addr1);
0390 }
0391 
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
    /* Release the ENET block memories from shutdown and wait for all
     * of them to report ready (every status bit set).
     *
     * Returns 0 on success, -ENODEV if the memories never came ready.
     */
    struct net_device *ndev = pdata->ndev;
    u32 data;
    u8 wait = 10;

    xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
    /* Poll up to ~10 * 100us for all-ones in the ready register. */
    do {
        usleep_range(100, 110);
        xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
    } while ((data != 0xffffffff) && wait--);

    if (data != 0xffffffff) {
        netdev_err(ndev, "Failed to release memory from shutdown\n");
        return -ENODEV;
    }

    return 0;
}
0411 
static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
    /* Pulse the MAC soft reset: assert SOFT_RESET1, then release it. */
    xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
    xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
0417 
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
    /* Match the parent Ethernet clock rate to the negotiated link
     * speed. DT platforms set the rate directly; ACPI platforms invoke
     * the corresponding firmware method (S10/S100/S1G).
     */
    struct device *dev = &pdata->pdev->dev;

    if (dev->of_node) {
        struct clk *parent = clk_get_parent(pdata->clk);

        switch (pdata->phy_speed) {
        case SPEED_10:
            clk_set_rate(parent, 2500000);
            break;
        case SPEED_100:
            clk_set_rate(parent, 25000000);
            break;
        default:
            /* Default (1G and unknown speeds): 125 MHz. */
            clk_set_rate(parent, 125000000);
            break;
        }
    }
#ifdef CONFIG_ACPI
    else {
        switch (pdata->phy_speed) {
        case SPEED_10:
            acpi_evaluate_object(ACPI_HANDLE(dev),
                         "S10", NULL, NULL);
            break;
        case SPEED_100:
            acpi_evaluate_object(ACPI_HANDLE(dev),
                         "S100", NULL, NULL);
            break;
        default:
            acpi_evaluate_object(ACPI_HANDLE(dev),
                         "S1G", NULL, NULL);
            break;
        }
    }
#endif
}
0456 
static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
    /* Reprogram MAC interface mode, ICM wait states and RGMII clock
     * configuration for the current pdata->phy_speed, then retune the
     * Ethernet clock. Read-modify-write across several register blocks.
     */
    u32 icm0, icm2, mc2;
    u32 intf_ctl, rgmii, value;

    xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
    xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
    mc2 = xgene_enet_rd_mac(pdata, MAC_CONFIG_2_ADDR);
    intf_ctl = xgene_enet_rd_mac(pdata, INTERFACE_CONTROL_ADDR);
    xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

    switch (pdata->phy_speed) {
    case SPEED_10:
        ENET_INTERFACE_MODE2_SET(&mc2, 1);
        intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
        CFG_MACMODE_SET(&icm0, 0);
        CFG_WAITASYNCRD_SET(&icm2, 500);
        rgmii &= ~CFG_SPEED_1250;
        break;
    case SPEED_100:
        ENET_INTERFACE_MODE2_SET(&mc2, 1);
        intf_ctl &= ~ENET_GHD_MODE;
        intf_ctl |= ENET_LHD_MODE;
        CFG_MACMODE_SET(&icm0, 1);
        CFG_WAITASYNCRD_SET(&icm2, 80);
        rgmii &= ~CFG_SPEED_1250;
        break;
    default:
        /* 1G (and any other speed): gigabit half-duplex mode bit,
         * board-specific TX/RX clock delays, and UniSec bypass.
         */
        ENET_INTERFACE_MODE2_SET(&mc2, 2);
        intf_ctl &= ~ENET_LHD_MODE;
        intf_ctl |= ENET_GHD_MODE;
        CFG_MACMODE_SET(&icm0, 2);
        CFG_WAITASYNCRD_SET(&icm2, 0);
        CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
        CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
        rgmii |= CFG_SPEED_1250;

        xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
        value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
        xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
        break;
    }

    /* Common settings for every speed. */
    mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
    xgene_enet_wr_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
    xgene_enet_wr_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
    xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
    xgene_enet_configure_clock(pdata);

    xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
    xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}
0509 
static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
    /* Program the MAC's maximum frame length register. */
    xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}
0514 
0515 static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
0516                        bool enable)
0517 {
0518     u32 data;
0519 
0520     xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);
0521 
0522     if (enable)
0523         data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
0524     else
0525         data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
0526 
0527     xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
0528 }
0529 
0530 static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
0531 {
0532     u32 data;
0533 
0534     data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
0535 
0536     if (enable)
0537         data |= TX_FLOW_EN;
0538     else
0539         data &= ~TX_FLOW_EN;
0540 
0541     xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
0542 
0543     pdata->mac_ops->enable_tx_pause(pdata, enable);
0544 }
0545 
0546 static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
0547 {
0548     u32 data;
0549 
0550     data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
0551 
0552     if (enable)
0553         data |= RX_FLOW_EN;
0554     else
0555         data &= ~RX_FLOW_EN;
0556 
0557     xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
0558 }
0559 
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
    /* Bring up the gigabit MAC: optional soft reset, speed and station
     * address programming, MDC clock, RSIF drop policy, pause-frame
     * configuration and traffic-gate setup.
     */
    u32 value;

    /* When an external MDIO driver is in charge, it owns the reset. */
    if (!pdata->mdio_driver)
        xgene_gmac_reset(pdata);

    xgene_gmac_set_speed(pdata);
    xgene_gmac_set_mac_addr(pdata);

    /* Adjust MDC clock frequency */
    value = xgene_enet_rd_mac(pdata, MII_MGMT_CONFIG_ADDR);
    MGMT_CLOCK_SEL_SET(&value, 7);
    xgene_enet_wr_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

    /* Enable drop if bufpool not available */
    xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
    value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
    xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

    /* Rtype should be copied from FP */
    xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);

    /* Configure HW pause frame generation: default quanta in the top
     * half of the DPF register, lower half preserved.
     */
    xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
    value = (DEF_QUANTA << 16) | (value & 0xFFFF);
    xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);

    xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
    xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);

    /* Apply the currently configured flow-control state. */
    xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
    xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);

    /* Rx-Tx traffic resume */
    xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

    /* Open the RX/TX data-valid gates and resume RX on port 0. */
    xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
    value &= ~TX_DV_GATE_EN0;
    value &= ~RX_DV_GATE_EN0;
    value |= RESUME_RX0;
    xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

    xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}
0605 
static void xgene_gmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
                    u32 *rx, u32 *tx)
{
    /* Read the combined drop counter register: the ICM field reports
     * RX drops, the ECM field TX drops.
     */
    u32 count;

    xgene_enet_rd_mcx_csr(pdata, ICM_ECM_DROP_COUNT_REG0_ADDR, &count);
    *rx = ICM_DROP_COUNT(count);
    *tx = ECM_DROP_COUNT(count);
    /* Errata: 10GE_4 - Fix ICM_ECM_DROP_COUNT not clear-on-read.
     * The dummy read below performs the clearing side effect.
     */
    xgene_enet_rd_mcx_csr(pdata, ECM_CONFIG0_REG_0_ADDR, &count);
}
0617 
0618 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
0619 {
0620     u32 val = 0xffffffff;
0621 
0622     xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
0623     xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
0624     xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
0625     xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
0626 }
0627 
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
                  u32 dst_ring_num, u16 bufpool_id,
                  u16 nxtbufpool_id)
{
    /* Configure classifier-engine bypass so ingress traffic goes
     * straight to dst_ring_num, drawing buffers from the given pool
     * and next pool.
     */
    u32 cb;
    u32 fpsel, nxtfpsel;

    fpsel = xgene_enet_get_fpsel(bufpool_id);
    nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);

    /* Register 0: enable bypass, IP protocol and header length. */
    xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
    cb |= CFG_CLE_BYPASS_EN0;
    CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
    CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
    xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

    /* Register 1: destination queue and free-pool selections. */
    xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
    CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
    CFG_CLE_FPSEL0_SET(&cb, fpsel);
    CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
    xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}
0650 
0651 static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
0652 {
0653     u32 data;
0654 
0655     data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
0656     xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
0657 }
0658 
0659 static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
0660 {
0661     u32 data;
0662 
0663     data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
0664     xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
0665 }
0666 
0667 static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
0668 {
0669     u32 data;
0670 
0671     data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
0672     xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
0673 }
0674 
0675 static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
0676 {
0677     u32 data;
0678 
0679     data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
0680     xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
0681 }
0682 
0683 bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
0684 {
0685     if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
0686         return false;
0687 
0688     if (ioread32(p->ring_csr_addr + SRST_ADDR))
0689         return false;
0690 
0691     return true;
0692 }
0693 
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
    /* Reset the Ethernet port. With an external MDIO driver only the
     * ring-interface association is reprogrammed; otherwise the block
     * is reset via a clock toggle (DT) or firmware method (ACPI),
     * followed by ECC/memory init.
     *
     * Returns 0 on success, -ENODEV if the ring manager is not ready.
     */
    struct device *dev = &pdata->pdev->dev;

    if (!xgene_ring_mgr_init(pdata))
        return -ENODEV;

    if (pdata->mdio_driver) {
        xgene_enet_config_ring_if_assoc(pdata);
        return 0;
    }

    if (dev->of_node) {
        /* Toggle the clock off and on to reset the block. */
        clk_prepare_enable(pdata->clk);
        udelay(5);
        clk_disable_unprepare(pdata->clk);
        udelay(5);
        clk_prepare_enable(pdata->clk);
        udelay(5);
    } else {
#ifdef CONFIG_ACPI
        /* Prefer _RST; fall back to _INI on firmware without it. */
        acpi_status status;

        status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
                          "_RST", NULL, NULL);
        if (ACPI_FAILURE(status)) {
            acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
                         "_INI", NULL, NULL);
        }
#endif
    }

    /* NOTE(review): the xgene_enet_ecc_init() return value is ignored
     * here -- confirm whether a failed memory release should abort.
     */
    xgene_enet_ecc_init(pdata);
    xgene_enet_config_ring_if_assoc(pdata);

    return 0;
}
0731 
0732 static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
0733                  struct xgene_enet_desc_ring *ring)
0734 {
0735     u32 addr, data;
0736 
0737     if (xgene_enet_is_bufpool(ring->id)) {
0738         addr = ENET_CFGSSQMIFPRESET_ADDR;
0739         data = BIT(xgene_enet_get_fpsel(ring->id));
0740     } else {
0741         addr = ENET_CFGSSQMIWQRESET_ADDR;
0742         data = BIT(xgene_enet_ring_bufnum(ring->id));
0743     }
0744 
0745     xgene_enet_wr_ring_if(pdata, addr, data);
0746 }
0747 
0748 static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
0749 {
0750     struct device *dev = &pdata->pdev->dev;
0751 
0752     if (dev->of_node) {
0753         if (!IS_ERR(pdata->clk))
0754             clk_disable_unprepare(pdata->clk);
0755     }
0756 }
0757 
static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
{
    /* Resolve pause/flow-control settings against the link partner's
     * advertisement and apply any change to the MAC. Only meaningful
     * on full duplex with pause autonegotiation enabled.
     *
     * Always returns 0.
     */
    struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    struct phy_device *phydev = ndev->phydev;
    u16 lcladv, rmtadv = 0;
    u32 rx_pause, tx_pause;
    u8 flowctl = 0;

    if (!phydev->duplex || !pdata->pause_autoneg)
        return 0;

    /* Build the local advertisement from the configured pause state. */
    if (pdata->tx_pause)
        flowctl |= FLOW_CTRL_TX;

    if (pdata->rx_pause)
        flowctl |= FLOW_CTRL_RX;

    lcladv = mii_advertise_flowctrl(flowctl);

    if (phydev->pause)
        rmtadv = LPA_PAUSE_CAP;

    if (phydev->asym_pause)
        rmtadv |= LPA_PAUSE_ASYM;

    /* Standard MII resolution of local vs remote advertisement. */
    flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
    tx_pause = !!(flowctl & FLOW_CTRL_TX);
    rx_pause = !!(flowctl & FLOW_CTRL_RX);

    /* Push only actual changes down to the MAC. */
    if (tx_pause != pdata->tx_pause) {
        pdata->tx_pause = tx_pause;
        pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
    }

    if (rx_pause != pdata->rx_pause) {
        pdata->rx_pause = rx_pause;
        pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
    }

    return 0;
}
0799 
static void xgene_enet_adjust_link(struct net_device *ndev)
{
    /* phylib link-change callback: on link up, reprogram the MAC for
     * any new speed and re-enable the datapath; on link down, quiesce
     * RX/TX and forget the cached speed.
     */
    struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
    struct phy_device *phydev = ndev->phydev;

    if (phydev->link) {
        if (pdata->phy_speed != phydev->speed) {
            pdata->phy_speed = phydev->speed;
            mac_ops->set_speed(pdata);
            mac_ops->rx_enable(pdata);
            mac_ops->tx_enable(pdata);
            phy_print_status(phydev);
        }

        /* Re-resolve pause settings on every link-up notification. */
        xgene_enet_flowctrl_cfg(ndev);
    } else {
        mac_ops->rx_disable(pdata);
        mac_ops->tx_disable(pdata);
        pdata->phy_speed = SPEED_UNKNOWN;
        phy_print_status(phydev);
    }
}
0823 
#ifdef CONFIG_ACPI
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
    /* Resolve the ACPI "phy-handle" property reference to the PHY's
     * acpi_device, or NULL if absent or not a device node.
     *
     * NOTE(review): acpi_node_get_property_reference() returns an int
     * error code, yet it is checked with ACPI_FAILURE() here -- works
     * because negative errnos are nonzero, but verify the intended
     * convention.
     */
    struct fwnode_reference_args args;
    struct fwnode_handle *fw_node;
    int status;

    fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
    status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
                          &args);
    if (ACPI_FAILURE(status) || !is_acpi_device_node(args.fwnode)) {
        dev_dbg(dev, "No matching phy in ACPI table\n");
        return NULL;
    }

    return to_acpi_device_node(args.fwnode);
}
#endif
0842 
int xgene_enet_phy_connect(struct net_device *ndev)
{
    /* Attach the PHY: on DT, try up to two "phy-handle" phandles; on
     * ACPI, look the PHY up via the firmware tables. On success the
     * half-duplex link modes are removed and asymmetric pause enabled.
     *
     * Returns 0 on success, -ENODEV if no PHY could be connected.
     */
    struct xgene_enet_pdata *pdata = netdev_priv(ndev);
    struct device_node *np;
    struct phy_device *phy_dev;
    struct device *dev = &pdata->pdev->dev;
    int i;

    if (dev->of_node) {
        for (i = 0 ; i < 2; i++) {
            np = of_parse_phandle(dev->of_node, "phy-handle", i);
            phy_dev = of_phy_connect(ndev, np,
                         &xgene_enet_adjust_link,
                         0, pdata->phy_mode);
            of_node_put(np);
            if (phy_dev)
                break;
        }

        if (!phy_dev) {
            netdev_err(ndev, "Could not connect to PHY\n");
            return -ENODEV;
        }
    } else {
#ifdef CONFIG_ACPI
        /* The PHY's driver_data is expected to hold its phy_device --
         * set up by the X-Gene MDIO driver; TODO confirm.
         */
        struct acpi_device *adev = acpi_phy_find_device(dev);
        if (adev)
            phy_dev = adev->driver_data;
        else
            phy_dev = NULL;

        if (!phy_dev ||
            phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
                       pdata->phy_mode)) {
            netdev_err(ndev, "Could not connect to PHY\n");
            return  -ENODEV;
        }
#else
        return -ENODEV;
#endif
    }

    pdata->phy_speed = SPEED_UNKNOWN;
    /* This MAC is full-duplex only: drop half-duplex link modes. */
    phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
    phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
    phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
    phy_support_asym_pause(phy_dev);

    return 0;
}
0893 
0894 static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
0895                   struct mii_bus *mdio)
0896 {
0897     struct device *dev = &pdata->pdev->dev;
0898     struct net_device *ndev = pdata->ndev;
0899     struct phy_device *phy;
0900     struct device_node *child_np;
0901     struct device_node *mdio_np = NULL;
0902     u32 phy_addr;
0903     int ret;
0904 
0905     if (dev->of_node) {
0906         for_each_child_of_node(dev->of_node, child_np) {
0907             if (of_device_is_compatible(child_np,
0908                             "apm,xgene-mdio")) {
0909                 mdio_np = child_np;
0910                 break;
0911             }
0912         }
0913 
0914         if (!mdio_np) {
0915             netdev_dbg(ndev, "No mdio node in the dts\n");
0916             return -ENXIO;
0917         }
0918 
0919         return of_mdiobus_register(mdio, mdio_np);
0920     }
0921 
0922     /* Mask out all PHYs from auto probing. */
0923     mdio->phy_mask = ~0;
0924 
0925     /* Register the MDIO bus */
0926     ret = mdiobus_register(mdio);
0927     if (ret)
0928         return ret;
0929 
0930     ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
0931     if (ret)
0932         ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
0933     if (ret)
0934         return -EINVAL;
0935 
0936     phy = xgene_enet_phy_register(mdio, phy_addr);
0937     if (!phy)
0938         return -EIO;
0939 
0940     return ret;
0941 }
0942 
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
    /* Allocate and register the RGMII MDIO bus, then connect the PHY.
     * On any failure the bus is freed (and, after a failed PHY
     * connect, fully removed) before returning.
     *
     * Returns 0 on success or a negative errno.
     */
    struct net_device *ndev = pdata->ndev;
    struct mii_bus *mdio_bus;
    int ret;

    mdio_bus = mdiobus_alloc();
    if (!mdio_bus)
        return -ENOMEM;

    mdio_bus->name = "APM X-Gene MDIO bus";
    mdio_bus->read = xgene_mdio_rgmii_read;
    mdio_bus->write = xgene_mdio_rgmii_write;
    snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
         ndev->name);

    /* The bus accessors take the raw MAC register base as priv. */
    mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
    mdio_bus->parent = &pdata->pdev->dev;

    ret = xgene_mdiobus_register(pdata, mdio_bus);
    if (ret) {
        netdev_err(ndev, "Failed to register MDIO bus\n");
        mdiobus_free(mdio_bus);
        return ret;
    }
    pdata->mdio_bus = mdio_bus;

    ret = xgene_enet_phy_connect(ndev);
    if (ret)
        xgene_enet_mdio_remove(pdata);

    return ret;
}
0976 
0977 void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
0978 {
0979     struct net_device *ndev = pdata->ndev;
0980 
0981     if (ndev->phydev)
0982         phy_disconnect(ndev->phydev);
0983 }
0984 
0985 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
0986 {
0987     struct net_device *ndev = pdata->ndev;
0988 
0989     if (ndev->phydev)
0990         phy_disconnect(ndev->phydev);
0991 
0992     mdiobus_unregister(pdata->mdio_bus);
0993     mdiobus_free(pdata->mdio_bus);
0994     pdata->mdio_bus = NULL;
0995 }
0996 
/* Gigabit MAC operation table, wired to the xgene_gmac_* helpers above. */
const struct xgene_mac_ops xgene_gmac_ops = {
    .init = xgene_gmac_init,
    .reset = xgene_gmac_reset,
    .rx_enable = xgene_gmac_rx_enable,
    .tx_enable = xgene_gmac_tx_enable,
    .rx_disable = xgene_gmac_rx_disable,
    .tx_disable = xgene_gmac_tx_disable,
    .get_drop_cnt = xgene_gmac_get_drop_cnt,
    .set_speed = xgene_gmac_set_speed,
    .set_mac_addr = xgene_gmac_set_mac_addr,
    .set_framesize = xgene_enet_set_frame_size,
    .enable_tx_pause = xgene_gmac_enable_tx_pause,
    .flowctl_tx     = xgene_gmac_flowctl_tx,
    .flowctl_rx     = xgene_gmac_flowctl_rx,
};
1012 
/* Port-level operation table: reset, ring clear, CLE bypass, shutdown. */
const struct xgene_port_ops xgene_gport_ops = {
    .reset = xgene_enet_reset,
    .clear = xgene_enet_clear,
    .cle_bypass = xgene_enet_cle_bypass,
    .shutdown = xgene_gport_shutdown,
};
1019 
/* Descriptor-ring operation table (NUM_RING_CONFIG state words,
 * ring-id shift of 6) implemented by the helpers above.
 */
struct xgene_ring_ops xgene_ring1_ops = {
    .num_ring_config = NUM_RING_CONFIG,
    .num_ring_id_shift = 6,
    .setup = xgene_enet_setup_ring,
    .clear = xgene_enet_clear_ring,
    .wr_cmd = xgene_enet_wr_cmd,
    .len = xgene_enet_ring_len,
};