#include <asm/io.h>
#include "dwmac1000.h"
#include "dwmac_dma.h"

static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
	int i;

	pr_info("dwmac1000: Master AXI performs %s burst length\n",
		!(value & DMA_AXI_UNDEF) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		value |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		value |= DMA_AXI_LPI_XIT_FRM;

	value &= ~DMA_AXI_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
		 DMA_AXI_WR_OSR_LMT_SHIFT;

	value &= ~DMA_AXI_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
		 DMA_AXI_RD_OSR_LMT_SHIFT;

	/* Depending on the UNDEF bit the Master AXI will perform any burst
	 * length according to the BLEN programmed (by default all BLEN are
	 * set).
	 */
	for (i = 0; i < AXI_BLEN; i++) {
		switch (axi->axi_blen[i]) {
		case 256:
			value |= DMA_AXI_BLEN256;
			break;
		case 128:
			value |= DMA_AXI_BLEN128;
			break;
		case 64:
			value |= DMA_AXI_BLEN64;
			break;
		case 32:
			value |= DMA_AXI_BLEN32;
			break;
		case 16:
			value |= DMA_AXI_BLEN16;
			break;
		case 8:
			value |= DMA_AXI_BLEN8;
			break;
		case 4:
			value |= DMA_AXI_BLEN4;
			break;
		}
	}

	writel(value, ioaddr + DMA_AXI_BUS_MODE);
}

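/* Program the DMA bus mode register (CSR0): burst settings, descriptor
 * format and address-aligned beats, then the default DMA interrupt mask.
 */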
static void dwmac1000_dma_init(void __iomem *ioaddr,
			       struct stmmac_dma_cfg *dma_cfg, int atds)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);
	int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
	int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;

	/*
	 * Set the DMA PBL (Programmable Burst Length): USP selects separate
	 * TX (PBL) and RX (RPBL) values, and MAXPBL makes the DMA treat the
	 * programmed PBL values as multiplied by 8.
	 */
	if (dma_cfg->pblx8)
		value |= DMA_BUS_MODE_MAXPBL;
	value |= DMA_BUS_MODE_USP;
	value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
	value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
	value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_BUS_MODE_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_BUS_MODE_MB;

	if (atds)
		value |= DMA_BUS_MODE_ATDS;

	if (dma_cfg->aal)
		value |= DMA_BUS_MODE_AAL;

	writel(value, ioaddr + DMA_BUS_MODE);

	/* Program the default set of DMA interrupts (CSR7) */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}

static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
				  struct stmmac_dma_cfg *dma_cfg,
				  dma_addr_t dma_rx_phy, u32 chan)
{
	/* RX descriptor base address list must be written into DMA CSR3 */
	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
}

static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
				  struct stmmac_dma_cfg *dma_cfg,
				  dma_addr_t dma_tx_phy, u32 chan)
{
	/* TX descriptor base address list must be written into DMA CSR4 */
	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
}

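/* Compute the RX flow-control bits of CSR6: enable hardware flow control and
 * set the activation/deactivation thresholds when the RX FIFO is large enough.
 */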
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
{
	csr6 &= ~DMA_CONTROL_RFA_MASK;
	csr6 &= ~DMA_CONTROL_RFD_MASK;

	/* Leave flow control disabled if the receive fifo size is less than
	 * 4K or 0. Otherwise, send XOFF when the fifo is 1K less than full,
	 * and send XON when it is 2K less than full.
	 */
	if (rxfifosz < 4096) {
		csr6 &= ~DMA_CONTROL_EFC;
		pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n",
			 rxfifosz);
	} else {
		csr6 |= DMA_CONTROL_EFC;
		csr6 |= RFA_FULL_MINUS_1K;
		csr6 |= RFD_FULL_MINUS_2K;
	}
	return csr6;
}

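/* Set the RX operation mode: store-and-forward, or cut-through with the
 * requested threshold, plus RX flow control based on the FIFO size.
 */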
static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
					    u32 channel, int fifosz, u8 qmode)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		csr6 |= DMA_CONTROL_RSF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
		csr6 &= ~DMA_CONTROL_RSF;
		csr6 &= DMA_CONTROL_TC_RX_MASK;
		if (mode <= 32)
			csr6 |= DMA_CONTROL_RTC_32;
		else if (mode <= 64)
			csr6 |= DMA_CONTROL_RTC_64;
		else if (mode <= 96)
			csr6 |= DMA_CONTROL_RTC_96;
		else
			csr6 |= DMA_CONTROL_RTC_128;
	}

	/* Configure flow control based on rx fifo size */
	csr6 = dwmac1000_configure_fc(csr6, fifosz);

	writel(csr6, ioaddr + DMA_CONTROL);
}

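/* Set the TX operation mode: store-and-forward (with operate-on-second-frame),
 * or cut-through with the requested transmit threshold.
 */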
static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
					    u32 channel, int fifosz, u8 qmode)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		csr6 |= DMA_CONTROL_TSF;
		/* Operating on the second frame increases performance,
		 * especially when transmit store-and-forward is used.
		 */
		csr6 |= DMA_CONTROL_OSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
		csr6 &= ~DMA_CONTROL_TSF;
		csr6 &= DMA_CONTROL_TC_TX_MASK;
		/* Set the transmit threshold */
		if (mode <= 32)
			csr6 |= DMA_CONTROL_TTC_32;
		else if (mode <= 64)
			csr6 |= DMA_CONTROL_TTC_64;
		else if (mode <= 128)
			csr6 |= DMA_CONTROL_TTC_128;
		else if (mode <= 192)
			csr6 |= DMA_CONTROL_TTC_192;
		else
			csr6 |= DMA_CONTROL_TTC_256;
	}

	writel(csr6, ioaddr + DMA_CONTROL);
}

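/* Copy the DMA CSR block into the register dump buffer; entries 12..17 of the
 * block are not dumped.
 */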
static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
	int i;

	for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
		if ((i < 12) || (i > 17))
			reg_space[DMA_BUS_MODE / 4 + i] =
				readl(ioaddr + DMA_BUS_MODE + i * 4);
}

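/* Read the DMA HW feature register and decode it into the dma_cap
 * capability flags used by the stmmac core.
 */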
static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
				    struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);

	if (!hw_cap) {
		/* 0x00000000 is the value read on old hardware that does not
		 * implement this register.
		 */
		return -EOPNOTSUPP;
	}

	dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
	dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
	dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
	dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
	dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
	dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
	dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
	/* MMC (RMON) counters */
	dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
	/* IEEE 1588-2002 timestamping */
	dma_cap->time_stamp = (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
	/* IEEE 1588-2008 advanced timestamping */
	dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
	dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
	/* TX and RX checksum offload */
	dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
	dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
	dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
	dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
	/* TX and RX number of channels */
	dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
	dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
	/* Alternate (enhanced) descriptor mode */
	dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;

	return 0;
}

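/* Program the receive interrupt watchdog timer (RIWT) used for interrupt
 * coalescing on the RX path.
 */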
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
				  u32 queue)
{
	writel(riwt, ioaddr + DMA_RX_WATCHDOG);
}

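/* DMA callbacks exported to the core driver for this GMAC generation */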
const struct stmmac_dma_ops dwmac1000_dma_ops = {
	.reset = dwmac_dma_reset,
	.init = dwmac1000_dma_init,
	.init_rx_chan = dwmac1000_dma_init_rx,
	.init_tx_chan = dwmac1000_dma_init_tx,
	.axi = dwmac1000_dma_axi,
	.dump_regs = dwmac1000_dump_dma_regs,
	.dma_rx_mode = dwmac1000_dma_operation_mode_rx,
	.dma_tx_mode = dwmac1000_dma_operation_mode_tx,
	.enable_dma_transmission = dwmac_enable_dma_transmission,
	.enable_dma_irq = dwmac_enable_dma_irq,
	.disable_dma_irq = dwmac_disable_dma_irq,
	.start_tx = dwmac_dma_start_tx,
	.stop_tx = dwmac_dma_stop_tx,
	.start_rx = dwmac_dma_start_rx,
	.stop_rx = dwmac_dma_stop_rx,
	.dma_interrupt = dwmac_dma_interrupt,
	.get_hw_feature = dwmac1000_get_hw_feature,
	.rx_watchdog = dwmac1000_rx_watchdog,
};