0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/stmmac.h>
0012 #include "common.h"
0013 #include "descs_com.h"
0014
/* Decode the completion status of a TX descriptor (TDES0) once the DMA
 * has released it.  Error indications are folded into the driver extra
 * stats @x and the generic netdev @stats, and the TX FIFO is flushed for
 * the error classes that require it.
 *
 * @data:   opaque pointer, actually a struct net_device_stats *
 * @x:      driver-private extra statistics
 * @p:      descriptor to inspect
 * @ioaddr: DMA register base, needed for dwmac_dma_flush_tx_fifo()
 *
 * Return: tx_dma_own if the DMA still owns the descriptor, tx_not_ls if
 * this is not the last segment of a frame, tx_err if the error summary
 * bit is set, tx_done otherwise.
 */
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes0 = le32_to_cpu(p->des0);
	int ret = tx_done;

	/* Get tx owner first: nothing else in TDES0 is valid until the
	 * DMA has cleared the OWN bit. */
	if (unlikely(tdes0 & ETDES0_OWN))
		return tx_dma_own;

	/* Status is only reported in the last segment of a frame. */
	if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
			x->tx_jabber++;

		/* A flushed frame leaves stale data in the FIFO; flush it
		 * before the DMA is restarted. */
		if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		/* Collision count lives in TDES0 bits 3..6 (hence the
		 * shift by 3 after masking). */
		if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
			     (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
			stats->collisions +=
				(tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;

		if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
			x->tx_ip_header_error++;

		/* Checksum-offload payload error also requires a flush. */
		if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = tx_err;
	}

	/* Deferred (half-duplex) is informational, not an error. */
	if (unlikely(tdes0 & ETDES0_DEFERRED))
		x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
	if (tdes0 & ETDES0_VLAN_FRAME)
		x->tx_vlan++;
#endif

	return ret;
}
0081
0082 static int enh_desc_get_tx_len(struct dma_desc *p)
0083 {
0084 return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
0085 }
0086
0087 static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
0088 {
0089 int ret = good_frame;
0090 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103 if (status == 0x0)
0104 ret = llc_snap;
0105 else if (status == 0x4)
0106 ret = good_frame;
0107 else if (status == 0x5)
0108 ret = csum_none;
0109 else if (status == 0x6)
0110 ret = csum_none;
0111 else if (status == 0x7)
0112 ret = csum_none;
0113 else if (status == 0x1)
0114 ret = discard_frame;
0115 else if (status == 0x3)
0116 ret = discard_frame;
0117 return ret;
0118 }
0119
/* Decode the extended receive status word (RDES4) of an extended
 * descriptor and bump the matching extra-stats counters: IP checksum
 * results, PTP message types, AV traffic and L3/L4 filter matches.
 *
 * @data: unused here (kept for ops-table signature compatibility)
 * @x:    driver-private extra statistics
 * @p:    extended descriptor whose basic.des0/des4 are inspected
 *
 * NOTE(review): bit 0 of RDES0 (named ERDES0_RX_MAC_ADDR) appears to be
 * used here as "extended status available"; RDES4 is only decoded when
 * it is set -- confirm against the DWMAC databook.
 */
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_extended_desc *p)
{
	unsigned int rdes0 = le32_to_cpu(p->basic.des0);
	unsigned int rdes4 = le32_to_cpu(p->des4);

	if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
		/* PTP message type field sits at RDES4 bits 8.. */
		int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;

		if (rdes4 & ERDES4_IP_HDR_ERR)
			x->ip_hdr_err++;
		if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
			x->ip_payload_err++;
		if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
			x->ip_csum_bypassed++;
		if (rdes4 & ERDES4_IPV4_PKT_RCVD)
			x->ipv4_pkt_rcvd++;
		if (rdes4 & ERDES4_IPV6_PKT_RCVD)
			x->ipv6_pkt_rcvd++;

		/* Classify the received PTP message, one counter per type. */
		if (message_type == RDES_EXT_NO_PTP)
			x->no_ptp_rx_msg_type_ext++;
		else if (message_type == RDES_EXT_SYNC)
			x->ptp_rx_msg_type_sync++;
		else if (message_type == RDES_EXT_FOLLOW_UP)
			x->ptp_rx_msg_type_follow_up++;
		else if (message_type == RDES_EXT_DELAY_REQ)
			x->ptp_rx_msg_type_delay_req++;
		else if (message_type == RDES_EXT_DELAY_RESP)
			x->ptp_rx_msg_type_delay_resp++;
		else if (message_type == RDES_EXT_PDELAY_REQ)
			x->ptp_rx_msg_type_pdelay_req++;
		else if (message_type == RDES_EXT_PDELAY_RESP)
			x->ptp_rx_msg_type_pdelay_resp++;
		else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
			x->ptp_rx_msg_type_pdelay_follow_up++;
		else if (message_type == RDES_PTP_ANNOUNCE)
			x->ptp_rx_msg_type_announce++;
		else if (message_type == RDES_PTP_MANAGEMENT)
			x->ptp_rx_msg_type_management++;
		else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
			x->ptp_rx_msg_pkt_reserved_type++;

		if (rdes4 & ERDES4_PTP_FRAME_TYPE)
			x->ptp_frame_type++;
		if (rdes4 & ERDES4_PTP_VER)
			x->ptp_ver++;
		if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
			x->timestamp_dropped++;
		if (rdes4 & ERDES4_AV_PKT_RCVD)
			x->av_pkt_rcvd++;
		if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
			x->av_tagged_pkt_rcvd++;
		/* Multi-bit fields: any non-zero value counts as a hit. */
		if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
			x->vlan_tag_priority_val++;
		if (rdes4 & ERDES4_L3_FILTER_MATCH)
			x->l3_filter_match++;
		if (rdes4 & ERDES4_L4_FILTER_MATCH)
			x->l4_filter_match++;
		if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
			x->l3_l4_filter_no_match++;
	}
}
0183
/* Decode the receive status word (RDES0) of a descriptor the DMA has
 * released, updating extra stats @x and netdev @stats.
 *
 * @data: opaque pointer, actually a struct net_device_stats *
 * @x:    driver-private extra statistics
 * @p:    descriptor to inspect
 *
 * Return: dma_own while the DMA owns the descriptor, discard_frame on
 * any error or filter failure, otherwise the checksum-offload verdict
 * from enh_desc_coe_rdes0() (good_frame / csum_none / llc_snap).
 */
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	int ret = good_frame;

	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

	/* A frame that does not fit one descriptor (no last-descriptor
	 * bit) cannot be handled here; count it as a length error. */
	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
		stats->rx_length_errors++;
		return discard_frame;
	}

	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		/* Bit 7 doubles as IPC checksum error / giant frame when
		 * the error summary is set. */
		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
			pr_err("\tIPC Csum Error/Giant frame\n");

		if (unlikely(rdes0 & RDES0_COLLISION))
			stats->collisions++;
		if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes0 & RDES0_MII_ERROR))
			x->rx_mii++;

		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* For error-free frames, refine the verdict with the checksum
	 * offload bits.  NOTE(review): bit 0 (ERDES0_RX_MAC_ADDR) is
	 * passed as the payload-checksum-error input; on normal
	 * descriptors bit 0 carries that meaning per the databook --
	 * confirm if descriptors are reconfigured. */
	if (likely(ret == good_frame))
		ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
					 !!(rdes0 & RDES0_FRAME_TYPE),
					 !!(rdes0 & ERDES0_RX_MAC_ADDR));

	if (unlikely(rdes0 & RDES0_DRIBBLING))
		x->dribbling_bit++;

	/* Source/destination address filter failures discard the frame
	 * even when no error summary was raised. */
	if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (rdes0 & RDES0_VLAN_TAG)
		x->rx_vlan++;
#endif

	return ret;
}
0256
/* Initialise an RX descriptor: hand it to the DMA (OWN bit), program
 * the buffer-1 size (capped at 8 KiB, the per-buffer hardware limit),
 * set up ring/chain linkage, and optionally suppress the rx-complete
 * interrupt for this descriptor.
 *
 * @disable_rx_ic: non-zero to disable the per-descriptor rx interrupt
 * @mode:          STMMAC_CHAIN_MODE or ring mode
 * @end:           non-zero if this is the last descriptor of the ring
 * @bfsize:        configured rx buffer size in bytes
 */
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end, int bfsize)
{
	int bfsize1;

	p->des0 |= cpu_to_le32(RDES0_OWN);

	bfsize1 = min(bfsize, BUF_SIZE_8KiB);
	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);

	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_rx_set_on_chain(p);
	else
		ehn_desc_rx_set_on_ring(p, end, bfsize);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}
0275
0276 static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
0277 {
0278 p->des0 &= cpu_to_le32(~ETDES0_OWN);
0279 if (mode == STMMAC_CHAIN_MODE)
0280 enh_desc_end_tx_desc_on_chain(p);
0281 else
0282 enh_desc_end_tx_desc_on_ring(p, end);
0283 }
0284
0285 static int enh_desc_get_tx_owner(struct dma_desc *p)
0286 {
0287 return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
0288 }
0289
/* Hand the TX descriptor to the DMA by setting the OWN bit. */
static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_OWN);
}
0294
/* Hand the RX descriptor back to the DMA.  @disable_rx_ic is unused on
 * enhanced descriptors (interrupt suppression is set at init time). */
static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
}
0299
0300 static int enh_desc_get_tx_ls(struct dma_desc *p)
0301 {
0302 return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
0303 }
0304
/* Recycle a completed TX descriptor: wipe the status/control words
 * (everything up to but excluding des2, so the buffer pointers are
 * untouched) while preserving the end-of-ring marker, then re-establish
 * ring/chain linkage.
 */
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
	/* Latch the end-of-ring bit (TDES0 bit 21) before clearing. */
	int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, ter);
}
0315
/* Fill in a TX descriptor for transmission.
 *
 * @p:           descriptor to prepare
 * @is_fs:       true if this is the first segment of the frame
 * @len:         buffer length to program
 * @csum_flag:   enable full TX checksum insertion
 * @mode:        STMMAC_CHAIN_MODE or ring mode
 * @tx_own:      hand the descriptor to the DMA (set OWN)
 * @ls:          true if this is the last segment of the frame
 * @tot_pkt_len: unused on enhanced descriptors (GMAC4 signature compat)
 */
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     bool csum_flag, int mode, bool tx_own,
				     bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes0 = le32_to_cpu(p->des0);

	if (mode == STMMAC_CHAIN_MODE)
		enh_set_tx_desc_len_on_chain(p, len);
	else
		enh_set_tx_desc_len_on_ring(p, len);

	if (is_fs)
		tdes0 |= ETDES0_FIRST_SEGMENT;
	else
		tdes0 &= ~ETDES0_FIRST_SEGMENT;

	if (likely(csum_flag))
		tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
	else
		tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes0 |= ETDES0_LAST_SEGMENT;

	/* Set the OWN bit in case of a multi-descriptor frame. */
	if (tx_own)
		tdes0 |= ETDES0_OWN;

	/* When the OWN bit is being set on the FIRST descriptor of a
	 * frame, all the following descriptors of the same frame must
	 * already be visible to the device, otherwise the DMA could
	 * race and read stale data: hence the barrier before the final
	 * des0 write below. */
	if (is_fs && tx_own)
		dma_wmb();

	p->des0 = cpu_to_le32(tdes0);
}
0353
/* Request a TX-complete interrupt when this descriptor is closed. */
static void enh_desc_set_tx_ic(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}
0358
/* Return the length of the received frame as reported in RDES0.
 *
 * With a Type-1 checksum offload engine the hardware leaves the 2-byte
 * payload checksum appended to the frame and included in the reported
 * length, so it is subtracted here before handing the frame up.
 */
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	unsigned int csum = 0;

	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		csum = 2;

	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
		>> RDES0_FRAME_LEN_SHIFT) - csum);
}
0374
/* Ask the MAC to timestamp the frame sent through this descriptor. */
static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}
0379
0380 static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
0381 {
0382 return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
0383 }
0384
/* Read the captured hardware timestamp out of a descriptor.
 *
 * @desc: descriptor; extended (des6/des7) when @ats is set, otherwise
 *        basic (des2/des3 are reused for the snapshot)
 * @ats:  non-zero when extended (alternate/advanced timestamp)
 *        descriptors are in use
 * @ts:   output, timestamp in nanoseconds
 *
 * The low word holds nanoseconds, the high word holds seconds; combine
 * them into a single nanosecond value.
 */
static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	u64 ns;

	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		ns = le32_to_cpu(p->des6);
		/* convert high/sec time stamp value to nanosecond */
		ns += le32_to_cpu(p->des7) * 1000000000ULL;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		ns = le32_to_cpu(p->des2);
		ns += le32_to_cpu(p->des3) * 1000000000ULL;
	}

	*ts = ns;
}
0402
0403 static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
0404 u32 ats)
0405 {
0406 if (ats) {
0407 struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
0408 return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
0409 } else {
0410 struct dma_desc *p = (struct dma_desc *)desc;
0411 if ((le32_to_cpu(p->des2) == 0xffffffff) &&
0412 (le32_to_cpu(p->des3) == 0xffffffff))
0413
0414 return 0;
0415 else
0416 return 1;
0417 }
0418 }
0419
0420 static void enh_desc_display_ring(void *head, unsigned int size, bool rx,
0421 dma_addr_t dma_rx_phy, unsigned int desc_size)
0422 {
0423 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
0424 dma_addr_t dma_addr;
0425 int i;
0426
0427 pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
0428
0429 for (i = 0; i < size; i++) {
0430 u64 x;
0431 dma_addr = dma_rx_phy + i * sizeof(*ep);
0432
0433 x = *(u64 *)ep;
0434 pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
0435 i, &dma_addr,
0436 (unsigned int)x, (unsigned int)(x >> 32),
0437 ep->basic.des2, ep->basic.des3);
0438 ep++;
0439 }
0440 pr_info("\n");
0441 }
0442
/* Program the buffer DMA address into des2.  Enhanced descriptors carry
 * a 32-bit address; the upper bits of @addr are truncated (presumably
 * the platform guarantees 32-bit DMA here -- confirm). */
static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des2 = cpu_to_le32(addr);
}
0447
/* Clear the buffer address so the descriptor no longer references
 * a DMA buffer. */
static void enh_desc_clear(struct dma_desc *p)
{
	p->des2 = 0;
}
0452
/* Descriptor operations for MACs using the enhanced descriptor format;
 * selected by the core at probe time. */
const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.set_tx_ic = enh_desc_set_tx_ic,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
	.rx_extended_status = enh_desc_get_ext_status,
	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
	.get_timestamp = enh_desc_get_timestamp,
	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
	.display_ring = enh_desc_display_ring,
	.set_addr = enh_desc_set_addr,
	.clear = enh_desc_clear,
};