0001
0002
0003
0004
0005
0006 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0007
0008 #include <linux/string.h>
0009 #include <linux/of_platform.h>
0010 #include <linux/net_tstamp.h>
0011 #include <linux/fsl/ptp_qoriq.h>
0012
0013 #include "dpaa_eth.h"
0014 #include "mac.h"
0015
/* Names of the per-CPU counters, one ethtool string per entry.
 * The order here must match the order in which copy_stats() scatters
 * the corresponding values into the ethtool data array.
 */
static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
	"rx dropped",
	"tx dropped",
};
0027
0028 static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
0029
0030 "rx dma error",
0031 "rx frame physical error",
0032 "rx frame size error",
0033 "rx header error",
0034
0035
0036 "qman cg_tdrop",
0037 "qman wred",
0038 "qman error cond",
0039 "qman early window",
0040 "qman late window",
0041 "qman fq tdrop",
0042 "qman fq retired",
0043 "qman orp disabled",
0044
0045
0046 "congestion time (ms)",
0047 "entered congestion",
0048 "congested (0/1)"
0049 };
0050
0051 #define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
0052 #define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
0053
0054 static int dpaa_get_link_ksettings(struct net_device *net_dev,
0055 struct ethtool_link_ksettings *cmd)
0056 {
0057 if (!net_dev->phydev)
0058 return 0;
0059
0060 phy_ethtool_ksettings_get(net_dev->phydev, cmd);
0061
0062 return 0;
0063 }
0064
0065 static int dpaa_set_link_ksettings(struct net_device *net_dev,
0066 const struct ethtool_link_ksettings *cmd)
0067 {
0068 int err;
0069
0070 if (!net_dev->phydev)
0071 return -ENODEV;
0072
0073 err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
0074 if (err < 0)
0075 netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
0076
0077 return err;
0078 }
0079
0080 static void dpaa_get_drvinfo(struct net_device *net_dev,
0081 struct ethtool_drvinfo *drvinfo)
0082 {
0083 strlcpy(drvinfo->driver, KBUILD_MODNAME,
0084 sizeof(drvinfo->driver));
0085 strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
0086 sizeof(drvinfo->bus_info));
0087 }
0088
0089 static u32 dpaa_get_msglevel(struct net_device *net_dev)
0090 {
0091 return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
0092 }
0093
0094 static void dpaa_set_msglevel(struct net_device *net_dev,
0095 u32 msg_enable)
0096 {
0097 ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
0098 }
0099
0100 static int dpaa_nway_reset(struct net_device *net_dev)
0101 {
0102 int err;
0103
0104 if (!net_dev->phydev)
0105 return -ENODEV;
0106
0107 err = 0;
0108 if (net_dev->phydev->autoneg) {
0109 err = phy_start_aneg(net_dev->phydev);
0110 if (err < 0)
0111 netdev_err(net_dev, "phy_start_aneg() = %d\n",
0112 err);
0113 }
0114
0115 return err;
0116 }
0117
0118 static void dpaa_get_pauseparam(struct net_device *net_dev,
0119 struct ethtool_pauseparam *epause)
0120 {
0121 struct mac_device *mac_dev;
0122 struct dpaa_priv *priv;
0123
0124 priv = netdev_priv(net_dev);
0125 mac_dev = priv->mac_dev;
0126
0127 if (!net_dev->phydev)
0128 return;
0129
0130 epause->autoneg = mac_dev->autoneg_pause;
0131 epause->rx_pause = mac_dev->rx_pause_active;
0132 epause->tx_pause = mac_dev->tx_pause_active;
0133 }
0134
/* Apply a new pause frame configuration: record the requested settings on
 * the MAC device, advertise them through the PHY, then program the MAC
 * with the resulting active rx/tx pause state.
 */
static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct phy_device *phydev;
	bool rx_pause, tx_pause;
	struct dpaa_priv *priv;
	int err;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phydev = net_dev->phydev;
	if (!phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

	/* Record the requested configuration on the MAC device before the
	 * PHY is touched, so adjust_link sees a consistent request when
	 * the link state is renegotiated.
	 */
	mac_dev->autoneg_pause = !!epause->autoneg;
	mac_dev->rx_pause_req = !!epause->rx_pause;
	mac_dev->tx_pause_req = !!epause->tx_pause;

	/* Derive the sym/asym PAUSE advertisement from the desired rx/tx
	 * pause settings and push it to the PHY.
	 */
	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);

	/* Compute the pause state actually in effect and program the MAC. */
	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

	return err;
}
0177
0178 static int dpaa_get_sset_count(struct net_device *net_dev, int type)
0179 {
0180 unsigned int total_stats, num_stats;
0181
0182 num_stats = num_online_cpus() + 1;
0183 total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
0184 DPAA_STATS_GLOBAL_LEN;
0185
0186 switch (type) {
0187 case ETH_SS_STATS:
0188 return total_stats;
0189 default:
0190 return -EOPNOTSUPP;
0191 }
0192 }
0193
0194 static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
0195 int crr_cpu, u64 bp_count, u64 *data)
0196 {
0197 int num_values = num_cpus + 1;
0198 int crr = 0;
0199
0200
0201 data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
0202 data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
0203
0204 data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
0205 data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
0206
0207 data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
0208 data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
0209
0210 data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
0211 data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
0212
0213 data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
0214 data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
0215
0216 data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
0217 data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
0218
0219 data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
0220 data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
0221
0222 data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
0223 data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
0224
0225 data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
0226 data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
0227
0228 data[crr * num_values + crr_cpu] = bp_count;
0229 data[crr++ * num_values + num_cpus] += bp_count;
0230 }
0231
/* Collect all ethtool statistics: per-CPU counters (with totals), the
 * buffer pool counts, accumulated rx error and ERN counters, and the
 * congestion group state.  The layout must match dpaa_get_strings().
 */
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	u64 bp_count, cg_time, cg_num;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		dpaa_bp = priv->dpaa_bp;
		if (!dpaa_bp->percpu_count)
			continue;
		bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		/* rx error and ERN counters are reported as sums over all
		 * CPUs, not per-CPU like the counters handled by copy_stats()
		 */
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	/* the per-CPU section (incl. bpool) ends here; globals follow */
	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num = 0;
	cg_status = false;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like QMan API does) */
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}
0300
0301 static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
0302 u8 *data)
0303 {
0304 unsigned int i, j, num_cpus, size;
0305 char string_cpu[ETH_GSTRING_LEN];
0306 u8 *strings;
0307
0308 memset(string_cpu, 0, sizeof(string_cpu));
0309 strings = data;
0310 num_cpus = num_online_cpus();
0311 size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
0312
0313 for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
0314 for (j = 0; j < num_cpus; j++) {
0315 snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
0316 dpaa_stats_percpu[i], j);
0317 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
0318 strings += ETH_GSTRING_LEN;
0319 }
0320 snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
0321 dpaa_stats_percpu[i]);
0322 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
0323 strings += ETH_GSTRING_LEN;
0324 }
0325 for (j = 0; j < num_cpus; j++) {
0326 snprintf(string_cpu, ETH_GSTRING_LEN,
0327 "bpool [CPU %d]", j);
0328 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
0329 strings += ETH_GSTRING_LEN;
0330 }
0331 snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
0332 memcpy(strings, string_cpu, ETH_GSTRING_LEN);
0333 strings += ETH_GSTRING_LEN;
0334
0335 memcpy(strings, dpaa_stats_global, size);
0336 }
0337
0338 static int dpaa_get_hash_opts(struct net_device *dev,
0339 struct ethtool_rxnfc *cmd)
0340 {
0341 struct dpaa_priv *priv = netdev_priv(dev);
0342
0343 cmd->data = 0;
0344
0345 switch (cmd->flow_type) {
0346 case TCP_V4_FLOW:
0347 case TCP_V6_FLOW:
0348 case UDP_V4_FLOW:
0349 case UDP_V6_FLOW:
0350 if (priv->keygen_in_use)
0351 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
0352 fallthrough;
0353 case IPV4_FLOW:
0354 case IPV6_FLOW:
0355 case SCTP_V4_FLOW:
0356 case SCTP_V6_FLOW:
0357 case AH_ESP_V4_FLOW:
0358 case AH_ESP_V6_FLOW:
0359 case AH_V4_FLOW:
0360 case AH_V6_FLOW:
0361 case ESP_V4_FLOW:
0362 case ESP_V6_FLOW:
0363 if (priv->keygen_in_use)
0364 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
0365 break;
0366 default:
0367 cmd->data = 0;
0368 break;
0369 }
0370
0371 return 0;
0372 }
0373
0374 static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
0375 u32 *unused)
0376 {
0377 int ret = -EOPNOTSUPP;
0378
0379 switch (cmd->cmd) {
0380 case ETHTOOL_GRXFH:
0381 ret = dpaa_get_hash_opts(dev, cmd);
0382 break;
0383 default:
0384 break;
0385 }
0386
0387 return ret;
0388 }
0389
0390 static void dpaa_set_hash(struct net_device *net_dev, bool enable)
0391 {
0392 struct mac_device *mac_dev;
0393 struct fman_port *rxport;
0394 struct dpaa_priv *priv;
0395
0396 priv = netdev_priv(net_dev);
0397 mac_dev = priv->mac_dev;
0398 rxport = mac_dev->port[0];
0399
0400 fman_port_use_kg_hash(rxport, enable);
0401 priv->keygen_in_use = enable;
0402 }
0403
0404 static int dpaa_set_hash_opts(struct net_device *dev,
0405 struct ethtool_rxnfc *nfc)
0406 {
0407 int ret = -EINVAL;
0408
0409
0410 if (nfc->data &
0411 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
0412 return -EINVAL;
0413
0414 switch (nfc->flow_type) {
0415 case TCP_V4_FLOW:
0416 case TCP_V6_FLOW:
0417 case UDP_V4_FLOW:
0418 case UDP_V6_FLOW:
0419 case IPV4_FLOW:
0420 case IPV6_FLOW:
0421 case SCTP_V4_FLOW:
0422 case SCTP_V6_FLOW:
0423 case AH_ESP_V4_FLOW:
0424 case AH_ESP_V6_FLOW:
0425 case AH_V4_FLOW:
0426 case AH_V6_FLOW:
0427 case ESP_V4_FLOW:
0428 case ESP_V6_FLOW:
0429 dpaa_set_hash(dev, !!nfc->data);
0430 ret = 0;
0431 break;
0432 default:
0433 break;
0434 }
0435
0436 return ret;
0437 }
0438
0439 static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
0440 {
0441 int ret = -EOPNOTSUPP;
0442
0443 switch (cmd->cmd) {
0444 case ETHTOOL_SRXFH:
0445 ret = dpaa_set_hash_opts(dev, cmd);
0446 break;
0447 default:
0448 break;
0449 }
0450
0451 return ret;
0452 }
0453
0454 static int dpaa_get_ts_info(struct net_device *net_dev,
0455 struct ethtool_ts_info *info)
0456 {
0457 struct device *dev = net_dev->dev.parent;
0458 struct device_node *mac_node = dev->of_node;
0459 struct device_node *fman_node = NULL, *ptp_node = NULL;
0460 struct platform_device *ptp_dev = NULL;
0461 struct ptp_qoriq *ptp = NULL;
0462
0463 info->phc_index = -1;
0464
0465 fman_node = of_get_parent(mac_node);
0466 if (fman_node) {
0467 ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
0468 of_node_put(fman_node);
0469 }
0470
0471 if (ptp_node) {
0472 ptp_dev = of_find_device_by_node(ptp_node);
0473 of_node_put(ptp_node);
0474 }
0475
0476 if (ptp_dev)
0477 ptp = platform_get_drvdata(ptp_dev);
0478
0479 if (ptp)
0480 info->phc_index = ptp->phc_index;
0481
0482 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
0483 SOF_TIMESTAMPING_RX_HARDWARE |
0484 SOF_TIMESTAMPING_RAW_HARDWARE;
0485 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
0486 (1 << HWTSTAMP_TX_ON);
0487 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
0488 (1 << HWTSTAMP_FILTER_ALL);
0489
0490 return 0;
0491 }
0492
0493 static int dpaa_get_coalesce(struct net_device *dev,
0494 struct ethtool_coalesce *c,
0495 struct kernel_ethtool_coalesce *kernel_coal,
0496 struct netlink_ext_ack *extack)
0497 {
0498 struct qman_portal *portal;
0499 u32 period;
0500 u8 thresh;
0501
0502 portal = qman_get_affine_portal(smp_processor_id());
0503 qman_portal_get_iperiod(portal, &period);
0504 qman_dqrr_get_ithresh(portal, &thresh);
0505
0506 c->rx_coalesce_usecs = period;
0507 c->rx_max_coalesced_frames = thresh;
0508
0509 return 0;
0510 }
0511
/* Program the RX interrupt coalescing period and threshold on every
 * affine QMan portal.  On any failure, the portals already updated are
 * reverted to the previous settings so all portals stay consistent.
 */
static int dpaa_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	const cpumask_t *cpus = qman_affine_cpus();
	bool needs_revert[NR_CPUS] = {false};
	struct qman_portal *portal;
	u32 period, prev_period;
	u8 thresh, prev_thresh;
	int cpu, res;

	period = c->rx_coalesce_usecs;
	thresh = c->rx_max_coalesced_frames;

	/* save the current values; portals are kept in sync, so the local
	 * portal's settings stand for all of them
	 */
	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &prev_period);
	qman_dqrr_get_ithresh(portal, &prev_thresh);

	/* set new values */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		portal = qman_get_affine_portal(cpu);
		res = qman_portal_set_iperiod(portal, period);
		if (res)
			goto revert_values;
		res = qman_dqrr_set_ithresh(portal, thresh);
		if (res) {
			/* this portal got its period set but not its
			 * threshold: undo the period here, the rest below
			 */
			qman_portal_set_iperiod(portal, prev_period);
			goto revert_values;
		}
		/* mark fully-updated portals for the revert path */
		needs_revert[cpu] = true;
	}

	return 0;

revert_values:
	/* restore the previous values on every portal updated so far */
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		if (!needs_revert[cpu])
			continue;
		portal = qman_get_affine_portal(cpu);

		qman_portal_set_iperiod(portal, prev_period);
		qman_dqrr_set_ithresh(portal, prev_thresh);
	}

	return res;
}
0561
/* ethtool operations exported by the DPAA Ethernet driver; only the RX
 * usecs/max-frames coalescing parameters are supported.
 */
const struct ethtool_ops dpaa_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
	.get_ts_info = dpaa_get_ts_info,
	.get_coalesce = dpaa_get_coalesce,
	.set_coalesce = dpaa_set_coalesce,
};