// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"

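/* gve_get_drvinfo() below backs `ethtool -i <dev>`; gve_version_str is the
 * driver version string exported by the gve core (assumed to be declared
 * in gve.h).
 */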
static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, "gve", sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
	"rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
	"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]",
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS	ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS	ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

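/* Sanity example (hypothetical 8 RX / 8 TX queue configuration): the
 * ETH_SS_STATS set would then hold GVE_MAIN_STATS_LEN (16) +
 * 8 * NUM_GVE_RX_CNTS (8 * 14 = 112) + 8 * NUM_GVE_TX_CNTS (8 * 8 = 64) +
 * GVE_ADMINQ_STATS_LEN (15) = 207 entries. gve_get_strings(),
 * gve_get_sset_count() and gve_get_ethtool_stats() below must all agree on
 * this ordering.
 */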
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	char *s = (char *)data;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(s, *gve_gstrings_main_stats,
		       sizeof(gve_gstrings_main_stats));
		s += sizeof(gve_gstrings_main_stats);

		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_rx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_tx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		memcpy(s, *gve_gstrings_adminq_stats,
		       sizeof(gve_gstrings_adminq_stats));
		s += sizeof(gve_gstrings_adminq_stats);
		break;

	case ETH_SS_PRIV_FLAGS:
		memcpy(s, *gve_gstrings_priv_flags,
		       sizeof(gve_gstrings_priv_flags));
		s += sizeof(gve_gstrings_priv_flags);
		break;

	default:
		break;
	}
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

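/* Per-ring counters below are read under a u64_stats seqcount so that
 * 64-bit values cannot be torn on 32-bit machines. The pattern, repeated
 * for every ring, is:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->statss);
 *		tmp = ring->counter;
 *	} while (u64_stats_fetch_retry_irq(&ring->statss, start));
 */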
static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
		tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
		tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
		rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
	for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
	     rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
							   start));
			rx_pkts += tmp_rx_pkts;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < priv->tx_cfg.num_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
							   start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	data[i++] = tx_dropped;
	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

	/* For rx cross-reporting stats, start from nic rx stats in report */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
		base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
	     stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
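	/* Example of the report layout this relies on, assuming a
	 * hypothetical 4 TX / 4 RX queue configuration: driver-written stats
	 * occupy report indices 0 .. 4 * GVE_TX_STATS_REPORT_NUM +
	 * 4 * GVE_RX_STATS_REPORT_NUM - 1, and the NIC-written RX blocks
	 * follow, NIC_RX_STATS_REPORT_NUM entries per queue, in whatever
	 * queue order the device chose; hence the queue-id to report-index
	 * map built above.
	 */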
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
							   start));
			data[i++] = tmp_rx_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
				continue;
			}
			for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
				u64 value =
				be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);

				data[i++] = value;
			}
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
	     stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts.
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
			}
			do {
				start =
				  u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
							   start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
				continue;
			}
			for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
				u64 value =
				be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
				data[i++] = value;
			}
		}
	} else {
		i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
}

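/* gve exposes independent rx/tx channel counts and never combined channels,
 * so, for example (hypothetical device name), `ethtool -L eth0 rx 4 tx 4`
 * is the way to resize the queues, while `ethtool -L eth0 combined 4` is
 * rejected with -EINVAL by gve_set_channels() below.
 */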
static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}

static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (!netif_carrier_ok(netdev)) {
		/* Carrier is off: just record the new configuration */
		priv->tx_cfg.num_queues = new_tx;
		priv->rx_cfg.num_queues = new_rx;
		return 0;
	}

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->rx_desc_cnt;
	cmd->tx_max_pending = priv->tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;
}

static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

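/* rx-copybreak is the largest packet, in bytes, that gve copies into a
 * freshly allocated skb rather than handing the receive buffer up the
 * stack. It maps to the standard ethtool tunable, e.g. (hypothetical
 * device name) `ethtool --set-tunable eth0 rx-copybreak 256`.
 */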
static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = gve_is_gqi(priv) ?
			(PAGE_SIZE / 2) : priv->data_buffer_size_dqo;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

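/* "report-stats" is currently the only private flag (BIT(0) in
 * priv->ethtool_flags); it arms the periodic stats report shared with the
 * device, e.g. (hypothetical device name)
 * `ethtool --set-priv-flags eth0 report-stats on`.
 */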
static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only one flag exists currently: report-stats (BIT(0)) */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;

	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one flag exists currently: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* start the report-stats timer when the user turns report stats on */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
				msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero off gve stats when report-stats is turned off and
	 * delete the report-stats timer.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			priv->tx_cfg.num_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
		       sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}

static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;
	return err;
}

static int gve_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;
	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

	return 0;
}

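/* Interrupt coalescing is only configurable on the DQO queue format; on GQI
 * both coalesce callbacks return -EOPNOTSUPP. On DQO, e.g. (hypothetical
 * device name) `ethtool -C eth0 rx-usecs 20 tx-usecs 20` updates the ITR of
 * every affected ring's notify block, with both values capped at
 * GVE_MAX_ITR_INTERVAL_DQO.
 */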
static int gve_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
	int idx;

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
		return -EINVAL;
	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}

	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	return 0;
}

const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings,
};