#include <linux/irq.h>
#include "en.h"
#include "en/txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en_accel/ktls_txrx.h"

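/* Returns true while NAPI is still running on a CPU that belongs to the
 * channel's IRQ affinity mask, i.e. no affinity change is in progress.
 */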
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
	int current_cpu = smp_processor_id();

	return cpumask_test_cpu(current_cpu, c->aff_mask);
}

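/* Feed the queue's current packet/byte counters into the net_dim
 * (Dynamic Interrupt Moderation) engine, which may schedule a worker
 * to adjust the CQ moderation profile.
 */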
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	struct dim_sample dim_sample = {};

	if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
		return;

	dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
	net_dim(&sq->dim, dim_sample);
}

static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
	struct mlx5e_rq_stats *stats = rq->stats;
	struct dim_sample dim_sample = {};

	if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
		return;

	dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
	net_dim(&rq->dim, dim_sample);
}

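/* Post a NOP WQE on the ICOSQ and ring the doorbell. The resulting
 * completion raises an interrupt, which reschedules NAPI on the channel.
 */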
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *nopwqe;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
		.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
		.num_wqebbs = 1,
	};

	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

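/* Run the AF_XDP part of the NAPI cycle: transmit from the XSK TX queue
 * and repost XSK RX descriptors. Returns true if there is still work left.
 */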
static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
{
	bool busy_xsk = false, xsk_rx_alloc_err;

	/* Update the TX wakeup flag both before and after transmitting: if
	 * the application checked need_wakeup before we set it, the queued
	 * packets are transmitted anyway, and updating again afterwards
	 * gives a chance to clear the flag once new packets were queued
	 * for TX.
	 */
	mlx5e_xsk_update_tx_wakeup(xsksq);
	busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
	mlx5e_xsk_update_tx_wakeup(xsksq);

	xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
					   mlx5e_post_rx_mpwqes,
					   mlx5e_post_rx_wqes,
					   xskrq);
	busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);

	return busy_xsk;
}

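/* The channel's NAPI handler: polls the TX, QoS, XDP and RX CQs against
 * the given budget, reposts RX WQEs, and either reports the full budget
 * as consumed (to stay scheduled) or completes NAPI and re-arms all CQs.
 */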
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_xdpsq *xsksq = &c->xsksq;
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_rq *xskrq = &c->xskrq;
	struct mlx5e_rq *rq = &c->rq;
	bool aff_change = false;
	bool busy_xsk = false;
	bool busy = false;
	int work_done = 0;
	u16 qos_sqs_size;
	bool xsk_open;
	int i;

	rcu_read_lock();

	qos_sqs = rcu_dereference(c->qos_sqs);

	xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	ch_stats->poll++;

	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

	if (unlikely(qos_sqs)) {
		/* Read qos_sqs_size only after the qos_sqs pointer; pairs
		 * with the writer's barrier when the QoS SQs are published.
		 */
		smp_rmb();
		qos_sqs_size = READ_ONCE(c->qos_sqs_size);

		for (i = 0; i < qos_sqs_size; i++) {
			struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);

			if (sq)
				busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
		}
	}

	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);

	if (c->xdp)
		busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);

	if (likely(budget)) { /* budget=0 means: don't poll rx rings */
		if (xsk_open)
			work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);

		if (likely(budget - work_done))
			work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);

		busy |= work_done == budget;
	}

	mlx5e_poll_ico_cq(&c->icosq.cq);
	if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
		/* Don't clear the flag if nothing was polled to prevent
		 * queueing more WQEs and overflowing the async ICOSQ.
		 */
		clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);

	/* Keep after async ICOSQ CQ poll */
	if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
		busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);

	busy |= INDIRECT_CALL_2(rq->post_wqes,
				mlx5e_post_rx_mpwqes,
				mlx5e_post_rx_wqes,
				rq);
	if (xsk_open) {
		busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
		busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
	}

	busy |= busy_xsk;

	if (busy) {
		if (likely(mlx5e_channel_no_affinity_change(c))) {
			work_done = budget;
			goto out;
		}
		ch_stats->aff_change++;
		aff_change = true;
		/* Report less than the full budget so that NAPI can complete
		 * and be rescheduled on the new CPU.
		 */
		if (budget && work_done == budget)
			work_done--;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	for (i = 0; i < c->num_tc; i++) {
		mlx5e_handle_tx_dim(&c->sq[i]);
		mlx5e_cq_arm(&c->sq[i].cq);
	}
	if (unlikely(qos_sqs)) {
		for (i = 0; i < qos_sqs_size; i++) {
			struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);

			if (sq) {
				mlx5e_handle_tx_dim(sq);
				mlx5e_cq_arm(&sq->cq);
			}
		}
	}

	mlx5e_handle_rx_dim(rq);

	mlx5e_cq_arm(&rq->cq);
	mlx5e_cq_arm(&c->icosq.cq);
	mlx5e_cq_arm(&c->async_icosq.cq);
	mlx5e_cq_arm(&c->xdpsq.cq);

	if (xsk_open) {
		mlx5e_handle_rx_dim(xskrq);
		mlx5e_cq_arm(&xsksq->cq);
		mlx5e_cq_arm(&xskrq->cq);
	}

	if (unlikely(aff_change && busy_xsk)) {
		mlx5e_trigger_irq(&c->icosq);
		ch_stats->force_irq++;
	}

out:
	rcu_read_unlock();

	return work_done;
}

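/* CQ completion callback, invoked from EQ handling context: schedule NAPI
 * on the channel and account the hardware event.
 */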
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

	napi_schedule(cq->napi);
	cq->event_ctr++;
	cq->ch_stats->events++;
}

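/* CQ error callback: nothing is recovered here, only log the event. */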
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
	struct net_device *netdev = cq->netdev;

	netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
		   __func__, mcq->cqn, event);
}