0001
0002
0003
0004 #include "en/ptp.h"
0005 #include "en/txrx.h"
0006 #include "en/params.h"
0007 #include "en/fs_tt_redirect.h"
0008
/* Flow-steering rules that redirect PTP traffic to the PTP RQ.
 * @valid tracks whether the rules are currently installed
 * (see mlx5e_ptp_rx_set_fs() / mlx5e_ptp_rx_unset_fs()).
 */
struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;	/* ETH_P_1588 L2 redirect rule */
	struct mlx5_flow_handle *udp_v4_rule;	/* IPv4/UDP PTP event-port rule */
	struct mlx5_flow_handle *udp_v6_rule;	/* IPv6/UDP PTP event-port rule */
	bool valid;				/* rules currently installed */
};
0015
/* Parameters used to create the PTP channel's queues; built by
 * mlx5e_ptp_build_params() and consumed by mlx5e_ptp_open_queues().
 */
struct mlx5e_ptp_params {
	struct mlx5e_params params;		/* channel-level parameters */
	struct mlx5e_sq_param txq_sq_param;	/* per-TC PTP SQ parameters */
	struct mlx5e_rq_param rq_param;		/* PTP RQ parameters */
};
0021
/* Per-skb scratch state kept in skb->cb: the timestamp taken from the send
 * CQE and the one taken from the port (ts) CQE. A TX timestamp is reported
 * only once both are recorded (see mlx5e_skb_cb_hwtstamp_handler()).
 */
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;	/* timestamp from the SQ completion CQE */
	ktime_t port_hwtstamp;	/* timestamp from the dedicated port ts CQE */
};
0026
0027 void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
0028 {
0029 memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
0030 }
0031
/* View skb->cb as the PTP timestamp scratch area, with a compile-time check
 * that the struct fits inside the skb control buffer.
 */
static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
	return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
}
0037
0038 static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
0039 struct mlx5e_ptp_cq_stats *cq_stats)
0040 {
0041 struct skb_shared_hwtstamps hwts = {};
0042 ktime_t diff;
0043
0044 diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
0045 mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);
0046
0047
0048 if (diff > (NSEC_PER_SEC >> 7)) {
0049 cq_stats->abort++;
0050 cq_stats->abort_abs_diff_ns += diff;
0051 return;
0052 }
0053
0054 hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
0055 skb_tstamp_tx(skb, &hwts);
0056 }
0057
0058 void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
0059 ktime_t hwtstamp,
0060 struct mlx5e_ptp_cq_stats *cq_stats)
0061 {
0062 switch (hwtstamp_type) {
0063 case (MLX5E_SKB_CB_CQE_HWTSTAMP):
0064 mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
0065 break;
0066 case (MLX5E_SKB_CB_PORT_HWTSTAMP):
0067 mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
0068 break;
0069 }
0070
0071
0072
0073
0074 if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
0075 !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
0076 return;
0077
0078 mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
0079 memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
0080 }
0081
/* Map a WQE counter to an skb FIFO index. NOTE: the macro implicitly uses a
 * local variable named 'ptpsq' at the expansion site. The mask is zero when
 * the device lacks ts_cqe_metadata_size2wqe_counter (see
 * mlx5e_ptp_alloc_traffic_db()), making the index always 0.
 */
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)

/* A ts CQE is out of sync when counter tracking is enabled (non-zero mask)
 * and the FIFO consumer index differs from the CQE's WQE counter.
 */
static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
{
	return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
}
0088
0089 static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
0090 {
0091 struct skb_shared_hwtstamps hwts = {};
0092 struct sk_buff *skb;
0093
0094 ptpsq->cq_stats->resync_event++;
0095
0096 while (skb_cc != skb_id) {
0097 skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
0098 hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
0099 skb_tstamp_tx(skb, &hwts);
0100 ptpsq->cq_stats->resync_cqe++;
0101 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
0102 }
0103 }
0104
/* Handle one CQE from the port-timestamp CQ: pop the matching skb from the
 * FIFO, convert the CQE timestamp to nanoseconds and hand it to the
 * timestamp handler as the port timestamp. Error CQEs drop the skb without
 * reporting; an out-of-sync WQE counter triggers a FIFO resync first.
 */
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    int budget)
{
	u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
	u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct sk_buff *skb;
	ktime_t hwtstamp;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		/* Error CQE: consume the skb without a timestamp report. */
		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	/* Flush skipped skbs if the CQE counter ran ahead of the FIFO. */
	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);

	skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

out:
	napi_consume_skb(skb, budget);
}
0133
/* Poll up to @budget CQEs from the port-timestamp CQ.
 * Returns true when the budget was exhausted (i.e. more work may remain).
 */
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	/* Skip polling while the paired SQ is disabled (being torn down). */
	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure CQ space is freed (doorbell visible) before more CQEs can
	 * be generated
	 */
	wmb();

	return work_done == budget;
}
0161
/* NAPI poll handler for the PTP channel: services the per-TC TX and
 * timestamp CQs and (if enabled) the PTP RX queue, then re-arms all CQs
 * once there is no more work and NAPI completes.
 */
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
			busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
		}
	}
	/* budget == 0 means netpoll-style TX-only processing: skip RX. */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	if (busy) {
		/* Keep polling: report full budget consumed. */
		work_done = budget;
		goto out;
	}

	/* Another context may have rescheduled us; stay armed-off if so. */
	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();

	return work_done;
}
0214
0215 static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
0216 struct mlx5e_params *params,
0217 struct mlx5e_sq_param *param,
0218 struct mlx5e_txqsq *sq, int tc,
0219 struct mlx5e_ptpsq *ptpsq)
0220 {
0221 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
0222 struct mlx5_core_dev *mdev = c->mdev;
0223 struct mlx5_wq_cyc *wq = &sq->wq;
0224 int err;
0225 int node;
0226
0227 sq->pdev = c->pdev;
0228 sq->clock = &mdev->clock;
0229 sq->mkey_be = c->mkey_be;
0230 sq->netdev = c->netdev;
0231 sq->priv = c->priv;
0232 sq->mdev = mdev;
0233 sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
0234 sq->txq_ix = txq_ix;
0235 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
0236 sq->min_inline_mode = params->tx_min_inline_mode;
0237 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
0238 sq->stats = &c->priv->ptp_stats.sq[tc];
0239 sq->ptpsq = ptpsq;
0240 INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
0241 if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
0242 set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
0243 sq->stop_room = param->stop_room;
0244 sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
0245
0246 node = dev_to_node(mlx5_core_dma_dev(mdev));
0247
0248 param->wq.db_numa_node = node;
0249 err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl);
0250 if (err)
0251 return err;
0252 wq->db = &wq->db[MLX5_SND_DBR];
0253
0254 err = mlx5e_alloc_txqsq_db(sq, node);
0255 if (err)
0256 goto err_sq_wq_destroy;
0257
0258 return 0;
0259
0260 err_sq_wq_destroy:
0261 mlx5_wq_destroy(&sq->wq_ctrl);
0262
0263 return err;
0264 }
0265
/* Destroy the hardware SQ object identified by @sqn. */
static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
0270
0271 static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
0272 {
0273 int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
0274 struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
0275
0276 ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
0277 GFP_KERNEL, numa);
0278 if (!ptpsq->skb_fifo.fifo)
0279 return -ENOMEM;
0280
0281 ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
0282 ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
0283 ptpsq->skb_fifo.mask = wq_sz - 1;
0284 if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
0285 ptpsq->ts_cqe_ctr_mask =
0286 (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
0287 return 0;
0288 }
0289
0290 static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
0291 {
0292 while (*skb_fifo->pc != *skb_fifo->cc) {
0293 struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
0294
0295 dev_kfree_skb_any(skb);
0296 }
0297 }
0298
/* Release the skb FIFO: drain pending skbs first, then free the array. */
static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
{
	mlx5e_ptp_drain_skb_fifo(skb_fifo);
	kvfree(skb_fifo->fifo);
}
0304
0305 static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
0306 int txq_ix, struct mlx5e_ptp_params *cparams,
0307 int tc, struct mlx5e_ptpsq *ptpsq)
0308 {
0309 struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
0310 struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
0311 struct mlx5e_create_sq_param csp = {};
0312 int err;
0313
0314 err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
0315 txqsq, tc, ptpsq);
0316 if (err)
0317 return err;
0318
0319 csp.tisn = tisn;
0320 csp.tis_lst_sz = 1;
0321 csp.cqn = txqsq->cq.mcq.cqn;
0322 csp.wq_ctrl = &txqsq->wq_ctrl;
0323 csp.min_inline_mode = txqsq->min_inline_mode;
0324 csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
0325
0326 err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
0327 if (err)
0328 goto err_free_txqsq;
0329
0330 err = mlx5e_ptp_alloc_traffic_db(ptpsq,
0331 dev_to_node(mlx5_core_dma_dev(c->mdev)));
0332 if (err)
0333 goto err_free_txqsq;
0334
0335 return 0;
0336
0337 err_free_txqsq:
0338 mlx5e_free_txqsq(txqsq);
0339
0340 return err;
0341 }
0342
/* Tear down one PTP SQ: free the skb FIFO, stop recovery work, destroy the
 * hardware SQ, then release any outstanding descriptors and software state.
 * The order mirrors (reverses) mlx5e_ptp_open_txqsq().
 */
static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
	/* Make sure no recovery work races with the teardown below. */
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
0354
0355 static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
0356 struct mlx5e_ptp_params *cparams)
0357 {
0358 struct mlx5e_params *params = &cparams->params;
0359 u8 num_tc = mlx5e_get_dcb_num_tc(params);
0360 int ix_base;
0361 int err;
0362 int tc;
0363
0364 ix_base = num_tc * params->num_channels;
0365
0366 for (tc = 0; tc < num_tc; tc++) {
0367 int txq_ix = ix_base + tc;
0368
0369 err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
0370 cparams, tc, &c->ptpsq[tc]);
0371 if (err)
0372 goto close_txqsq;
0373 }
0374
0375 return 0;
0376
0377 close_txqsq:
0378 for (--tc; tc >= 0; tc--)
0379 mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
0380
0381 return err;
0382 }
0383
0384 static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
0385 {
0386 int tc;
0387
0388 for (tc = 0; tc < c->num_tc; tc++)
0389 mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
0390 }
0391
/* Open, per traffic class, the send CQ and the port-timestamp CQ. Both CQ
 * sets share the txq SQ's CQ parameters and are serviced by the channel's
 * NAPI. On failure, everything opened so far is closed in reverse.
 */
static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};	/* no moderation for PTP */
	struct mlx5e_cq_param *cq_param;
	u8 num_tc;
	int err;
	int tc;

	num_tc = mlx5e_get_dcb_num_tc(params);

	ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi = &c->napi;
	ccp.ix = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	/* All txqsq CQs were opened; reset tc so the loop below closes them all. */
	tc = num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}
0443
0444 static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
0445 struct mlx5e_ptp_params *cparams)
0446 {
0447 struct mlx5e_create_cq_param ccp = {};
0448 struct dim_cq_moder ptp_moder = {};
0449 struct mlx5e_cq_param *cq_param;
0450 struct mlx5e_cq *cq = &c->rq.cq;
0451
0452 ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
0453 ccp.ch_stats = c->stats;
0454 ccp.napi = &c->napi;
0455 ccp.ix = MLX5E_PTP_CHANNEL_IX;
0456
0457 cq_param = &cparams->rq_param.cqp;
0458
0459 return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
0460 }
0461
0462 static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
0463 {
0464 int tc;
0465
0466 for (tc = 0; tc < c->num_tc; tc++)
0467 mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
0468
0469 for (tc = 0; tc < c->num_tc; tc++)
0470 mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
0471 }
0472
0473 static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
0474 struct mlx5e_params *params,
0475 struct mlx5e_sq_param *param)
0476 {
0477 void *sqc = param->sqc;
0478 void *wq;
0479
0480 mlx5e_build_sq_param_common(mdev, param);
0481
0482 wq = MLX5_ADDR_OF(sqc, sqc, wq);
0483 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
0484 param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
0485 mlx5e_build_tx_cq_param(mdev, params, ¶m->cqp);
0486 }
0487
/* Build the PTP RQ parameters: cyclic (non-striding) WQ, sized for the
 * netdev's maximum MTU so the RQ need not be resized on MTU changes.
 */
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
				     struct net_device *netdev,
				     u16 q_counter,
				     struct mlx5e_ptp_params *ptp_params)
{
	struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
	struct mlx5e_params *params = &ptp_params->params;

	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	mlx5e_init_rq_type_params(mdev, params);
	params->sw_mtu = netdev->max_mtu;
	mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
}
0501
/* Derive the PTP channel's parameters from the netdev's current parameters
 * (@orig). Only the queue params for the states set in c->state are built.
 */
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->mqprio = orig->mqprio;

	/* SQ params: only when TX port timestamping is enabled. */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		params->log_sq_size = orig->log_sq_size;
		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
	}

	/* RQ params: only when PTP RX steering is enabled. */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
	}
}
0525
/* Initialize the software state of the PTP RQ and register its XDP RXQ info.
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type = params->rq_wq_type;
	rq->pdev = c->pdev;
	rq->netdev = priv->netdev;
	rq->priv = priv;
	rq->clock = &mdev->clock;
	rq->tstamp = &priv->tstamp;
	rq->mdev = mdev;
	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats = &c->priv->ptp_stats.rq;
	rq->ix = MLX5E_PTP_CHANNEL_IX;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	/* No XSK on the PTP RQ. */
	err = mlx5e_rq_set_handlers(rq, params, false);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}
0550
0551 static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
0552 struct mlx5e_rq_param *rq_param)
0553 {
0554 int node = dev_to_node(c->mdev->device);
0555 int err;
0556
0557 err = mlx5e_init_ptp_rq(c, params, &c->rq);
0558 if (err)
0559 return err;
0560
0561 return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
0562 }
0563
/* Open all queues enabled in c->state: TX CQs and SQs first, then the RX CQ
 * and RQ. The error labels re-check the state bits because each resource is
 * only opened (and thus only needs closing) for its enabled state.
 */
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(&c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}
0601
/* Close all opened queues in reverse order of mlx5e_ptp_open_queues():
 * RX (RQ then CQ) first, then TX (SQs then CQs).
 */
static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_close_rq(&c->rq);
		mlx5e_close_cq(&c->rq.cq);
	}
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		mlx5e_ptp_close_txqsqs(c);
		mlx5e_ptp_close_tx_cqs(c);
	}
}
0613
0614 static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
0615 {
0616 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
0617 __set_bit(MLX5E_PTP_STATE_TX, c->state);
0618
0619 if (params->ptp_rx)
0620 __set_bit(MLX5E_PTP_STATE_RX, c->state);
0621
0622 return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
0623 }
0624
/* Remove the PTP RX steering rules and their tables, in reverse order of
 * mlx5e_ptp_rx_set_fs(). No-op when the rules are not installed.
 */
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
{
	struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;

	if (!ptp_fs->valid)
		return;

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
	mlx5e_fs_tt_redirect_any_destroy(priv);

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
	mlx5e_fs_tt_redirect_udp_destroy(priv);
	ptp_fs->valid = false;
}
0640
/* Install steering rules that redirect PTP traffic (UDP event port over
 * IPv4/IPv6 and L2 ETH_P_1588) to the PTP TIR. Idempotent: returns 0 when
 * the rules are already installed. On error, everything created so far is
 * unwound via the goto chain.
 */
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
	struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
	struct mlx5_flow_handle *rule;
	int err;

	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(priv);
	if (err)
		goto out_free;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(priv);
	if (err)
		goto out_destroy_udp_v6_rule;

	rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(priv);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(priv);
out_free:	/* nothing allocated before the first failure point */
	return err;
}
0696
0697 int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
0698 u8 lag_port, struct mlx5e_ptp **cp)
0699 {
0700 struct net_device *netdev = priv->netdev;
0701 struct mlx5_core_dev *mdev = priv->mdev;
0702 struct mlx5e_ptp_params *cparams;
0703 struct mlx5e_ptp *c;
0704 int err;
0705
0706
0707 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
0708 cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
0709 if (!c || !cparams)
0710 return -ENOMEM;
0711
0712 c->priv = priv;
0713 c->mdev = priv->mdev;
0714 c->tstamp = &priv->tstamp;
0715 c->pdev = mlx5_core_dma_dev(priv->mdev);
0716 c->netdev = priv->netdev;
0717 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
0718 c->num_tc = mlx5e_get_dcb_num_tc(params);
0719 c->stats = &priv->ptp_stats.ch;
0720 c->lag_port = lag_port;
0721
0722 err = mlx5e_ptp_set_state(c, params);
0723 if (err)
0724 goto err_free;
0725
0726 netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
0727
0728 mlx5e_ptp_build_params(c, cparams, params);
0729
0730 err = mlx5e_ptp_open_queues(c, cparams);
0731 if (unlikely(err))
0732 goto err_napi_del;
0733
0734 if (test_bit(MLX5E_PTP_STATE_RX, c->state))
0735 priv->rx_ptp_opened = true;
0736
0737 *cp = c;
0738
0739 kvfree(cparams);
0740
0741 return 0;
0742
0743 err_napi_del:
0744 netif_napi_del(&c->napi);
0745 err_free:
0746 kvfree(cparams);
0747 kvfree(c);
0748 return err;
0749 }
0750
/* Close the PTP channel: queues first, then NAPI, then free the channel. */
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}
0758
/* Activate the PTP channel: enable NAPI, then the per-TC SQs, then (for RX)
 * install the steering rules, activate the RQ and kick NAPI to post WQEs.
 */
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
	int tc;

	napi_enable(&c->napi);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_ptp_rx_set_fs(c->priv);
		mlx5e_activate_rq(&c->rq);
		mlx5e_trigger_napi_sched(&c->napi);
	}
}
0775
/* Deactivate the PTP channel in reverse order of activation: RQ first,
 * then the per-TC SQs, and disable NAPI last.
 */
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
	int tc;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_deactivate_rq(&c->rq);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
	}

	napi_disable(&c->napi);
}
0790
0791 int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
0792 {
0793 if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
0794 return -EINVAL;
0795
0796 *rqn = c->rq.rqn;
0797 return 0;
0798 }
0799
0800 int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
0801 {
0802 struct mlx5e_ptp_fs *ptp_fs;
0803
0804 if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
0805 return 0;
0806
0807 ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
0808 if (!ptp_fs)
0809 return -ENOMEM;
0810
0811 priv->fs->ptp_fs = ptp_fs;
0812 return 0;
0813 }
0814
0815 void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
0816 {
0817 struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
0818
0819 if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
0820 return;
0821
0822 mlx5e_ptp_rx_unset_fs(priv);
0823 kfree(ptp_fs);
0824 }
0825
/* Install (@set) or remove the PTP RX steering rules, guarding against
 * inconsistent channel state: rules may only be added while a PTP-RX channel
 * exists, and only removed while it does not. No-op unless the profile
 * supports PTP RX and the device is opened.
 */
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
	struct mlx5e_ptp *c = priv->channels.ptp;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return 0;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (set) {
		if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
			netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
			return -EINVAL;
		}
		return mlx5e_ptp_rx_set_fs(priv);
	}

	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
		return -EINVAL;
	}
	mlx5e_ptp_rx_unset_fs(priv);
	return 0;
}