#include "nfp_app.h"
#include "nfp_net_dp.h"
#include "nfp_net_xsk.h"
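
/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp:		NFP Net data path struct
 * @dma_addr:	Pointer to storage for DMA address (output param)
 *
 * Allocate a new page frag (or a full page when an XDP program is attached)
 * and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */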
void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = netdev_alloc_frag(dp->fl_bufsz);
	} else {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		frag = page ? page_address(page) : NULL;
	}
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}
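
/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring:  TX ring structure
 * @dp:       NFP Net data path struct
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 * @is_xdp:   Is this an XDP TX ring?
 */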
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
		     struct nfp_net_r_vector *r_vec, unsigned int idx,
		     bool is_xdp)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;
	tx_ring->is_xdp = is_xdp;
	u64_stats_init(&tx_ring->r_vec->tx_sync);

	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}
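
/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for an RX ring
 * @rx_ring:  RX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */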
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;
	u64_stats_init(&rx_ring->r_vec->rx_sync);

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
}
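
/**
 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
 * @rx_ring:	RX ring structure
 *
 * Moves the unused buffer slot back to the end of the buffer table and
 * clears the descriptor ring memory.
 */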
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* wr_p == rd_p == 0 means the ring was never fed any FL buffers,
	 * so there is nothing to reset.  RX rings are always kept at
	 * cnt - 1 buffers, so exactly one slot is unused.
	 */
	if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
		return;

	/* Move the empty entry to the end of the list */
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
	last_idx = rx_ring->cnt - 1;
	if (rx_ring->r_vec->xsk_pool) {
		rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
		memset(&rx_ring->xsk_rxbufs[last_idx], 0,
		       sizeof(*rx_ring->xsk_rxbufs));
	} else {
		rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
		memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
	}

	memset(rx_ring->rxds, 0, rx_ring->size);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
}
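
/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to remove buffers from
 *
 * Assumes the device is stopped; safe to call more than once.
 */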
static void
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		/* A NULL frag can only happen when the initial filling of
		 * the ring failed, so the DMA address is already zero and
		 * the unmap can be skipped.
		 */
		if (!rx_ring->rxbufs[i].frag)
			continue;

		nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
		nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
		rx_ring->rxbufs[i].dma_addr = 0;
		rx_ring->rxbufs[i].frag = NULL;
	}
}
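
/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to populate with buffers
 *
 * Return: 0 on success, -ENOMEM if a buffer allocation fails.
 */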
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_rx_buf *rxbufs;
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return 0;

	rxbufs = rx_ring->rxbufs;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
		if (!rxbufs[i].frag) {
			nfp_net_rx_ring_bufs_free(dp, rx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}

int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
			       GFP_KERNEL);
	if (!dp->tx_rings)
		return -ENOMEM;

	if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
		dp->txrwb = dma_alloc_coherent(dp->dev,
					       dp->num_tx_rings * sizeof(u64),
					       &dp->txrwb_dma, GFP_KERNEL);
		if (!dp->txrwb)
			goto err_free_rings;
	}

	for (r = 0; r < dp->num_tx_rings; r++) {
		int bias = 0;

		if (r >= dp->num_stack_tx_rings)
			bias = dp->num_stack_tx_rings;

		nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
				     &nn->r_vecs[r - bias], r, bias);

		if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
			goto err_free_prev;

		if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
err_free_ring:
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}
	if (dp->txrwb)
		dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
				  dp->txrwb, dp->txrwb_dma);
err_free_rings:
	kfree(dp->tx_rings);
	return -ENOMEM;
}

void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_tx_rings; r++) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}

	if (dp->txrwb)
		dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
				  dp->txrwb, dp->txrwb_dma);
	kfree(dp->tx_rings);
}
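
/**
 * nfp_net_rx_ring_free() - Free resources allocated to an RX ring
 * @rx_ring:  RX ring to free
 */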
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	if (dp->netdev)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		kvfree(rx_ring->xsk_rxbufs);
	else
		kvfree(rx_ring->rxbufs);

	if (rx_ring->rxds)
		dma_free_coherent(dp->dev, rx_ring->size,
				  rx_ring->rxds, rx_ring->dma);

	rx_ring->cnt = 0;
	rx_ring->rxbufs = NULL;
	rx_ring->xsk_rxbufs = NULL;
	rx_ring->rxds = NULL;
	rx_ring->dma = 0;
	rx_ring->size = 0;
}
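
/**
 * nfp_net_rx_ring_alloc() - Allocate resources for an RX ring
 * @dp:	      NFP Net data path struct
 * @rx_ring:  RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */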
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
	enum xdp_mem_type mem_type;
	size_t rxbuf_sw_desc_sz;
	int err;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		mem_type = MEM_TYPE_XSK_BUFF_POOL;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
	} else {
		mem_type = MEM_TYPE_PAGE_ORDER0;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
	}

	if (dp->netdev) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
				       rx_ring->idx, rx_ring->r_vec->napi.napi_id);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
		if (err)
			goto err_alloc;
	}

	rx_ring->cnt = dp->rxd_cnt;
	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!rx_ring->rxds) {
		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
			    rx_ring->cnt);
		goto err_alloc;
	}

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					       GFP_KERNEL);
		if (!rx_ring->xsk_rxbufs)
			goto err_alloc;
	} else {
		rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					   GFP_KERNEL);
		if (!rx_ring->rxbufs)
			goto err_alloc;
	}

	return 0;

err_alloc:
	nfp_net_rx_ring_free(rx_ring);
	return -ENOMEM;
}

int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
			       GFP_KERNEL);
	if (!dp->rx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

		if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
			goto err_free_prev;

		if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}
	kfree(dp->rx_rings);
	return -ENOMEM;
}

void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}

	kfree(dp->rx_rings);
}

void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx)
{
	/* Write the DMA address, size and MSI-X vector to the device */
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
}

void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
	if (tx_ring->txrwb) {
		*tx_ring->txrwb = 0;
		nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
			  nn->dp.txrwb_dma + idx * sizeof(u64));
	}
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}

void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);

	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
	nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}

/* TX entry point - hand the skb to the active datapath's xmit op */
netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return nn->dp.ops->xmit(skb, netdev);
}

/* Unlocked control-message TX - nfp_ctrl_tx() below is the locked wrapper */
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];

	return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
}

/* Control-message TX, serialized with r_vec->lock */
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
	bool ret;

	spin_lock_bh(&r_vec->lock);
	ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
	spin_unlock_bh(&r_vec->lock);

	return ret;
}
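
/* Set the hwaccel VLAN tag on an skb whose tag was stripped by the NIC,
 * taking the tag either from the RX descriptor or from parsed metadata.
 * Returns false only when the metadata carries an unrecognized TPID.
 */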
bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
			const struct nfp_meta_parsed *meta)
{
	u16 tpid = 0, tci = 0;

	if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) {
		tpid = ETH_P_8021Q;
		tci = le16_to_cpu(rxd->rxd.vlan);
	} else if (meta->vlan.stripped) {
		if (meta->vlan.tpid == NFP_NET_VLAN_CTAG)
			tpid = ETH_P_8021Q;
		else if (meta->vlan.tpid == NFP_NET_VLAN_STAG)
			tpid = ETH_P_8021AD;
		else
			return false;

		tci = meta->vlan.tci;
	}
	if (tpid)
		__vlan_hwaccel_put_tag(skb, htons(tpid), tci);

	return true;
}