// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for netdev RX functionality
 */

#include "sdma.h"
#include "verbs.h"
#include "netdev.h"
#include "hfi.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
				  struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops;
	struct hfi1_devdata *dd = rx->dd;
	int ret;

	uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
	uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;

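	/* Allocate the receive header queue and eager buffers. */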
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		goto done;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto done;

	clear_rcvhdrtail(uctxt);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS;
	rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS;

	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;

	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
done:
	return ret;
}

static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
				     struct hfi1_ctxtdata **ctxt)
{
	struct hfi1_ctxtdata *uctxt;
	int ret;

	if (dd->flags & HFI1_FROZEN)
		return -EIO;

	ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
		return -ENOMEM;
	}

	uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	uctxt->fast_handler = handle_receive_interrupt_napi_fp;
	uctxt->slow_handler = handle_receive_interrupt_napi_sp;
	hfi1_set_seq_cnt(uctxt, 1);
	uctxt->is_vnic = true;

	hfi1_stats.sps_ctxts++;

	dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
	*ctxt = uctxt;

	return 0;
}

static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
					struct hfi1_ctxtdata *uctxt)
{
	flush_wc();

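	/*
	 * Disable the receive context and interrupt availability, and clear
	 * the RcvCtxtCtrl features that were enabled when it was set up.
	 */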
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);

	if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
		msix_free_irq(dd, uctxt->msix_intr);

	uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
	uctxt->event_flags = 0;

	hfi1_clear_tids(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	hfi1_stats.sps_ctxts--;

	hfi1_free_ctxt(uctxt);
}

static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
				  struct hfi1_ctxtdata **ctxt)
{
	int rc;
	struct hfi1_devdata *dd = rx->dd;

	rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
		return rc;
	}

	rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
		hfi1_netdev_deallocate_ctxt(dd, *ctxt);
		*ctxt = NULL;
	}

	return rc;
}

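/**
 * hfi1_num_netdev_contexts - Count of netdev receive contexts to use.
 * @dd: device on which to allocate netdev contexts
 * @available_contexts: count of available receive contexts
 * @cpu_mask: mask of possible CPUs to include for contexts
 *
 * Return: the smallest of the CPUs available on the device's NUMA node
 * (restricted to @cpu_mask), @available_contexts, and HFI1_MAX_NETDEV_CTXTS;
 * 0 if AIP is not enabled or no contexts remain.
 */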
u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
			     struct cpumask *cpu_mask)
{
	cpumask_var_t node_cpu_mask;
	unsigned int available_cpus;

	if (!HFI1_CAP_IS_KSET(AIP))
		return 0;

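	/* User contexts take priority; bail if none are left for netdevs. */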
	if (available_contexts == 0) {
		dd_dev_info(dd, "No receive contexts available for netdevs.\n");
		return 0;
	}

	if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
		dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
		return 0;
	}

	cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));

	available_cpus = cpumask_weight(node_cpu_mask);

	free_cpumask_var(node_cpu_mask);

	return min3(available_cpus, available_contexts,
		    (u32)HFI1_MAX_NETDEV_CTXTS);
}

static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
{
	int i;
	int rc;
	struct hfi1_devdata *dd = rx->dd;
	struct net_device *dev = &rx->rx_napi;

	rx->num_rx_q = dd->num_netdev_contexts;
	rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
			       GFP_KERNEL, dd->node);

	if (!rx->rxq) {
		dd_dev_err(dd, "Unable to allocate netdev queue data\n");
		return -ENOMEM;
	}

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;

		hfi1_rcd_get(rxq->rcd);
		rxq->rx = rx;
		rxq->rcd->napi = &rxq->napi;
		dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
			    i, rxq->rcd->ctxt);
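		/* Busy polling is not used on these queues; opt this NAPI out of it. */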
		set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
		netif_napi_add_weight(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
		rc = msix_netdev_request_rcd_irq(rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;
	}

	return 0;

bail_context_irq_failure:
	dd_dev_err(dd, "Unable to allot receive context\n");
	for (; i >= 0; i--) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		if (rxq->rcd) {
			hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
			hfi1_rcd_put(rxq->rcd);
			rxq->rcd = NULL;
		}
	}
	kfree(rx->rxq);
	rx->rxq = NULL;

	return rc;
}

static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
{
	int i;
	struct hfi1_devdata *dd = rx->dd;

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		netif_napi_del(&rxq->napi);
		hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
		hfi1_rcd_put(rxq->rcd);
		rxq->rcd = NULL;
	}

	kfree(rx->rxq);
	rx->rxq = NULL;
	rx->num_rx_q = 0;
}

static void enable_queues(struct hfi1_netdev_rx *rx)
{
	int i;

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);
		napi_enable(&rxq->napi);
		hfi1_rcvctrl(rx->dd,
			     HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
			     rxq->rcd);
	}
}

static void disable_queues(struct hfi1_netdev_rx *rx)
{
	int i;

	msix_netdev_synchronize_irq(rx->dd);

	for (i = 0; i < rx->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

		dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);

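		/* stop the context, then wait for any scheduled napi to finish */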
		hfi1_rcvctrl(rx->dd,
			     HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
			     rxq->rcd);
		napi_synchronize(&rxq->napi);
		napi_disable(&rxq->napi);
	}
}

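/**
 * hfi1_netdev_rx_init - Increments the netdevs counter.
 * @dd: hfi1 dev data
 *
 * The first caller allocates the receive queue data and registers a NAPI
 * instance for each queue; subsequent callers only bump the counter.
 *
 * Return: 0 on success, or the error from the receive queue setup.
 */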
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;
	int res;

	if (atomic_fetch_inc(&rx->netdevs))
		return 0;

	mutex_lock(&hfi1_mutex);
	res = hfi1_netdev_rxq_init(rx);
	mutex_unlock(&hfi1_mutex);
	return res;
}

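/**
 * hfi1_netdev_rx_destroy - Destroys netdev receive queues when there are no users.
 * @dd: hfi1 dev data
 *
 * Return: always 0.
 */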
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

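	/* destroy the RX queues only if this is the last netdev going away */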
	if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
		mutex_lock(&hfi1_mutex);
		hfi1_netdev_rxq_deinit(rx);
		mutex_unlock(&hfi1_mutex);
	}

	return 0;
}

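/**
 * hfi1_alloc_rx - Allocates the per-device rx support structure.
 * @dd: hfi1 dev data
 *
 * Allocates the structure that gathers the receive resources and the dummy
 * netdev used for NAPI, and stores it in @dd on success.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */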
int hfi1_alloc_rx(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx;

	dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx));
	rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);

	if (!rx)
		return -ENOMEM;
	rx->dd = dd;
	init_dummy_netdev(&rx->rx_napi);

	xa_init(&rx->dev_tbl);
	atomic_set(&rx->enabled, 0);
	atomic_set(&rx->netdevs, 0);
	dd->netdev_rx = rx;

	return 0;
}

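/**
 * hfi1_free_rx - Frees the rx support structure, if it was allocated.
 * @dd: hfi1 dev data
 */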
void hfi1_free_rx(struct hfi1_devdata *dd)
{
	if (dd->netdev_rx) {
		dd_dev_info(dd, "hfi1 rx freed\n");
		kfree(dd->netdev_rx);
		dd->netdev_rx = NULL;
	}
}

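/**
 * hfi1_netdev_enable_queues - Enables the netdev receive queues.
 * @dd: hfi1 dev data
 *
 * The first enabler turns on NAPI and receive interrupts for every queue;
 * an atomic counter tracks how many users have enabled the queues.
 * hfi1_netdev_disable_queues() drops the counter and shuts the queues down
 * again once the last user has disabled them.
 */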
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx;

	if (!dd->netdev_rx)
		return;

	rx = dd->netdev_rx;
	if (atomic_fetch_inc(&rx->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	enable_queues(rx);
	mutex_unlock(&hfi1_mutex);
}

void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_rx *rx;

	if (!dd->netdev_rx)
		return;

	rx = dd->netdev_rx;
	if (atomic_dec_if_positive(&rx->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	disable_queues(rx);
	mutex_unlock(&hfi1_mutex);
}

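/**
 * hfi1_netdev_add_data - Registers data with a unique identifier.
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 * @data: data to be associated with @id
 *
 * Return: 0 on success, or an errno (e.g. -EBUSY when @id is already in use).
 */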
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
}

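/**
 * hfi1_netdev_remove_data - Removes the data registered under @id.
 * @dd: hfi1 dev data
 * @id: integer id previously passed to hfi1_netdev_add_data()
 *
 * Return: the removed entry, or NULL if @id was not registered.
 */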
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_erase(&rx->dev_tbl, id);
}

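/**
 * hfi1_netdev_get_data - Gets the data registered under @id.
 * @dd: hfi1 dev data
 * @id: integer id previously passed to hfi1_netdev_add_data()
 *
 * Return: the registered entry, or NULL if @id was not registered.
 */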
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;

	return xa_load(&rx->dev_tbl, id);
}

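/**
 * hfi1_netdev_get_first_data - Gets the first entry with an id >= @start_id.
 * @dd: hfi1 dev data
 * @start_id: on entry, the id to start searching from; on return, the id of
 *	      the entry found
 *
 * Return: the first matching entry, or NULL if none exists.
 */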
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
	struct hfi1_netdev_rx *rx = dd->netdev_rx;
	unsigned long index = *start_id;
	void *ret;

	ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
	*start_id = (int)index;
	return ret;
}