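/*
 * Chelsio cxgb4 upper-layer driver (ULD) interface: SGE queue management and
 * ULD registration/attach handling.
 */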
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

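/* Flush any LRO sessions the ULD has aggregated on this response queue. */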
static void uldrx_flush_handler(struct sge_rspq *q)
{
        struct adapter *adap = q->adap;

        if (adap->uld[q->uld].lro_flush)
                adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

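/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Delivers an ingress offload packet to the ULD that owns the queue and
 * updates the queue's receive statistics.
 */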
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct adapter *adap = q->adap;
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
        int ret;

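        /* FW can send CPLs encapsulated in a CPL_FW4_MSG */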
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (q->flush_handler)
                ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
                                                       rsp, gl, &q->lro_mgr,
                                                       &q->napi);
        else
                ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
                                                   rsp, gl);

        if (ret) {
                rxq->stats.nomem++;
                return -1;
        }

        if (!gl)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

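/* Allocate and set up the ULD ingress queues (regular Rx queues followed by
 * any concentrator queues) described by @rxq_info, including their MSI-X
 * vectors when MSI-X is in use.  On failure, everything allocated so far is
 * freed again.
 */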
static int alloc_uld_rxqs(struct adapter *adap,
                          struct sge_uld_rxq_info *rxq_info, bool lro)
{
        unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
        struct sge_ofld_rxq *q = rxq_info->uldrxq;
        unsigned short *ids = rxq_info->rspq_id;
        int i, err, msi_idx, que_idx = 0;
        struct sge *s = &adap->sge;
        unsigned int per_chan;

        per_chan = rxq_info->nrxq / adap->params.nports;

        if (adap->flags & CXGB4_USING_MSIX)
                msi_idx = 1;
        else
                msi_idx = -((int)s->intrq.abs_id + 1);

        for (i = 0; i < nq; i++, q++) {
                if (i == rxq_info->nrxq) {
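                        /* The concentrator (CIQ) queues follow the regular
                         * Rx queues; restart the per-channel distribution
                         * for them.
                         */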
                        per_chan = rxq_info->nciq / adap->params.nports;
                        que_idx = 0;
                }

                if (msi_idx >= 0) {
                        msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
                        if (msi_idx < 0) {
                                err = -ENOSPC;
                                goto freeout;
                        }

                        snprintf(adap->msix_info[msi_idx].desc,
                                 sizeof(adap->msix_info[msi_idx].desc),
                                 "%s-%s%d",
                                 adap->port[0]->name, rxq_info->name, i);

                        q->msix = &adap->msix_info[msi_idx];
                }
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                       adap->port[que_idx++ / per_chan],
                                       msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler,
                                       lro ? uldrx_flush_handler : NULL,
                                       0);
                if (err)
                        goto freeout;

                memset(&q->stats, 0, sizeof(q->stats));
                if (ids)
                        ids[i] = q->rspq.abs_id;
        }
        return 0;
freeout:
        q = rxq_info->uldrxq;
        for ( ; i; i--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
                if (q->msix)
                        cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
        }
        return err;
}

static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int i, ret;

        ret = alloc_uld_rxqs(adap, rxq_info, lro);
        if (ret)
                return ret;

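        /* Tell the firmware to route control queue completions to the RDMA
         * response queues.
         */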
        if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                unsigned int cmplqid;
                u32 param, cmdop;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        ret = t4_set_params(adap, adap->mbox, adap->pf,
                                            0, 1, &param, &cmplqid);
                }
        }
        return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
                             struct sge_ofld_rxq *q)
{
        for ( ; n; n--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
        }
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                u32 param, cmdop, cmplqid = 0;
                int i;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        t4_set_params(adap, adap->mbox, adap->pf,
                                      0, 1, &param, &cmplqid);
                }
        }

        if (rxq_info->nciq)
                t4_free_uld_rxqs(adap, rxq_info->nciq,
                                 rxq_info->uldrxq + rxq_info->nrxq);
        t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
}

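/* Work out how many Rx and concentrator queues this ULD gets (based on the
 * number of online CPUs, the per-ULD queue budget and the port count) and
 * allocate the bookkeeping for them.
 */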
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
                          const struct cxgb4_uld_info *uld_info)
{
        struct sge *s = &adap->sge;
        struct sge_uld_rxq_info *rxq_info;
        int i, nrxq, ciq_size;

        rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
        if (!rxq_info)
                return -ENOMEM;

        if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
                i = s->nqs_per_uld;
                rxq_info->nrxq = roundup(i, adap->params.nports);
        } else {
                i = min_t(int, uld_info->nrxq,
                          num_online_cpus());
                rxq_info->nrxq = roundup(i, adap->params.nports);
        }
        if (!uld_info->ciq) {
                rxq_info->nciq = 0;
        } else {
                if (adap->flags & CXGB4_USING_MSIX)
                        rxq_info->nciq = min_t(int, s->nqs_per_uld,
                                               num_online_cpus());
                else
                        rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
                                               num_online_cpus());
                rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
                                  adap->params.nports);
                rxq_info->nciq = max_t(int, rxq_info->nciq,
                                       adap->params.nports);
        }

        nrxq = rxq_info->nrxq + rxq_info->nciq;
        rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
                                   GFP_KERNEL);
        if (!rxq_info->uldrxq) {
                kfree(rxq_info);
                return -ENOMEM;
        }

        rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
        if (!rxq_info->rspq_id) {
                kfree(rxq_info->uldrxq);
                kfree(rxq_info);
                return -ENOMEM;
        }

        for (i = 0; i < rxq_info->nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
                r->rspq.uld = uld_type;
                r->fl.size = 72;
        }

        ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
        if (ciq_size > SGE_MAX_IQ_SIZE) {
                dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
                ciq_size = SGE_MAX_IQ_SIZE;
        }

        for (i = rxq_info->nrxq; i < nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
                r->rspq.uld = uld_type;
        }

        memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
        adap->sge.uld_rxq_info[uld_type] = rxq_info;

        return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        adap->sge.uld_rxq_info[uld_type] = NULL;
        kfree(rxq_info->rspq_id);
        kfree(rxq_info->uldrxq);
        kfree(rxq_info);
}

static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        struct msix_info *minfo;
        unsigned int idx;
        int err = 0;

        for_each_uldrxq(rxq_info, idx) {
                minfo = rxq_info->uldrxq[idx].msix;
                err = request_irq(minfo->vec,
                                  t4_sge_intr_msix, 0,
                                  minfo->desc,
                                  &rxq_info->uldrxq[idx].rspq);
                if (err)
                        goto unwind;

                cxgb4_set_msix_aff(adap, minfo->vec,
                                   &minfo->aff_mask, idx);
        }
        return 0;

unwind:
        while (idx-- > 0) {
                minfo = rxq_info->uldrxq[idx].msix;
                cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
                cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
                free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
        }
        return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        struct msix_info *minfo;
        unsigned int idx;

        for_each_uldrxq(rxq_info, idx) {
                minfo = rxq_info->uldrxq[idx].msix;
                cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
                cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
                free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
        }
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx) {
                struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

                if (!q)
                        continue;

                cxgb4_enable_rx(adap, q);
        }
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx) {
                struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

                if (!q)
                        continue;

                cxgb4_quiesce_rx(q);
        }
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
        int nq = txq_info->ntxq;
        int i;

        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                if (txq && txq->q.desc) {
                        tasklet_kill(&txq->qresume_tsk);
                        t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
                                        txq->q.cntxt_id);
                        free_tx_desc(adap, &txq->q, txq->q.in_use, false);
                        kfree(txq->q.sdesc);
                        __skb_queue_purge(&txq->sendq);
                        free_txq(adap, &txq->q);
                }
        }
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
                  unsigned int uld_type)
{
        struct sge *s = &adap->sge;
        int nq = txq_info->ntxq;
        int i, j, err;

        j = nq / adap->params.nports;
        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                txq->q.size = 1024;
                err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
                                           s->fw_evtq.cntxt_id, uld_type);
                if (err)
                        goto freeout;
        }
        return 0;
freeout:
        free_sge_txq_uld(adap, txq_info);
        return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type = TX_ULD(uld_type);

        txq_info = adap->sge.uld_txq_info[tx_uld_type];

        if (txq_info && atomic_dec_and_test(&txq_info->users)) {
                free_sge_txq_uld(adap, txq_info);
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                adap->sge.uld_txq_info[tx_uld_type] = NULL;
        }
}

static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
                  const struct cxgb4_uld_info *uld_info)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type, i;

        tx_uld_type = TX_ULD(uld_type);
        txq_info = adap->sge.uld_txq_info[tx_uld_type];

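        /* A CXGB4_TX_OFLD queue set is shared between the offload ULDs; if it
         * already exists, just take another reference on it.
         */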
        if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
            (atomic_inc_return(&txq_info->users) > 1))
                return 0;

        txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
        if (!txq_info)
                return -ENOMEM;
        if (uld_type == CXGB4_ULD_CRYPTO) {
                i = min_t(int, adap->vres.ncrypto_fc,
                          num_online_cpus());
                txq_info->ntxq = rounddown(i, adap->params.nports);
                if (txq_info->ntxq <= 0) {
                        dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
                        kfree(txq_info);
                        return -EINVAL;
                }

        } else {
                i = min_t(int, uld_info->ntxq, num_online_cpus());
                txq_info->ntxq = roundup(i, adap->params.nports);
        }
        txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
                                   GFP_KERNEL);
        if (!txq_info->uldtxq) {
                kfree(txq_info);
                return -ENOMEM;
        }

        if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                return -ENOMEM;
        }

        atomic_inc(&txq_info->users);
        adap->sge.uld_txq_info[tx_uld_type] = txq_info;
        return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
                           struct cxgb4_lld_info *lli)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int tx_uld_type = TX_ULD(uld_type);
        struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

        lli->rxq_ids = rxq_info->rspq_id;
        lli->nrxq = rxq_info->nrxq;
        lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
        lli->nciq = rxq_info->nciq;
        lli->ntxq = txq_info->ntxq;
}

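/* Allocate the per-adapter ULD bookkeeping: the ULD info array and the
 * per-type Rx/Tx queue-info pointer tables.
 */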
int t4_uld_mem_alloc(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
        if (!adap->uld)
                return -ENOMEM;

        s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
                                  sizeof(struct sge_uld_rxq_info *),
                                  GFP_KERNEL);
        if (!s->uld_rxq_info)
                goto err_uld;

        s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
                                  sizeof(struct sge_uld_txq_info *),
                                  GFP_KERNEL);
        if (!s->uld_txq_info)
                goto err_uld_rx;
        return 0;

err_uld_rx:
        kfree(s->uld_rxq_info);
err_uld:
        kfree(adap->uld);
        return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        kfree(s->uld_txq_info);
        kfree(s->uld_rxq_info);
        kfree(adap->uld);
}

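/* This function should be called with uld_mutex taken. */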
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
        if (adap->uld[type].handle) {
                adap->uld[type].handle = NULL;
                adap->uld[type].add = NULL;
                release_sge_txq_uld(adap, type);

                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);

                if (adap->flags & CXGB4_USING_MSIX)
                        free_msix_queue_irqs_uld(adap, type);

                free_sge_queues_uld(adap, type);
                free_queues_uld(adap, type);
        }
}

void t4_uld_clean_up(struct adapter *adap)
{
        unsigned int i;

        if (!is_uld(adap))
                return;

        mutex_lock(&uld_mutex);
        for (i = 0; i < CXGB4_ULD_MAX; i++) {
                if (!adap->uld[i].handle)
                        continue;

                cxgb4_shutdown_uld_adapter(adap, i);
        }
        mutex_unlock(&uld_mutex);
}

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
        int i;

        lld->pdev = adap->pdev;
        lld->pf = adap->pf;
        lld->l2t = adap->l2t;
        lld->tids = &adap->tids;
        lld->ports = adap->port;
        lld->vr = &adap->vres;
        lld->mtus = adap->params.mtus;
        lld->nchan = adap->params.nports;
        lld->nports = adap->params.nports;
        lld->wr_cred = adap->params.ofldq_wr_cred;
        lld->crypto = adap->params.crypto;
        lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
        lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
        lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
        lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
        lld->iscsi_ppm = &adap->iscsi_ppm;
        lld->adapter_type = adap->params.chip;
        lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lld->udb_density = 1 << adap->params.sge.eq_qpp;
        lld->ucq_density = 1 << adap->params.sge.iq_qpp;
        lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
        lld->filt_mode = adap->params.tp.vlan_pri_map;

        for (i = 0; i < NCHAN; i++)
                lld->tx_modq[i] = i;
        lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
        lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
        lld->fw_vers = adap->params.fw_vers;
        lld->dbfifo_int_thresh = dbfifo_int_thresh;
        lld->sge_ingpadboundary = adap->sge.fl_align;
        lld->sge_egrstatuspagesize = adap->sge.stat_len;
        lld->sge_pktshift = adap->sge.pktshift;
        lld->ulp_crypto = adap->params.crypto;
        lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
        lld->max_ordird_qp = adap->params.max_ordird_qp;
        lld->max_ird_adapter = adap->params.max_ird_adapter;
        lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
        lld->nodeid = dev_to_node(adap->pdev_dev);
        lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
        lld->write_w_imm_support = adap->params.write_w_imm_support;
        lld->write_cmpl_support = adap->params.write_cmpl_support;
}

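/* Hand the lld info to a registered ULD via its add() callback and, if the
 * adapter is already fully initialised, report it as up via state_change().
 */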
static int uld_attach(struct adapter *adap, unsigned int uld)
{
        struct cxgb4_lld_info lli;
        void *handle;

        uld_init(adap, &lli);
        uld_queue_init(adap, uld, &lli);

        handle = adap->uld[uld].add(&lli);
        if (IS_ERR(handle)) {
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         adap->uld[uld].name, PTR_ERR(handle));
                return PTR_ERR(handle);
        }

        adap->uld[uld].handle = handle;
        t4_register_netevent_notifier();

        if (adap->flags & CXGB4_FULL_INIT_DONE)
                adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

        return 0;
}

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
static bool cxgb4_uld_in_use(struct adapter *adap)
{
        const struct tid_info *t = &adap->tids;

        return (atomic_read(&t->conns_in_use) || t->stids_in_use);
}

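/* cxgb4_set_ktls_feature: request FW to enable/disable kTLS settings.
 * @adap: adapter info
 * @enable: 1 to enable / 0 to disable kTLS settings.
 */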
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{
        int ret = 0;
        u32 params =
                FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
                FW_PARAMS_PARAM_Y_V(enable) |
                FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);

        if (enable) {
                if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
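                        /* If any ULD connections (tids/stids) are already in
                         * use, another ULD is active and kTLS cannot be
                         * enabled.
                         */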
                        if (cxgb4_uld_in_use(adap)) {
                                dev_dbg(adap->pdev_dev,
                                        "ULD connections (tid/stid) active. Can't enable kTLS\n");
                                return -EINVAL;
                        }
                        ret = t4_set_params(adap, adap->mbox, adap->pf,
                                            0, 1, &params, &params);
                        if (ret)
                                return ret;
                        refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
                        pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
                } else {
                        /* kTLS is already enabled, just bump the refcount. */
                        refcount_inc(&adap->chcr_ktls.ktls_refcount);
                }
        } else {
                /* Can't disable if it was never enabled. */
                if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
                        return -EINVAL;

                /* Drop a reference; only the last user actually disables the
                 * kTLS feature in firmware.
                 */
                if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
                        ret = t4_set_params(adap, adap->mbox, adap->pf,
                                            0, 1, &params, &params);
                        if (ret)
                                return ret;
                        pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
                }
        }

        return ret;
}
#endif

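/* Allocate all the queue resources a ULD needs on one adapter and attach the
 * ULD to it; if any step fails, the resources allocated so far are released
 * and a warning is logged.
 */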
static void cxgb4_uld_alloc_resources(struct adapter *adap,
                                      enum cxgb4_uld type,
                                      const struct cxgb4_uld_info *p)
{
        int ret = 0;

        if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
            (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                return;
        if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                return;
        ret = cfg_queues_uld(adap, type, p);
        if (ret)
                goto out;
        ret = setup_sge_queues_uld(adap, type, p->lro);
        if (ret)
                goto free_queues;
        if (adap->flags & CXGB4_USING_MSIX) {
                ret = request_msix_queue_irqs_uld(adap, type);
                if (ret)
                        goto free_rxq;
        }
        if (adap->flags & CXGB4_FULL_INIT_DONE)
                enable_rx_uld(adap, type);
        if (adap->uld[type].add)
                goto free_irq;
        ret = setup_sge_txq_uld(adap, type, p);
        if (ret)
                goto free_irq;
        adap->uld[type] = *p;
        ret = uld_attach(adap, type);
        if (ret)
                goto free_txq;
        return;
free_txq:
        release_sge_txq_uld(adap, type);
free_irq:
        if (adap->flags & CXGB4_FULL_INIT_DONE)
                quiesce_rx_uld(adap, type);
        if (adap->flags & CXGB4_USING_MSIX)
                free_msix_queue_irqs_uld(adap, type);
free_rxq:
        free_sge_queues_uld(adap, type);
free_queues:
        free_queues_uld(adap, type);
out:
        dev_warn(adap->pdev_dev,
                 "ULD registration failed for uld type %d\n", type);
}

void cxgb4_uld_enable(struct adapter *adap)
{
        struct cxgb4_uld_list *uld_entry;

        mutex_lock(&uld_mutex);
        list_add_tail(&adap->list_node, &adapter_list);
        list_for_each_entry(uld_entry, &uld_list, list_node)
                cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
                                          &uld_entry->uld_info);
        mutex_unlock(&uld_mutex);
}

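/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.
 */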
void cxgb4_register_uld(enum cxgb4_uld type,
                        const struct cxgb4_uld_info *p)
{
        struct cxgb4_uld_list *uld_entry;
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return;

        uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
        if (!uld_entry)
                return;

        memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node)
                cxgb4_uld_alloc_resources(adap, type, p);

        uld_entry->uld_type = type;
        list_add_tail(&uld_entry->list_node, &uld_list);
        mutex_unlock(&uld_mutex);
}
EXPORT_SYMBOL(cxgb4_register_uld);

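/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */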
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
        struct cxgb4_uld_list *uld_entry, *tmp;
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;

        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;

                cxgb4_shutdown_uld_adapter(adap, type);
        }

        list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
                if (uld_entry->uld_type == type) {
                        list_del(&uld_entry->list_node);
                        kfree(uld_entry);
                }
        }
        mutex_unlock(&uld_mutex);

        return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);