0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035 #include <linux/kernel.h>
0036 #include <linux/string.h>
0037 #include <linux/compiler.h>
0038 #include <linux/slab.h>
0039 #include <asm/page.h>
0040 #include <linux/cache.h>
0041
0042 #include "t4_values.h"
0043 #include "csio_hw.h"
0044 #include "csio_wr.h"
0045 #include "csio_mb.h"
0046 #include "csio_defs.h"
0047
/* Interrupt-coalescing packet-count threshold; 0 selects timer-based
 * coalescing instead (see csio_init_intr_coalesce_parms()).
 */
int csio_intr_coalesce_cnt;
/* SGE counter index derived from csio_intr_coalesce_cnt. */
static int csio_sge_thresh_reg;

/* Interrupt-coalescing hold-off time, in microseconds. */
int csio_intr_coalesce_time = 10;
/* SGE timer index derived from csio_intr_coalesce_time. */
static int csio_sge_timer_reg = 1;
0053
/* Program free-list buffer-size register SGE_FL_BUFFER_SIZE<_reg>_A
 * with _val. _reg must be a literal register index (token-pasted).
 */
#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
	csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
0056
/*
 * csio_get_flbuf_size - Cache one HW free-list buffer size.
 * @hw: HW module.
 * @sge: SGE parameters to fill.
 * @reg: Buffer-size register index.
 *
 * Reads SGE_FL_BUFFER_SIZE<reg> (registers are consecutive 32-bit words
 * starting at SGE_FL_BUFFER_SIZE0_A) into sge->sge_fl_buf_size[reg].
 */
static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
	sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
							reg * sizeof(uint32_t));
}
0063
0064
0065 static inline uint32_t
0066 csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
0067 {
0068 return sge->sge_fl_buf_size[buf->paddr & 0xF];
0069 }
0070
0071
0072 static inline uint32_t
0073 csio_wr_qstat_pgsz(struct csio_hw *hw)
0074 {
0075 return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
0076 }
0077
0078
/*
 * csio_wr_ring_fldb - Ring the freelist doorbell.
 * @hw: HW module.
 * @flq: Freelist queue.
 *
 * PIDX is advanced in units of 8 credits, so the doorbell is rung only
 * once at least 8 new credits have accumulated in inc_idx; the
 * remainder (inc_idx & 7) is carried over to the next call.
 */
static inline void
csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
{
	if (flq->inc_idx >= 8) {
		csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
				  PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F,
			      MYPF_REG(SGE_PF_KDOORBELL_A));
		flq->inc_idx &= 7;
	}
}
0094
0095
/*
 * csio_wr_sge_intr_enable - Re-arm SGE interrupts on an ingress queue.
 * @hw: HW module.
 * @iqid: Physical ingress queue id.
 *
 * Writes a zero CIDX increment together with the restart-counter timer
 * index to the per-PF GTS register for @iqid.
 */
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
	csio_wr_reg32(hw, CIDXINC_V(0) |
			  INGRESSQID_V(iqid) |
			  TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
		      MYPF_REG(SGE_PF_GTS_A));
}
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
/*
 * csio_wr_fill_fl - Populate a freelist ring with DMA buffers.
 * @hw: HW module.
 * @flq: Freelist queue.
 *
 * Allocates one coherent DMA buffer per freelist credit, sized by the
 * queue's buffer-size register (sreg), and writes each buffer's DMA
 * address into the ring in big-endian form. The sreg index is OR-ed
 * into the low 4 bits of the address (recovered by csio_wr_fl_bufsz()).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 * NOTE(review): buffers allocated before a failure are not freed here;
 * presumably they are reclaimed by queue teardown — confirm.
 */
static int
csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	__be64 *d = (__be64 *)(flq->vstart);
	struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
	uint64_t paddr;
	int sreg = flq->un.fl.sreg;
	int n = flq->credits;

	while (n--) {
		buf->len = sge->sge_fl_buf_size[sreg];
		buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
						&buf->paddr, GFP_KERNEL);
		if (!buf->vaddr) {
			csio_err(hw, "Could only fill %d buffers!\n", n + 1);
			return -ENOMEM;
		}

		/* Encode the buffer-size register index in the address. */
		paddr = buf->paddr | (sreg & 0xF);

		*d++ = cpu_to_be64(paddr);
		buf++;
	}

	return 0;
}
0142
0143
0144
0145
0146
0147
0148
0149
0150 static inline void
0151 csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
0152 {
0153
0154 flq->inc_idx += n;
0155 flq->pidx += n;
0156 if (unlikely(flq->pidx >= flq->credits))
0157 flq->pidx -= (uint16_t)flq->credits;
0158
0159 CSIO_INC_STATS(flq, n_flq_refill);
0160 }
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
/*
 * csio_wr_alloc_q - Allocate and initialize a WR queue.
 * @hw: HW module.
 * @qsize: Requested queue size in bytes.
 * @wrsize: Fixed work-request size (ingress/freelist); unused for egress.
 * @type: CSIO_EGRESS, CSIO_INGRESS or CSIO_FREELIST.
 * @owner: Module that owns this queue.
 * @nflb: Number of freelist buffers (ingress only); 0 means no FL.
 * @sreg: Buffer-size register index for the attached freelist.
 * @iq_intx_handler: INTx handler for an ingress queue.
 *
 * Grabs the next slot in wrm->q_arr, rounds the requested size up to
 * hardware alignment (plus status page where applicable), allocates
 * coherent DMA memory for the ring and initializes software state.
 * For an ingress queue with nflb > 0, recursively allocates and fills
 * the companion freelist queue.
 *
 * Returns the queue index in wrm->q_arr, or -1 on failure.
 */
int
csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
		uint16_t type, void *owner, uint32_t nflb, int sreg,
		iq_handler_t iq_intx_handler)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q, *flq;
	int free_idx = wrm->free_qidx;
	int ret_idx = free_idx;
	uint32_t qsz;
	int flq_idx;

	if (free_idx >= wrm->num_q) {
		csio_err(hw, "No more free queues.\n");
		return -1;
	}

	switch (type) {
	case CSIO_EGRESS:
		/* Sized in credit units, plus trailing status page. */
		qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
		break;
	case CSIO_INGRESS:
		switch (wrsize) {
		case 16:
		case 32:
		case 64:
		case 128:
			break;
		default:
			csio_err(hw, "Invalid Ingress queue WR size:%d\n",
				    wrsize);
			return -1;
		}

		/*
		 * Number of elements must be a multiple of 16, which
		 * also accounts for the status page slot.
		 */
		qsz = ALIGN(qsize/wrsize, 16) * wrsize;

		break;
	case CSIO_FREELIST:
		/* Element count multiple of 8, plus status page. */
		qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
		break;
	default:
		csio_err(hw, "Invalid queue type: 0x%x\n", type);
		return -1;
	}

	q = wrm->q_arr[free_idx];

	q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
			GFP_KERNEL);
	if (!q->vstart) {
		csio_err(hw,
			 "Failed to allocate DMA memory for "
			 "queue at id: %d size: %d\n", free_idx, qsize);
		return -1;
	}

	q->type = type;
	q->owner = owner;
	q->pidx = q->cidx = q->inc_idx = 0;
	q->size = qsz;
	q->wr_sz = wrsize;	/* meaningful only for fixed-size WRs */

	wrm->free_qidx++;

	if (type == CSIO_INGRESS) {
		/* Queue memory is zeroed, so start with genbit 1. */
		q->un.iq.genbit = 1;

		/*
		 * The last WR slot doubles as the status page, hence
		 * credits exclude one wr_sz and vwrap points at it.
		 */
		q->credits = (qsz - q->wr_sz) / q->wr_sz;
		q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
							- q->wr_sz);

		/* Allocate and attach a freelist, if requested. */
		if (nflb > 0) {
			flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
						  sizeof(__be64), CSIO_FREELIST,
						  owner, 0, sreg, NULL);
			if (flq_idx == -1) {
				csio_err(hw,
					 "Failed to allocate FL queue"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			/* Associate the new FL with this ingress queue. */
			q->un.iq.flq_idx = flq_idx;

			flq = wrm->q_arr[q->un.iq.flq_idx];
			flq->un.fl.bufs = kcalloc(flq->credits,
						  sizeof(struct csio_dma_buf),
						  GFP_KERNEL);
			if (!flq->un.fl.bufs) {
				csio_err(hw,
					 "Failed to allocate FL queue bufs"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			flq->un.fl.packen = 0;
			flq->un.fl.offset = 0;
			flq->un.fl.sreg = sreg;

			/* Fill up freelist buffer entries with DMA buffers. */
			if (csio_wr_fill_fl(hw, flq))
				return -1;

			/*
			 * Leave at least 1 credit (8 FL buffers)
			 * unpopulated, otherwise HW considers the FLQ
			 * empty.
			 */
			flq->pidx = flq->inc_idx = flq->credits - 8;
		} else {
			q->un.iq.flq_idx = -1;
		}

		/* Associate the INTx handler with this IQ. */
		q->un.iq.iq_intx_handler = iq_intx_handler;

		csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;

	} else if (type == CSIO_EGRESS) {
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
		q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
	} else {
		/* Freelist */
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
		q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
	}

	return ret_idx;
}
0328
0329
0330
0331
0332
0333
0334
0335
0336
/*
 * csio_wr_iq_create_rsp - Process the IQ-allocation mailbox response.
 * @hw: HW module.
 * @mbp: Mailbox (freed here in all paths).
 * @iq_idx: Index of the ingress queue that was created.
 *
 * Caches the FW-assigned queue ids, records the relative IQ id in the
 * interrupt map, enables SGE interrupts on the queue and, if a freelist
 * is attached, records its id and rings its doorbell.
 *
 * Returns 0 on success, -EINVAL on FW failure or id overflow.
 */
static int
csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	struct csio_iq_params iqp;
	enum fw_retval retval;
	uint32_t iq_id;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_q_iqid(hw, iq_idx) = iqp.iqid;
	csio_q_physiqid(hw, iq_idx) = iqp.physiqid;
	csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0;
	csio_q_inc_idx(hw, iq_idx) = 0;

	/* IQ ids relative to the FW's starting id. */
	iq_id = iqp.iqid - hw->wrm.fw_iq_start;

	/* The relative id indexes the interrupt map; bounds-check it. */
	if (iq_id >= CSIO_MAX_IQ) {
		csio_err(hw,
			 "Exceeding MAX_IQ(%d) supported!"
			 " iqid:%d rel_iqid:%d FW iq_start:%d\n",
			 CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	csio_q_set_intr_map(hw, iq_idx, iq_id);

	/*
	 * Re-arm SGE interrupts on this queue so the first completion
	 * generates an interrupt.
	 */
	csio_wr_sge_intr_enable(hw, iqp.physiqid);

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		csio_q_flid(hw, flq_idx) = iqp.fl0id;
		csio_q_cidx(hw, flq_idx) = 0;
		csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
		csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;

		/* Hand over the pre-filled buffers to hardware. */
		csio_wr_ring_fldb(hw, flq);
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
/*
 * csio_wr_iq_create - Issue the FW command to create an ingress queue.
 * @hw: HW module.
 * @priv: Private data object passed to the mailbox completion.
 * @iq_idx: Ingress queue index (from csio_wr_alloc_q()).
 * @vec: MSI-X vector (used only in CSIO_IM_MSIX mode).
 * @portid: PCIe channel/port for this queue.
 * @async: Is this an asynchronous (FW event) queue?
 * @cbfn: Mailbox completion callback; NULL makes the call synchronous,
 *        in which case the response is processed here.
 *
 * Builds the IQ parameters (interrupt destination, coalescing mode,
 * entry size, attached freelist) and issues the mailbox command.
 * Returns 0 on success or a negative errno.
 */
int
csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
		  uint32_t vec, uint8_t portid, bool async,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));
	csio_q_portid(hw, iq_idx) = portid;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "IQ command out of memory!\n");
		return -ENOMEM;
	}

	switch (hw->intr_mode) {
	case CSIO_IM_INTX:
	case CSIO_IM_MSI:
		/* The dedicated interrupt IQ targets PCIe directly;
		 * all other queues forward to the interrupt IQ.
		 */
		if (hw->intr_iq_idx == iq_idx)
			iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
		else
			iqp.iqandst = X_INTERRUPTDESTINATION_IQ;
		iqp.iqandstindex =
			csio_q_physiqid(hw, hw->intr_iq_idx);
		break;
	case CSIO_IM_MSIX:
		iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
		iqp.iqandstindex = (uint16_t)vec;
		break;
	case CSIO_IM_NONE:
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Pass in the ingress queue cmd parameters */
	iqp.pfn = hw->pfn;
	iqp.vfn = 0;
	iqp.iq_start = 1;
	iqp.viid = 0;
	iqp.type = FW_IQ_TYPE_FL_INT_CAP;
	iqp.iqasynch = async;
	if (csio_intr_coalesce_cnt)
		iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER;
	else
		iqp.iqanus = X_UPDATESCHEDULING_TIMER;
	iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT;
	iqp.iqpciech = portid;
	iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg;

	/* wr_sz was validated to one of these values at alloc time. */
	switch (csio_q_wr_sz(hw, iq_idx)) {
	case 16:
		iqp.iqesize = 0; break;
	case 32:
		iqp.iqesize = 1; break;
	case 64:
		iqp.iqesize = 2; break;
	case 128:
		iqp.iqesize = 3; break;
	}

	iqp.iqsize = csio_q_size(hw, iq_idx) /
			csio_q_wr_sz(hw, iq_idx);
	iqp.iqaddr = csio_q_pstart(hw, iq_idx);

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		iqp.fl0paden = 1;
		iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
		iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
		iqp.fl0fbmax = ((chip == CHELSIO_T5) ?
				X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B);
		iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
		iqp.fl0addr = csio_q_pstart(hw, flq_idx);
	}

	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of IQ cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
}
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519 static int
0520 csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
0521 {
0522 struct csio_eq_params eqp;
0523 enum fw_retval retval;
0524
0525 memset(&eqp, 0, sizeof(struct csio_eq_params));
0526
0527 csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);
0528
0529 if (retval != FW_SUCCESS) {
0530 csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
0531 mempool_free(mbp, hw->mb_mempool);
0532 return -EINVAL;
0533 }
0534
0535 csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid;
0536 csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
0537 csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0;
0538 csio_q_inc_idx(hw, eq_idx) = 0;
0539
0540 mempool_free(mbp, hw->mb_mempool);
0541
0542 return 0;
0543 }
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
0555
/*
 * csio_wr_eq_create - Issue the FW command to create an egress queue.
 * @hw: HW module.
 * @priv: Private data passed to the mailbox completion.
 * @eq_idx: Egress queue index (from csio_wr_alloc_q()).
 * @iq_idx: Ingress queue that receives this EQ's completions.
 * @portid: PCIe channel/port.
 * @cbfn: Mailbox completion callback; NULL makes the call synchronous,
 *        in which case the response is processed here.
 *
 * Returns 0 on success or a negative errno.
 */
int
csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
		  int iq_idx, uint8_t portid,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "EQ command out of memory!\n");
		return -ENOMEM;
	}

	eqp.pfn = hw->pfn;
	eqp.vfn = 0;
	eqp.eqstart = 1;
	eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE;
	eqp.iqid = csio_q_iqid(hw, iq_idx);
	eqp.fbmin = X_FETCHBURSTMIN_64B;
	eqp.fbmax = X_FETCHBURSTMAX_512B;
	eqp.cidxfthresh = 0;
	eqp.pciechn = portid;
	eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
	eqp.eqaddr = csio_q_pstart(hw, eq_idx);

	csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
				    &eqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
}
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607 static int
0608 csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
0609 {
0610 enum fw_retval retval = csio_mb_fw_retval(mbp);
0611 int rv = 0;
0612
0613 if (retval != FW_SUCCESS)
0614 rv = -EINVAL;
0615
0616 mempool_free(mbp, hw->mb_mempool);
0617
0618 return rv;
0619 }
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
/*
 * csio_wr_iq_destroy - Issue the FW command to free an ingress queue.
 * @hw: HW module.
 * @priv: Private data passed to the mailbox completion.
 * @iq_idx: Ingress queue to free; its attached freelist (if any) is
 *          freed via fl0id, fl1id is marked unused (0xFFFF).
 * @cbfn: Mailbox completion callback; NULL makes the call synchronous.
 *
 * Returns 0 on success or a negative errno.
 */
static int
csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
		   void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	iqp.pfn = hw->pfn;
	iqp.vfn = 0;
	iqp.iqid = csio_q_iqid(hw, iq_idx);
	iqp.type = FW_IQ_TYPE_FL_INT_CAP;

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1)
		iqp.fl0id = csio_q_flid(hw, flq_idx);
	else
		iqp.fl0id = 0xFFFF;

	/* No second freelist is ever attached. */
	iqp.fl1id = 0xFFFF;

	csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
}
0672
0673
0674
0675
0676
0677
0678
0679
0680
0681 static int
0682 csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
0683 {
0684 enum fw_retval retval = csio_mb_fw_retval(mbp);
0685 int rv = 0;
0686
0687 if (retval != FW_SUCCESS)
0688 rv = -EINVAL;
0689
0690 mempool_free(mbp, hw->mb_mempool);
0691
0692 return rv;
0693 }
0694
0695
0696
0697
0698
0699
0700
0701
0702
0703
0704
/*
 * csio_wr_eq_destroy - Issue the FW command to free an egress queue.
 * @hw: HW module.
 * @priv: Private data passed to the mailbox completion.
 * @eq_idx: Egress queue to free.
 * @cbfn: Mailbox completion callback; NULL makes the call synchronous.
 *
 * Returns 0 on success or a negative errno.
 */
static int
csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	eqp.pfn = hw->pfn;
	eqp.vfn = 0;
	eqp.eqid = csio_q_eqid(hw, eq_idx);

	csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
}
0736
0737
0738
0739
0740
0741
0742
0743
0744 static void
0745 csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
0746 {
0747 struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
0748 struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
0749
0750 memset(stp, 0, sizeof(*stp));
0751 }
0752
0753
0754
0755
0756
0757
0758
0759
0760
0761 static void
0762 csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
0763 {
0764 struct csio_wrm *wrm = csio_hw_to_wrm(hw);
0765 struct csio_q *q = wrm->q_arr[qidx];
0766 void *wr;
0767 struct csio_iqwr_footer *ftr;
0768 uint32_t i = 0;
0769
0770
0771 q->un.iq.genbit = 1;
0772
0773 for (i = 0; i < q->credits; i++) {
0774
0775 wr = (void *)((uintptr_t)q->vstart +
0776 (i * q->wr_sz));
0777
0778 ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
0779 (q->wr_sz - sizeof(*ftr)));
0780
0781 memset(ftr, 0, sizeof(*ftr));
0782 }
0783 }
0784
/*
 * csio_wr_destroy_queues - Free FW state for all allocated queues.
 * @hw: HW module.
 * @cmd: When true, issue FW free commands per queue; when false (or
 *       after a command returns -EBUSY/-ETIMEDOUT, which disables
 *       further commands) only reset the cached queue ids.
 *
 * Egress entries fall through to the ingress case so an EQ's paired IQ
 * is also cleaned. Always returns 0 and clears CSIO_HWF_Q_FW_ALLOCED.
 */
int
csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
{
	int i, flq_idx;
	struct csio_q *q;
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv;

	for (i = 0; i < wrm->free_qidx; i++) {
		q = wrm->q_arr[i];

		switch (q->type) {
		case CSIO_EGRESS:
			if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_eq_stpg(hw, i);
				if (!cmd) {
					csio_q_eqid(hw, i) = CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
				/* FW unreachable: stop issuing commands. */
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_eqid(hw, i) = CSIO_MAX_QID;
			}
			fallthrough;
		case CSIO_INGRESS:
			if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_iq_ftr(hw, i);
				if (!cmd) {
					csio_q_iqid(hw, i) = CSIO_MAX_QID;
					flq_idx = csio_q_iq_flq_idx(hw, i);
					if (flq_idx != -1)
						csio_q_flid(hw, flq_idx) =
								CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_iqid(hw, i) = CSIO_MAX_QID;
				flq_idx = csio_q_iq_flq_idx(hw, i);
				if (flq_idx != -1)
					csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
			}
			break;
		default:
			break;
		}
	}

	hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;

	return 0;
}
0843
0844
0845
0846
0847
0848
0849
0850
0851
0852
0853
0854
0855
0856
0857
0858
0859
0860
0861
/*
 * csio_wr_get - Reserve WR space on an egress queue.
 * @hw: HW module.
 * @qidx: Egress queue index.
 * @size: Cumulative size of the work request(s), in bytes.
 * @wrp: Output: start address(es)/size(s) of the reserved space.
 *
 * On success the reservation is returned as one segment, or two when
 * it wraps past vwrap (the status page at the ring's end); PIDX and
 * inc_idx are advanced accordingly. CIDX is refreshed from the
 * HW-written status page.
 *
 * Returns 0 on success, -EBUSY when the queue lacks credits.
 */
int
csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
	    struct csio_wr_pair *wrp)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];
	void *cwr = (void *)((uintptr_t)(q->vstart) +
			     (q->pidx * CSIO_QCREDIT_SZ));
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
	uint16_t cidx = q->cidx = ntohs(stp->cidx);
	uint16_t pidx = q->pidx;
	uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ);
	int req_credits = req_sz / CSIO_QCREDIT_SZ;
	int credits;

	CSIO_DB_ASSERT(q->owner != NULL);
	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
	CSIO_DB_ASSERT(cidx <= q->credits);

	/* One credit is kept in hand so PIDX never catches CIDX when
	 * the ring is full.
	 */
	if (pidx > cidx) {
		credits = q->credits - (pidx - cidx) - 1;
	} else if (cidx > pidx) {
		credits = cidx - pidx - 1;
	} else {
		/* pidx == cidx: the queue is empty. */
		credits = q->credits;
		CSIO_INC_STATS(q, n_qempty);
	}

	/*
	 * Fail if there is not enough space for the request.
	 */
	if (!credits || (req_credits > credits)) {
		CSIO_INC_STATS(q, n_qfull);
		return -EBUSY;
	}

	/*
	 * If the request spills past vwrap, split it: the tail of the
	 * ring forms segment 1 and the remainder restarts at vstart as
	 * segment 2. PIDX then points just past segment 2.
	 */
	if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
		wrp->addr1 = cwr;
		wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
		wrp->addr2 = q->vstart;
		wrp->size2 = req_sz - wrp->size1;
		q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
							CSIO_QCREDIT_SZ);
		CSIO_INC_STATS(q, n_qwrap);
		CSIO_INC_STATS(q, n_eq_wr_split);
	} else {
		wrp->addr1 = cwr;
		wrp->size1 = req_sz;
		wrp->addr2 = NULL;
		wrp->size2 = 0;
		q->pidx += (uint16_t)req_credits;

		/* If we hit the end of the queue, roll PIDX back to 0. */
		if (unlikely(q->pidx == q->credits)) {
			q->pidx = 0;
			CSIO_INC_STATS(q, n_qwrap);
		}
	}

	q->inc_idx = (uint16_t)req_credits;

	CSIO_INC_STATS(q, n_tot_reqs);

	return 0;
}
0938
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950 void
0951 csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
0952 uint32_t wr_off, uint32_t data_len)
0953 {
0954 uint32_t nbytes;
0955
0956
0957 nbytes = ((wrp->size1 - wr_off) >= data_len) ?
0958 data_len : (wrp->size1 - wr_off);
0959
0960 memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
0961 data_len -= nbytes;
0962
0963
0964 if (data_len) {
0965 CSIO_DB_ASSERT(data_len <= wrp->size2);
0966 CSIO_DB_ASSERT(wrp->addr2 != NULL);
0967 memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
0968 }
0969 }
0970
0971
0972
0973
0974
0975
0976
0977
0978
0979
0980
/*
 * csio_wr_issue - Notify the chip of queued work via the doorbell.
 * @hw: HW module.
 * @qidx: Egress queue whose pending credits (inc_idx) are submitted.
 * @prio: Use the priority doorbell?
 *
 * The wmb() orders the WR writes into the ring before the doorbell
 * write. Always returns 0.
 * NOTE(review): PIDX_T5_V is used unconditionally — presumably this
 * build targets T5+ only; confirm for T4 parts.
 */
int
csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];

	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));

	wmb();
	/* Ring SGE Doorbell writing q->inc_idx number of entries. */
	csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
			  PIDX_T5_V(q->inc_idx) | DBTYPE_F,
		      MYPF_REG(SGE_PF_KDOORBELL_A));
	q->inc_idx = 0;

	return 0;
}
0998
0999 static inline uint32_t
1000 csio_wr_avail_qcredits(struct csio_q *q)
1001 {
1002 if (q->pidx > q->cidx)
1003 return q->pidx - q->cidx;
1004 else if (q->cidx > q->pidx)
1005 return q->credits - (q->cidx - q->pidx);
1006 else
1007 return 0;
1008 }
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021 static inline void
1022 csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
1023 {
1024 flq->cidx++;
1025 if (flq->cidx == flq->credits) {
1026 flq->cidx = 0;
1027 CSIO_INC_STATS(flq, n_qwrap);
1028 }
1029 }
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
/*
 * csio_wr_process_fl - Process a freelist completion.
 * @hw: HW module.
 * @q: Ingress queue that got the freelist completion.
 * @wr: The ingress completion WR.
 * @len_to_qid: The lower 32 bits of the footer's pldbuflen_qid: either
 *              the new-buffer flag plus packed length, or a running
 *              offset into the current buffer.
 * @iq_handler: Caller's handler for this completion.
 * @priv: Private pointer for @iq_handler.
 *
 * Gathers the freelist buffers holding the payload into @flb and hands
 * them to @iq_handler; then either advances the packing offset (packed
 * mode) or consumes the buffer.
 */
static inline void
csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
		   void *wr, uint32_t len_to_qid,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	struct csio_fl_dma_buf flb;
	struct csio_dma_buf *buf, *fbuf;
	uint32_t bufsz, len, lastlen = 0;
	int n;
	struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];

	CSIO_DB_ASSERT(flq != NULL);

	len = len_to_qid;

	if (len & IQWRF_NEWBUF) {
		/* HW started a new buffer; retire any partly used one. */
		if (flq->un.fl.offset > 0) {
			csio_wr_inval_flq_buf(hw, flq);
			flq->un.fl.offset = 0;
		}
		len = IQWRF_LEN_GET(len);
	}

	CSIO_DB_ASSERT(len != 0);

	flb.totlen = len;

	/* Consume all freelist buffers used for len bytes of payload. */
	for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
		buf = &flq->un.fl.bufs[flq->cidx];
		bufsz = csio_wr_fl_bufsz(sge, buf);

		fbuf->paddr = buf->paddr;
		fbuf->vaddr = buf->vaddr;

		flb.offset = flq->un.fl.offset;
		lastlen = min(bufsz, len);
		fbuf->len = lastlen;

		len -= lastlen;
		if (!len)
			break;
		csio_wr_inval_flq_buf(hw, flq);
	}

	/* In packed mode the buffer may hold more payloads; defer free. */
	flb.defer_free = flq->un.fl.packen ? 0 : 1;

	iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
		   &flb, priv);

	if (flq->un.fl.packen)
		flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
	else
		csio_wr_inval_flq_buf(hw, flq);

}
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111 static inline bool
1112 csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
1113 {
1114 return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
1115 }
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
/*
 * csio_wr_process_iq - Process pending entries of an ingress queue.
 * @hw: HW module.
 * @q: Ingress queue.
 * @iq_handler: Handler invoked per completion (CPL or freelist).
 * @priv: Private pointer for @iq_handler.
 *
 * Walks the ring while entries carry the current generation bit,
 * dispatching by response type: CPL messages go straight to the
 * handler, freelist completions via csio_wr_process_fl(), and forwarded
 * interrupts to the target queue's INTx handler. Refills the attached
 * freelist when it runs low, then acknowledges all consumed entries
 * through the GTS register.
 *
 * Returns 0, or -EINVAL when nothing was consumed (stray interrupt).
 */
int
csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
	struct csio_iqwr_footer *ftr;
	uint32_t wr_type, fw_qid, qid;
	struct csio_q *q_completed;
	struct csio_q *flq = csio_iq_has_fl(q) ?
					wrm->q_arr[q->un.iq.flq_idx] : NULL;
	int rv = 0;

	/* Footer sits in the last bytes of each WR slot. */
	ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
					  (q->wr_sz - sizeof(*ftr)));

	/*
	 * When the generation bit of an entry matches ours, the entry
	 * is new; keep consuming until it doesn't.
	 */
	while (csio_is_new_iqwr(q, ftr)) {

		CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
						(uintptr_t)q->vwrap);
		/* Read the footer before the WR payload. */
		rmb();
		wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);

		switch (wr_type) {
		case X_RSPD_TYPE_CPL:
			/* Subtract footer from WR len */
			iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
			break;
		case X_RSPD_TYPE_FLBUF:
			csio_wr_process_fl(hw, q, wr,
					   ntohl(ftr->pldbuflen_qid),
					   iq_handler, priv);
			break;
		case X_RSPD_TYPE_INTR:
			/* Forwarded interrupt: find the source queue. */
			fw_qid = ntohl(ftr->pldbuflen_qid);
			qid = fw_qid - wrm->fw_iq_start;
			q_completed = hw->wrm.intr_map[qid];

			if (unlikely(qid ==
					csio_q_physiqid(hw, hw->intr_iq_idx))) {
				/*
				 * The interrupt queue reporting on
				 * itself is unexpected; ignore it.
				 */
			} else {
				CSIO_DB_ASSERT(q_completed);
				CSIO_DB_ASSERT(
					q_completed->un.iq.iq_intx_handler);

				/* Call the queue's INTx handler. */
				q_completed->un.iq.iq_intx_handler(hw, NULL,
						0, NULL, (void *)q_completed);
			}
			break;
		default:
			csio_warn(hw, "Unknown resp type 0x%x received\n",
				 wr_type);
			CSIO_INC_STATS(q, n_rsp_unknown);
			break;
		}

		/*
		 * Advance CIDX; at the end of the ring, wrap to the
		 * start and flip the generation bit so the next pass
		 * recognizes newly written entries.
		 */
		if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {

			/* Roll over to start of queue */
			q->cidx = 0;
			wr = q->vstart;

			/* Toggle genbit */
			q->un.iq.genbit ^= 0x1;

			CSIO_INC_STATS(q, n_qwrap);
		} else {
			q->cidx++;
			wr = (void *)((uintptr_t)(q->vstart) +
					(q->cidx * q->wr_sz));
		}

		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
						  (q->wr_sz - sizeof(*ftr)));
		q->inc_idx++;

	}

	/*
	 * An interrupt without any new entries is a stray completion;
	 * just re-arm the queue below.
	 */
	if (unlikely(!q->inc_idx)) {
		CSIO_INC_STATS(q, n_stray_comp);
		rv = -EINVAL;
		goto restart;
	}

	/* Replenish the freelist when it drops to 16 or fewer credits. */
	if (flq) {
		uint32_t avail  = csio_wr_avail_qcredits(flq);
		if (avail <= 16) {
			/*
			 * Refill back up to (credits - 8): at least one
			 * credit (8 buffers) must stay unpopulated or
			 * HW considers the FLQ empty.
			 */
			csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
			csio_wr_ring_fldb(hw, flq);
		}
	}

restart:
	/* Ack the consumed entries and re-arm the queue's interrupt. */
	csio_wr_reg32(hw, CIDXINC_V(q->inc_idx)		|
			  INGRESSQID_V(q->un.iq.physiqid)	|
			  TIMERREG_V(csio_sge_timer_reg),
		      MYPF_REG(SGE_PF_GTS_A));
	q->stats.n_tot_rsps += q->inc_idx;

	q->inc_idx = 0;

	return rv;
}
1261
1262 int
1263 csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
1264 void (*iq_handler)(struct csio_hw *, void *,
1265 uint32_t, struct csio_fl_dma_buf *,
1266 void *),
1267 void *priv)
1268 {
1269 struct csio_wrm *wrm = csio_hw_to_wrm(hw);
1270 struct csio_q *iq = wrm->q_arr[qidx];
1271
1272 return csio_wr_process_iq(hw, iq, iq_handler, priv);
1273 }
1274
1275 static int
1276 csio_closest_timer(struct csio_sge *s, int time)
1277 {
1278 int i, delta, match = 0, min_delta = INT_MAX;
1279
1280 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1281 delta = time - s->timer_val[i];
1282 if (delta < 0)
1283 delta = -delta;
1284 if (delta < min_delta) {
1285 min_delta = delta;
1286 match = i;
1287 }
1288 }
1289 return match;
1290 }
1291
1292 static int
1293 csio_closest_thresh(struct csio_sge *s, int cnt)
1294 {
1295 int i, delta, match = 0, min_delta = INT_MAX;
1296
1297 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1298 delta = cnt - s->counter_val[i];
1299 if (delta < 0)
1300 delta = -delta;
1301 if (delta < min_delta) {
1302 min_delta = delta;
1303 match = i;
1304 }
1305 }
1306 return match;
1307 }
1308
/*
 * csio_wr_fixup_host_params - Program SGE host parameters.
 * @hw: HW module.
 *
 * Programs host page size, ingress padding/packing boundaries, status
 * page size, free-list buffer sizes, ULP RX page size and packet shift,
 * based on kernel page/cache-line sizes and the PCIe max payload size.
 */
static void
csio_wr_fixup_host_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t clsz = L1_CACHE_BYTES;
	uint32_t s_hps = PAGE_SHIFT - 10;
	uint32_t stat_len = clsz > 64 ? 128 : 64;
	u32 fl_align = clsz < 32 ? 32 : clsz;
	u32 pack_align;
	u32 ingpad, ingpack;

	/* Host page size (log2 - 10) for each PF. */
	csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
		      HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
		      HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
		      HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
		      SGE_HOST_PAGE_SIZE_A);

	/*
	 * The packing boundary starts at the free-list alignment
	 * (cache line, at least 32B) and is widened to the PCIe
	 * maximum payload size when that is larger, so packed
	 * payloads never straddle a payload boundary.
	 */
	pack_align = fl_align;
	if (pci_is_pcie(hw->pdev)) {
		u32 mps, mps_log;
		u16 devctl;

		/*
		 * Derive the device's Max Payload Size from its PCIe
		 * Device Control register.
		 */
		pcie_capability_read_word(hw->pdev, PCI_EXP_DEVCTL, &devctl);
		mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
		mps = 1 << mps_log;
		if (mps > pack_align)
			pack_align = mps;
	}

	/*
	 * Map the chosen packing boundary onto the encodings the
	 * INGPACKBOUNDARY field accepts (16B, then 64B upward);
	 * fl_align follows the effective boundary.
	 */
	if (pack_align <= 16) {
		ingpack = INGPACKBOUNDARY_16B_X;
		fl_align = 16;
	} else if (pack_align == 32) {
		ingpack = INGPACKBOUNDARY_64B_X;
		fl_align = 64;
	} else {
		u32 pack_align_log = fls(pack_align) - 1;

		ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
		fl_align = pack_align;
	}

	/*
	 * Use the smallest padding boundary the chip supports: 32B on
	 * T5, 8B on T6+.
	 */
	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		ingpad = INGPADBOUNDARY_32B_X;
	else
		ingpad = T6_INGPADBOUNDARY_8B_X;

	csio_set_reg_field(hw, SGE_CONTROL_A,
			   INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
			   EGRSTATUSPAGESIZE_F,
			   INGPADBOUNDARY_V(ingpad) |
			   EGRSTATUSPAGESIZE_V(stat_len != 64));
	csio_set_reg_field(hw, SGE_CONTROL2_A,
			   INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
			   INGPACKBOUNDARY_V(ingpack));

	/* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
	csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);

	/*
	 * When the driver owns the SGE parameters, round the remaining
	 * free-list buffer sizes up to the alignment just chosen.
	 */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
		csio_wr_reg32(hw,
			      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
			      fl_align - 1) & ~(fl_align - 1),
			      SGE_FL_BUFFER_SIZE2_A);
		csio_wr_reg32(hw,
			      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
			      fl_align - 1) & ~(fl_align - 1),
			      SGE_FL_BUFFER_SIZE3_A);
	}

	sge->csio_fl_align = fl_align;

	/* ULP RX TDDP page size (log2 - 12). */
	csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);

	/* default value of rx_dma_offset of the NIC driver */
	csio_set_reg_field(hw, SGE_CONTROL_A,
			   PKTSHIFT_V(PKTSHIFT_M),
			   PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));

	csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
				    CSUM_HAS_PSEUDO_HDR_F, 0);
}
1431
/*
 * csio_init_intr_coalesce_parms - Select SGE timer/counter indices for
 * interrupt coalescing from the module-level knobs.
 * @hw: HW module.
 *
 * With a non-zero packet-count threshold, count-based coalescing is
 * used (restart-counter timer index); otherwise the timer index closest
 * to csio_intr_coalesce_time is chosen.
 * NOTE(review): csio_sge_thresh_reg is computed from
 * csio_intr_coalesce_cnt and then overwritten with 0 in the count-based
 * branch — looks suspicious; confirm counter index 0 is intended.
 */
static void
csio_init_intr_coalesce_parms(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;

	csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
	if (csio_intr_coalesce_cnt) {
		csio_sge_thresh_reg = 0;
		csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
		return;
	}

	csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
}
1447
1448
1449
1450
1451
1452
1453
/*
 * csio_wr_get_sge - Read and cache current SGE parameters.
 * @hw: HW module.
 *
 * Caches the SGE control register, derives the free-list alignment from
 * the ingress padding boundary, reads the free-list buffer sizes, the
 * six coalescing timer values (converted from core ticks to usecs) and
 * the four counter thresholds, then picks the coalescing indices.
 */
static void
csio_wr_get_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t ingpad;
	int i;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);

	ingpad = INGPADBOUNDARY_G(sge->sge_control);

	/* Free-list alignment equals the ingress padding boundary. */
	switch (ingpad) {
	case X_INGPCIEBOUNDARY_32B:
		sge->csio_fl_align = 32; break;
	case X_INGPCIEBOUNDARY_64B:
		sge->csio_fl_align = 64; break;
	case X_INGPCIEBOUNDARY_128B:
		sge->csio_fl_align = 128; break;
	case X_INGPCIEBOUNDARY_256B:
		sge->csio_fl_align = 256; break;
	case X_INGPCIEBOUNDARY_512B:
		sge->csio_fl_align = 512; break;
	case X_INGPCIEBOUNDARY_1024B:
		sge->csio_fl_align = 1024; break;
	case X_INGPCIEBOUNDARY_2048B:
		sge->csio_fl_align = 2048; break;
	case X_INGPCIEBOUNDARY_4096B:
		sge->csio_fl_align = 4096; break;
	}

	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
	timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
	timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);

	sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE0_G(timer_value_0_and_1));
	sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE1_G(timer_value_0_and_1));
	sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE2_G(timer_value_2_and_3));
	sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE3_G(timer_value_2_and_3));
	sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE4_G(timer_value_4_and_5));
	sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE5_G(timer_value_4_and_5));

	ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
	sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
	sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
	sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
	sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

	csio_init_intr_coalesce_parms(hw);
}
1515
1516
1517
1518
1519
1520
1521
1522
/*
 * csio_wr_set_sge - Program the SGE with driver defaults and cache
 * the resulting configuration.
 * @hw: HW module.
 *
 * Used by the master PF when the SGE has not been pre-programmed:
 * sets the packet CPL mode, doorbell FIFO thresholds, free-list
 * buffer sizes and interrupt holdoff timers/thresholds, then caches
 * the programmed values in hw->wrm.sge.
 */
static void
csio_wr_set_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	int i;

	/* Set RXPKTCPLMODE in SGE_CONTROL before caching the register. */
	csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);

	/* Re-read so the cached copy includes the bit just set. */
	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);

	/*
	 * Program the doorbell FIFO low/high interrupt thresholds.
	 * NOTE(review): the HP field is masked with LP_INT_THRESH_T5_M —
	 * verify the LP and HP fields really share the same width.
	 */
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
			   LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
			   LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A,
			   HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
			   HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));

	/*
	 * Set ENABLE_DROP in the doorbell control register — presumably
	 * drops doorbells instead of stalling on FIFO overflow; confirm
	 * against the T5 register description.
	 */
	csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
			   ENABLE_DROP_F);

	/*
	 * Program free-list buffer sizes 1..8.  Register 0 is not written
	 * here — presumably left at the value programmed elsewhere (e.g.
	 * host-params fixup); confirm.  Sizes 2 and 3 are rounded up to
	 * the free-list alignment decoded from SGE_CONTROL.
	 */
	CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
	CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
	CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
	CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
	CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
	CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);

	/* Read the sizes back so the cached copies match the hardware. */
	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	/* Default interrupt holdoff timers (microseconds)... */
	sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
	sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
	sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
	sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
	sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
	sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;

	/* ...and default interrupt packet-count thresholds. */
	sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
	sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
	sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
	sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;

	/* Program the four packet-count thresholds in one register. */
	csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
		      THRESHOLD_1_V(sge->counter_val[1]) |
		      THRESHOLD_2_V(sge->counter_val[2]) |
		      THRESHOLD_3_V(sge->counter_val[3]),
		      SGE_INGRESS_RX_THRESHOLD_A);

	/*
	 * Program the six holdoff timers, converting microseconds to
	 * core-clock ticks, two timers per register.
	 */
	csio_wr_reg32(hw,
		   TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
		   TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
		   SGE_TIMER_VALUE_0_AND_1_A);

	csio_wr_reg32(hw,
		   TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
		   TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
		   SGE_TIMER_VALUE_2_AND_3_A);

	csio_wr_reg32(hw,
		   TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
		   TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
		   SGE_TIMER_VALUE_4_AND_5_A);

	/*
	 * Select the timer/threshold register indices matching the
	 * module-level interrupt coalescing parameters.
	 */
	csio_init_intr_coalesce_parms(hw);
}
1606
1607 void
1608 csio_wr_sge_init(struct csio_hw *hw)
1609 {
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626 if (csio_is_hw_master(hw)) {
1627 if (hw->fw_state != CSIO_DEV_STATE_INIT)
1628 csio_wr_fixup_host_params(hw);
1629
1630 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
1631 csio_wr_get_sge(hw);
1632 else
1633 csio_wr_set_sge(hw);
1634 } else
1635 csio_wr_get_sge(hw);
1636 }
1637
1638
1639
1640
1641
1642
1643
1644
1645 int
1646 csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
1647 {
1648 int i;
1649
1650 if (!wrm->num_q) {
1651 csio_err(hw, "Num queues is not set\n");
1652 return -EINVAL;
1653 }
1654
1655 wrm->q_arr = kcalloc(wrm->num_q, sizeof(struct csio_q *), GFP_KERNEL);
1656 if (!wrm->q_arr)
1657 goto err;
1658
1659 for (i = 0; i < wrm->num_q; i++) {
1660 wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
1661 if (!wrm->q_arr[i]) {
1662 while (--i >= 0)
1663 kfree(wrm->q_arr[i]);
1664 goto err_free_arr;
1665 }
1666 }
1667 wrm->free_qidx = 0;
1668
1669 return 0;
1670
1671 err_free_arr:
1672 kfree(wrm->q_arr);
1673 err:
1674 return -ENOMEM;
1675 }
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686 void
1687 csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
1688 {
1689 int i;
1690 uint32_t j;
1691 struct csio_q *q;
1692 struct csio_dma_buf *buf;
1693
1694 for (i = 0; i < wrm->num_q; i++) {
1695 q = wrm->q_arr[i];
1696
1697 if (wrm->free_qidx && (i < wrm->free_qidx)) {
1698 if (q->type == CSIO_FREELIST) {
1699 if (!q->un.fl.bufs)
1700 continue;
1701 for (j = 0; j < q->credits; j++) {
1702 buf = &q->un.fl.bufs[j];
1703 if (!buf->vaddr)
1704 continue;
1705 dma_free_coherent(&hw->pdev->dev,
1706 buf->len, buf->vaddr,
1707 buf->paddr);
1708 }
1709 kfree(q->un.fl.bufs);
1710 }
1711 dma_free_coherent(&hw->pdev->dev, q->size,
1712 q->vstart, q->pstart);
1713 }
1714 kfree(q);
1715 }
1716
1717 hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;
1718
1719 kfree(wrm->q_arr);
1720 }