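/*
 * Scatter-Gather Engine (SGE) support for the Chelsio T4/T5/T6 PCI-E
 * SR-IOV Virtual Function Ethernet driver (cxgb4vf): Free List
 * management, ingress/egress queue allocation, and packet transmit
 * and receive processing.
 */
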
0036 #include <linux/skbuff.h>
0037 #include <linux/netdevice.h>
0038 #include <linux/etherdevice.h>
0039 #include <linux/if_vlan.h>
0040 #include <linux/ip.h>
0041 #include <net/ipv6.h>
0042 #include <net/tcp.h>
0043 #include <linux/dma-mapping.h>
0044 #include <linux/prefetch.h>
0045
0046 #include "t4vf_common.h"
0047 #include "t4vf_defs.h"
0048
0049 #include "../cxgb4/t4_regs.h"
0050 #include "../cxgb4/t4_values.h"
0051 #include "../cxgb4/t4fw_api.h"
0052 #include "../cxgb4/t4_msg.h"
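
/*
 * SGE constants and tunables: egress queue unit sizes, TX reclaim and
 * RX refill batching limits, queue check timer periods, worst-case
 * Ethernet TX work request sizing, immediate-data limits, and RX
 * buffer copy/pull thresholds.
 */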
0057 enum {
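/*
 * Egress Queues are sized in units of SGE_EQ_IDXSIZE bytes; Free List
 * pointers and TX descriptor flits are both 8 bytes, so FL_PER_EQ_UNIT
 * and TXD_PER_EQ_UNIT are the number of each per Egress Queue unit.
 */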
0065 EQ_UNIT = SGE_EQ_IDXSIZE,
0066 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
0067 TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
0068
0075 MAX_TX_RECLAIM = 16,
0076
0081 MAX_RX_REFILL = 16,
0082
0088 RX_QCHECK_PERIOD = (HZ / 2),
0089
0094 TX_QCHECK_PERIOD = (HZ / 2),
0095 MAX_TIMER_TX_RECLAIM = 100,
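
/*
 * Worst-case sizing of an Ethernet TX work request: the maximum number
 * of packet fragments (skb fragments plus the linear header), the
 * corresponding scatter/gather list and header lengths in 8-byte flits,
 * and the threshold (in TX descriptors) at which the queue is stopped.
 */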
0105 ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
0106 ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
0107 ((ETHTXQ_MAX_FRAGS-1) & 1) +
0108 2),
0109 ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
0110 sizeof(struct cpl_tx_pkt_lso_core) +
0111 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
0112 ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
0113
0114 ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
0115
0122 MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,
0123
0127 MAX_CTRL_WR_LEN = 256,
0128
0133 MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
0134 ? MAX_IMM_TX_PKT_LEN
0135 : MAX_CTRL_WR_LEN),
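
/*
 * Ingress packets no larger than RX_COPY_THRES bytes are copied into a
 * freshly allocated skb; for larger packets, RX_PULL_LEN bytes are
 * pulled into the skb's linear area and the rest is left in the Free
 * List page fragments.
 */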
0143 RX_COPY_THRES = 256,
0144 RX_PULL_LEN = 128,
0145
0151 RX_SKB_LEN = 512,
0152 };
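
/*
 * Software state per TX descriptor: the skb being sent and a pointer
 * to its scatter/gather list within the hardware ring, used for DMA
 * unmapping when the descriptor is reclaimed.
 */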
0157 struct tx_sw_desc {
0158 struct sk_buff *skb;
0159 struct ulptx_sgl *sgl;
0160 };
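
/*
 * Software state per RX Free List descriptor: the backing page and its
 * DMA address.  The low bits of dma_addr hold the buffer size and
 * mapping flags defined below.
 */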
0168 struct rx_sw_desc {
0169 struct page *page;
0170 dma_addr_t dma_addr;
0171
0172 };
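
/*
 * Flag bits stored in the low-order bits of an rx_sw_desc DMA address,
 * which is page aligned and therefore has those bits free.
 */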
0183 enum {
0184 RX_LARGE_BUF = 1 << 0,
0185 RX_UNMAPPED_BUF = 1 << 1,
0186 };
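
/*
 * get_buf_addr - return the DMA address of an RX buffer with the state
 * flag bits masked off.
 */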
0195 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
0196 {
0197 return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
0198 }
0199
0207 static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
0208 {
0209 return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
0210 }
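
/*
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The compiler
 * can then optimize away unnecessary unmapping code.
 */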
0218 static inline int need_skb_unmap(void)
0219 {
0220 #ifdef CONFIG_NEED_DMA_MAP_STATE
0221 return 1;
0222 #else
0223 return 0;
0224 #endif
0225 }
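
/*
 * txq_avail - return the number of available slots in a TX queue.  One
 * descriptor is always held back to distinguish a full ring from an
 * empty one.
 */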
0233 static inline unsigned int txq_avail(const struct sge_txq *tq)
0234 {
0235 return tq->size - 1 - tq->in_use;
0236 }
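
/*
 * fl_cap - return the capacity of a Free List: the total size less one
 * Egress Queue Unit's worth of descriptors, which is never filled so
 * the hardware can tell an empty list from a full one.
 */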
0247 static inline unsigned int fl_cap(const struct sge_fl *fl)
0248 {
0249 return fl->size - FL_PER_EQ_UNIT;
0250 }
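
/*
 * fl_starving - return whether a Free List is starving, i.e. the number
 * of buffers usable by the hardware has fallen to or below the
 * adapter's starvation threshold.
 */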
0261 static inline bool fl_starving(const struct adapter *adapter,
0262 const struct sge_fl *fl)
0263 {
0264 const struct sge *s = &adapter->sge;
0265
0266 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
0267 }
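
/*
 * map_skb - DMA-map the linear data and all fragments of an skb for
 * transmission, storing the mappings in @addr.  On failure, everything
 * mapped so far is unwound and -ENOMEM is returned.
 */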
0277 static int map_skb(struct device *dev, const struct sk_buff *skb,
0278 dma_addr_t *addr)
0279 {
0280 const skb_frag_t *fp, *end;
0281 const struct skb_shared_info *si;
0282
0283 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
0284 if (dma_mapping_error(dev, *addr))
0285 goto out_err;
0286
0287 si = skb_shinfo(skb);
0288 end = &si->frags[si->nr_frags];
0289 for (fp = si->frags; fp < end; fp++) {
0290 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
0291 DMA_TO_DEVICE);
0292 if (dma_mapping_error(dev, *addr))
0293 goto unwind;
0294 }
0295 return 0;
0296
0297 unwind:
0298 while (fp-- > si->frags)
0299 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
0300 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
0301
0302 out_err:
0303 return -ENOMEM;
0304 }
0305
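/*
 * unmap_sgl - unmap the DMA mappings described by a ULP_TX scatter/
 * gather list, handling the case where the list wraps past the end of
 * the TX descriptor ring (marked by tq->stat).
 */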
0306 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
0307 const struct ulptx_sgl *sgl, const struct sge_txq *tq)
0308 {
0309 const struct ulptx_sge_pair *p;
0310 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
0311
0312 if (likely(skb_headlen(skb)))
0313 dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
0314 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
0315 else {
0316 dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
0317 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
0318 nfrags--;
0319 }
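
/*
 * The complexity below comes from the possibility that the SGL wraps
 * around the end of the TX descriptor ring at tq->stat.
 */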
0325 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
0326 if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
0327 unmap:
0328 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
0329 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
0330 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
0331 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
0332 p++;
0333 } else if ((u8 *)p == (u8 *)tq->stat) {
0334 p = (const struct ulptx_sge_pair *)tq->desc;
0335 goto unmap;
0336 } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
0337 const __be64 *addr = (const __be64 *)tq->desc;
0338
0339 dma_unmap_page(dev, be64_to_cpu(addr[0]),
0340 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
0341 dma_unmap_page(dev, be64_to_cpu(addr[1]),
0342 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
0343 p = (const struct ulptx_sge_pair *)&addr[2];
0344 } else {
0345 const __be64 *addr = (const __be64 *)tq->desc;
0346
0347 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
0348 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
0349 dma_unmap_page(dev, be64_to_cpu(addr[0]),
0350 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
0351 p = (const struct ulptx_sge_pair *)&addr[1];
0352 }
0353 }
0354 if (nfrags) {
0355 __be64 addr;
0356
0357 if ((u8 *)p == (u8 *)tq->stat)
0358 p = (const struct ulptx_sge_pair *)tq->desc;
0359 addr = ((u8 *)p + 16 <= (u8 *)tq->stat
0360 ? p->addr[0]
0361 : *(const __be64 *)tq->desc);
0362 dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
0363 DMA_TO_DEVICE);
0364 }
0365 }
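
/**
 * free_tx_desc - reclaim TX descriptors and free their buffers
 * @adapter: the adapter
 * @tq: the TX queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims @n TX descriptors from @tq, unmapping and freeing the
 * associated skbs where present.
 */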
0377 static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
0378 unsigned int n, bool unmap)
0379 {
0380 struct tx_sw_desc *sdesc;
0381 unsigned int cidx = tq->cidx;
0382 struct device *dev = adapter->pdev_dev;
0383
0384 const int need_unmap = need_skb_unmap() && unmap;
0385
0386 sdesc = &tq->sdesc[cidx];
0387 while (n--) {
0388
0389
0390
0391
0392 if (sdesc->skb) {
0393 if (need_unmap)
0394 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
0395 dev_consume_skb_any(sdesc->skb);
0396 sdesc->skb = NULL;
0397 }
0398
0399 sdesc++;
0400 if (++cidx == tq->size) {
0401 cidx = 0;
0402 sdesc = tq->sdesc;
0403 }
0404 }
0405 tq->cidx = cidx;
0406 }
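
/*
 * Return the number of TX descriptors the hardware has completed, i.e.
 * the difference between the hardware's consumer index in the status
 * page and our software cidx.
 */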
0411 static inline int reclaimable(const struct sge_txq *tq)
0412 {
0413 int hw_cidx = be16_to_cpu(tq->stat->cidx);
0414 int reclaimable = hw_cidx - tq->cidx;
0415 if (reclaimable < 0)
0416 reclaimable += tq->size;
0417 return reclaimable;
0418 }
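
/**
 * reclaim_completed_tx - reclaim completed TX descriptors
 * @adapter: the adapter
 * @tq: the TX queue to reclaim completed descriptors from
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims descriptors the hardware has finished with, limited to
 * MAX_TX_RECLAIM at a time to bound the amount of work done here.
 */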
0430 static inline void reclaim_completed_tx(struct adapter *adapter,
0431 struct sge_txq *tq,
0432 bool unmap)
0433 {
0434 int avail = reclaimable(tq);
0435
0436 if (avail) {
0437
0438
0439
0440
0441 if (avail > MAX_TX_RECLAIM)
0442 avail = MAX_TX_RECLAIM;
0443
0444 free_tx_desc(adapter, tq, avail, unmap);
0445 tq->in_use -= avail;
0446 }
0447 }
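
/*
 * Return the size of the RX buffer described by a Free List descriptor:
 * either a large (higher-order) page or a single page, depending on the
 * RX_LARGE_BUF flag.
 */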
0454 static inline int get_buf_size(const struct adapter *adapter,
0455 const struct rx_sw_desc *sdesc)
0456 {
0457 const struct sge *s = &adapter->sge;
0458
0459 return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
0460 ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
0461 }
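
/**
 * free_rx_bufs - free RX buffers on a Free List
 * @adapter: the adapter
 * @fl: the Free List to free buffers from
 * @n: how many buffers to free
 *
 * Releases the next @n buffers on the Free List, unmapping them for
 * DMA where necessary and dropping the page references.
 */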
0473 static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
0474 {
0475 while (n--) {
0476 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
0477
0478 if (is_buf_mapped(sdesc))
0479 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
0480 get_buf_size(adapter, sdesc),
0481 DMA_FROM_DEVICE);
0482 put_page(sdesc->page);
0483 sdesc->page = NULL;
0484 if (++fl->cidx == fl->size)
0485 fl->cidx = 0;
0486 fl->avail--;
0487 }
0488 }
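
/**
 * unmap_rx_buf - unmap the current RX buffer on a Free List
 * @adapter: the adapter
 * @fl: the Free List
 *
 * Unmaps the current buffer for DMA and advances the Free List consumer
 * index.  Unlike free_rx_bufs(), the page reference is kept because the
 * buffer is being handed up as part of an ingress packet.
 */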
0503 static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
0504 {
0505 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
0506
0507 if (is_buf_mapped(sdesc))
0508 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
0509 get_buf_size(adapter, sdesc),
0510 DMA_FROM_DEVICE);
0511 sdesc->page = NULL;
0512 if (++fl->cidx == fl->size)
0513 fl->cidx = 0;
0514 fl->avail--;
0515 }
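
/*
 * ring_fl_db - update the Free List doorbell
 *
 * Tell the hardware about new Free List buffers.  Credits are only
 * pushed in whole Egress Queue Units, via the BAR2 doorbell if the
 * queue has one or the kernel doorbell register otherwise.
 */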
0525 static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
0526 {
0527 u32 val = adapter->params.arch.sge_fl_db;
0528
0529
0530
0531
0532
0533 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
0534 if (is_t4(adapter->params.chip))
0535 val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
0536 else
0537 val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
0538
0539
0540
0541
0542 wmb();
0543
0544
0545
0546
0547
0548 if (unlikely(fl->bar2_addr == NULL)) {
0549 t4_write_reg(adapter,
0550 T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
0551 QID_V(fl->cntxt_id) | val);
0552 } else {
0553 writel(val | QID_V(fl->bar2_qid),
0554 fl->bar2_addr + SGE_UDB_KDOORBELL);
0555
0556
0557
0558
0559 wmb();
0560 }
0561 fl->pend_cred %= FL_PER_EQ_UNIT;
0562 }
0563 }
0564
0571 static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
0572 dma_addr_t dma_addr)
0573 {
0574 sdesc->page = page;
0575 sdesc->dma_addr = dma_addr;
0576 }
0577
0581 #define POISON_BUF_VAL -1
0582
0583 static inline void poison_buf(struct page *page, size_t sz)
0584 {
0585 #if POISON_BUF_VAL >= 0
0586 memset(page_address(page), POISON_BUF_VAL, sz);
0587 #endif
0588 }
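
/**
 * refill_fl - refill an SGE RX buffer ring
 * @adapter: the adapter
 * @fl: the Free List ring to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for the allocations
 *
 * Allocates up to @n new RX buffers (large pages first if configured,
 * then single pages), DMA-maps them and adds them to the Free List.
 * Returns the number of buffers added; the doorbell is rung and the
 * queue is marked as starving if it remains below threshold.
 */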
0604 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
0605 int n, gfp_t gfp)
0606 {
0607 struct sge *s = &adapter->sge;
0608 struct page *page;
0609 dma_addr_t dma_addr;
0610 unsigned int cred = fl->avail;
0611 __be64 *d = &fl->desc[fl->pidx];
0612 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
0613
0614
0615
0616
0617
0618
0619 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
0620
0621 gfp |= __GFP_NOWARN;
0622
0623
0624
0625
0626
0627
0628
0629 if (s->fl_pg_order == 0)
0630 goto alloc_small_pages;
0631
0632 while (n) {
0633 page = __dev_alloc_pages(gfp, s->fl_pg_order);
0634 if (unlikely(!page)) {
0635
0636
0637
0638
0639
0640 fl->large_alloc_failed++;
0641 break;
0642 }
0643 poison_buf(page, PAGE_SIZE << s->fl_pg_order);
0644
0645 dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
0646 PAGE_SIZE << s->fl_pg_order,
0647 DMA_FROM_DEVICE);
0648 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
0649
0650
0651
0652
0653
0654
0655
0656
0657 __free_pages(page, s->fl_pg_order);
0658 goto out;
0659 }
0660 dma_addr |= RX_LARGE_BUF;
0661 *d++ = cpu_to_be64(dma_addr);
0662
0663 set_rx_sw_desc(sdesc, page, dma_addr);
0664 sdesc++;
0665
0666 fl->avail++;
0667 if (++fl->pidx == fl->size) {
0668 fl->pidx = 0;
0669 sdesc = fl->sdesc;
0670 d = fl->desc;
0671 }
0672 n--;
0673 }
0674
0675 alloc_small_pages:
0676 while (n--) {
0677 page = __dev_alloc_page(gfp);
0678 if (unlikely(!page)) {
0679 fl->alloc_failed++;
0680 break;
0681 }
0682 poison_buf(page, PAGE_SIZE);
0683
0684 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
0685 DMA_FROM_DEVICE);
0686 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
0687 put_page(page);
0688 break;
0689 }
0690 *d++ = cpu_to_be64(dma_addr);
0691
0692 set_rx_sw_desc(sdesc, page, dma_addr);
0693 sdesc++;
0694
0695 fl->avail++;
0696 if (++fl->pidx == fl->size) {
0697 fl->pidx = 0;
0698 sdesc = fl->sdesc;
0699 d = fl->desc;
0700 }
0701 }
0702
0703 out:
0704
0705
0706
0707
0708
0709 cred = fl->avail - cred;
0710 fl->pend_cred += cred;
0711 ring_fl_db(adapter, fl);
0712
0713 if (unlikely(fl_starving(adapter, fl))) {
0714 smp_wmb();
0715 set_bit(fl->cntxt_id, adapter->sge.starving_fl);
0716 }
0717
0718 return cred;
0719 }
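
/*
 * Refill a Free List toward its capacity (bounded by MAX_RX_REFILL)
 * with GFP_ATOMIC allocations.
 */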
0725 static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
0726 {
0727 refill_fl(adapter, fl,
0728 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
0729 GFP_ATOMIC);
0730 }
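
/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the device for DMA allocations
 * @nelem: the number of descriptors
 * @hwsize: the size of each hardware descriptor
 * @swsize: the size of each software descriptor (may be 0)
 * @busaddrp: returns the DMA bus address of the hardware ring
 * @swringp: returns the newly allocated software ring (if @swsize != 0)
 * @stat_size: extra space for the hardware status page
 *
 * Allocates a coherent hardware descriptor ring and, optionally, a
 * parallel software state ring.  Returns the hardware ring or NULL on
 * failure.
 */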
0751 static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
0752 size_t swsize, dma_addr_t *busaddrp, void *swringp,
0753 size_t stat_size)
0754 {
0755
0756
0757
0758 size_t hwlen = nelem * hwsize + stat_size;
0759 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
0760
0761 if (!hwring)
0762 return NULL;
0763
0764
0765
0766
0767
0768 BUG_ON((swsize != 0) != (swringp != NULL));
0769 if (swsize) {
0770 void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
0771
0772 if (!swring) {
0773 dma_free_coherent(dev, hwlen, hwring, *busaddrp);
0774 return NULL;
0775 }
0776 *(void **)swringp = swring;
0777 }
0778
0779 return hwring;
0780 }
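
/*
 * sgl_len - return the number of 8-byte flits needed for a
 * scatter/gather list covering @n DMA addresses.
 */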
0789 static inline unsigned int sgl_len(unsigned int n)
0790 {
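/*
 * A ULP TX scatter/gather list starts with a command/length/address
 * header covering the first address in 2 flits; each additional pair
 * of addresses with their 32-bit lengths packs into 3 flits, and a
 * final unpaired address takes 2 more.  Hence for n addresses we need
 * 2 + 3*((n-1)/2) flits, plus 2 when (n-1) is odd, which is what the
 * expression below computes.
 */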
0808 n--;
0809 return (3 * n) / 2 + (n & 1) + 2;
0810 }
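
/*
 * Return the number of TX descriptors (Egress Queue units) needed to
 * hold a work request of @flits 8-byte flits.
 */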
0819 static inline unsigned int flits_to_desc(unsigned int flits)
0820 {
0821 BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
0822 return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
0823 }
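
/*
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 */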
0832 static inline int is_eth_imm(const struct sk_buff *skb)
0833 {
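/*
 * The VF driver never sends packet data as immediate data in its
 * firmware work request, so this always returns false and the
 * immediate-data transmit path is never taken.
 */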
0841 return false;
0842 }
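
/*
 * calc_tx_flits - return the number of flits needed to send an skb:
 * the TX packet (and optional LSO) headers plus a scatter/gather list
 * covering the linear data and all fragments.
 */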
0851 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
0852 {
0853 unsigned int flits;
0854
0855
0856
0857
0858
0859
0860 if (is_eth_imm(skb))
0861 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
0862 sizeof(__be64));
0863
0864
0865
0866
0867
0868
0869
0870
0871
0872
0873 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
0874 if (skb_shinfo(skb)->gso_size)
0875 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
0876 sizeof(struct cpl_tx_pkt_lso_core) +
0877 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
0878 else
0879 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
0880 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
0881 return flits;
0882 }
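
/**
 * write_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @tq: the TX queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into skb main-body data to include in the SGL
 * @addr: the list of DMA bus addresses for the SGL elements
 *
 * Writes a ULP_TX_SC_DSGL covering the skb head (from @start) and all
 * of its fragments.  If the SGL would cross the end of the descriptor
 * ring, it is assembled in a bounce buffer and copied into place in
 * two pieces.
 */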
0901 static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
0902 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
0903 const dma_addr_t *addr)
0904 {
0905 unsigned int i, len;
0906 struct ulptx_sge_pair *to;
0907 const struct skb_shared_info *si = skb_shinfo(skb);
0908 unsigned int nfrags = si->nr_frags;
0909 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
0910
0911 len = skb_headlen(skb) - start;
0912 if (likely(len)) {
0913 sgl->len0 = htonl(len);
0914 sgl->addr0 = cpu_to_be64(addr[0] + start);
0915 nfrags++;
0916 } else {
0917 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
0918 sgl->addr0 = cpu_to_be64(addr[1]);
0919 }
0920
0921 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
0922 ULPTX_NSGE_V(nfrags));
0923 if (likely(--nfrags == 0))
0924 return;
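
/*
 * If the SGL would spill past the end of the ring (tq->stat), build it
 * in the on-stack buffer and copy the pieces into the ring afterwards.
 */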
0930 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
0931
0932 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
0933 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
0934 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
0935 to->addr[0] = cpu_to_be64(addr[i]);
0936 to->addr[1] = cpu_to_be64(addr[++i]);
0937 }
0938 if (nfrags) {
0939 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
0940 to->len[1] = cpu_to_be32(0);
0941 to->addr[0] = cpu_to_be64(addr[i + 1]);
0942 }
0943 if (unlikely((u8 *)end > (u8 *)tq->stat)) {
0944 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
0945
0946 if (likely(part0))
0947 memcpy(sgl->sge, buf, part0);
0948 part1 = (u8 *)end - (u8 *)tq->stat;
0949 memcpy(tq->desc, (u8 *)buf + part0, part1);
0950 end = (void *)tq->desc + part1;
0951 }
0952 if ((uintptr_t)end & 8)
0953 *end = 0;
0954 }
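
/**
 * ring_tx_db - ring a TX queue's doorbell
 * @adapter: the adapter
 * @tq: the TX queue
 * @n: the number of new descriptors to make available
 *
 * Notifies the hardware of new TX descriptors, using the BAR2 doorbell
 * (with a write-combined copy of the descriptor for single-descriptor
 * writes where possible) or the kernel doorbell register otherwise.
 */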
0964 static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
0965 int n)
0966 {
0967
0968
0969
0970 wmb();
0971
0972
0973
0974
0975 if (unlikely(tq->bar2_addr == NULL)) {
0976 u32 val = PIDX_V(n);
0977
0978 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
0979 QID_V(tq->cntxt_id) | val);
0980 } else {
0981 u32 val = PIDX_T5_V(n);
0982
0983
0984
0985
0986
0987
0988
0989 WARN_ON(val & DBPRIO_F);
0990
0991
0992
0993
0994
0995 if (n == 1 && tq->bar2_qid == 0) {
0996 unsigned int index = (tq->pidx
0997 ? (tq->pidx - 1)
0998 : (tq->size - 1));
0999 __be64 *src = (__be64 *)&tq->desc[index];
1000 __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
1001 SGE_UDB_WCDOORBELL);
1002 unsigned int count = EQ_UNIT / sizeof(__be64);
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013 while (count) {
1014
1015
1016
1017
1018 writeq((__force u64)*src, dst);
1019 src++;
1020 dst++;
1021 count--;
1022 }
1023 } else
1024 writel(val | QID_V(tq->bar2_qid),
1025 tq->bar2_addr + SGE_UDB_KDOORBELL);
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037 wmb();
1038 }
1039 }
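
/*
 * inline_tx_skb - copy a packet into a TX descriptor as immediate data
 *
 * Copies the skb contents into the descriptor ring at @pos, wrapping
 * around at the end of the ring, and zero-pads the work request out to
 * a multiple of 16 bytes.
 */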
1052 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
1053 void *pos)
1054 {
1055 u64 *p;
1056 int left = (void *)tq->stat - pos;
1057
1058 if (likely(skb->len <= left)) {
1059 if (likely(!skb->data_len))
1060 skb_copy_from_linear_data(skb, pos, skb->len);
1061 else
1062 skb_copy_bits(skb, 0, pos, skb->len);
1063 pos += skb->len;
1064 } else {
1065 skb_copy_bits(skb, 0, pos, left);
1066 skb_copy_bits(skb, left, tq->desc, skb->len - left);
1067 pos = (void *)tq->desc + (skb->len - left);
1068 }
1069
1070
1071 p = PTR_ALIGN(pos, 8);
1072 if ((uintptr_t)p & 8)
1073 *p = 0;
1074 }
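
/*
 * Work out the CPL checksum-offload control bits for a packet: the
 * checksum type (TCP/UDP over IPv4/IPv6) plus header-length or
 * start/offset information, or TXPKT_L4CSUM_DIS_F if the protocol
 * isn't one the hardware can handle.
 */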
1080 static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1081 {
1082 int csum_type;
1083 const struct iphdr *iph = ip_hdr(skb);
1084
1085 if (iph->version == 4) {
1086 if (iph->protocol == IPPROTO_TCP)
1087 csum_type = TX_CSUM_TCPIP;
1088 else if (iph->protocol == IPPROTO_UDP)
1089 csum_type = TX_CSUM_UDPIP;
1090 else {
1091 nocsum:
1092
1093
1094
1095
1096 return TXPKT_L4CSUM_DIS_F;
1097 }
1098 } else {
1099
1100
1101
1102 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1103
1104 if (ip6h->nexthdr == IPPROTO_TCP)
1105 csum_type = TX_CSUM_TCPIP6;
1106 else if (ip6h->nexthdr == IPPROTO_UDP)
1107 csum_type = TX_CSUM_UDPIP6;
1108 else
1109 goto nocsum;
1110 }
1111
1112 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1113 u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
1114 int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1115
1116 if (chip <= CHELSIO_T5)
1117 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1118 else
1119 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1120 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1121 } else {
1122 int start = skb_transport_offset(skb);
1123
1124 return TXPKT_CSUM_TYPE_V(csum_type) |
1125 TXPKT_CSUM_START_V(start) |
1126 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1127 }
1128 }
1129
1133 static void txq_stop(struct sge_eth_txq *txq)
1134 {
1135 netif_tx_stop_queue(txq->txq);
1136 txq->q.stops++;
1137 }
1138
1142 static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1143 {
1144 tq->in_use += n;
1145 tq->pidx += n;
1146 if (tq->pidx >= tq->size)
1147 tq->pidx -= tq->size;
1148 }
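
/**
 * t4vf_eth_xmit - add a packet to an Ethernet TX queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Adds an Ethernet packet to an SGE TX queue.  Runs with softirqs
 * disabled.
 */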
1157 netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1158 {
1159 u32 wr_mid;
1160 u64 cntrl, *end;
1161 int qidx, credits, max_pkt_len;
1162 unsigned int flits, ndesc;
1163 struct adapter *adapter;
1164 struct sge_eth_txq *txq;
1165 const struct port_info *pi;
1166 struct fw_eth_tx_pkt_vm_wr *wr;
1167 struct cpl_tx_pkt_core *cpl;
1168 const struct skb_shared_info *ssi;
1169 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1170 const size_t fw_hdr_copy_len = sizeof(wr->firmware);
1171
1172
1173
1174
1175
1176
1177
1178 if (unlikely(skb->len < fw_hdr_copy_len))
1179 goto out_free;
1180
1181
1182 max_pkt_len = ETH_HLEN + dev->mtu;
1183 if (skb_vlan_tagged(skb))
1184 max_pkt_len += VLAN_HLEN;
1185 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1186 goto out_free;
1187
1188
1189
1190
1191 pi = netdev_priv(dev);
1192 adapter = pi->adapter;
1193 qidx = skb_get_queue_mapping(skb);
1194 BUG_ON(qidx >= pi->nqsets);
1195 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1196
1197 if (pi->vlan_id && !skb_vlan_tag_present(skb))
1198 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1199 pi->vlan_id);
1200
1201
1202
1203
1204
1205 reclaim_completed_tx(adapter, &txq->q, true);
1206
1207
1208
1209
1210
1211
1212 flits = calc_tx_flits(skb);
1213 ndesc = flits_to_desc(flits);
1214 credits = txq_avail(&txq->q) - ndesc;
1215
1216 if (unlikely(credits < 0)) {
1217
1218
1219
1220
1221
1222
1223 txq_stop(txq);
1224 dev_err(adapter->pdev_dev,
1225 "%s: TX ring %u full while queue awake!\n",
1226 dev->name, qidx);
1227 return NETDEV_TX_BUSY;
1228 }
1229
1230 if (!is_eth_imm(skb) &&
1231 unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1232
1233
1234
1235
1236
1237 txq->mapping_err++;
1238 goto out_free;
1239 }
1240
1241 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1242 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
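/*
 * We're getting close to filling the ring: stop the queue now and ask
 * the firmware for an Egress Queue update so we find out when space
 * frees up and can restart the queue.
 */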
1252 txq_stop(txq);
1253 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1254 }
1255
1256
1257
1258
1259
1260
1261
1262 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1263 wr = (void *)&txq->q.desc[txq->q.pidx];
1264 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1265 wr->r3[0] = cpu_to_be32(0);
1266 wr->r3[1] = cpu_to_be32(0);
1267 skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
1268 end = (u64 *)wr + flits;
1269
1270
1271
1272
1273
1274
1275 ssi = skb_shinfo(skb);
1276 if (ssi->gso_size) {
1277 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1278 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1279 int l3hdr_len = skb_network_header_len(skb);
1280 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1281
1282 wr->op_immdlen =
1283 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1284 FW_WR_IMMDLEN_V(sizeof(*lso) +
1285 sizeof(*cpl)));
1286
1287
1288
1289 lso->lso_ctrl =
1290 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1291 LSO_FIRST_SLICE_F |
1292 LSO_LAST_SLICE_F |
1293 LSO_IPV6_V(v6) |
1294 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1295 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1296 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1297 lso->ipid_ofst = cpu_to_be16(0);
1298 lso->mss = cpu_to_be16(ssi->gso_size);
1299 lso->seqno_offset = cpu_to_be32(0);
1300 if (is_t4(adapter->params.chip))
1301 lso->len = cpu_to_be32(skb->len);
1302 else
1303 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1304
1305
1306
1307
1308
1309 cpl = (void *)(lso + 1);
1310
1311 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1312 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1313 else
1314 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1315
1316 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1317 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1318 TXPKT_IPHDR_LEN_V(l3hdr_len);
1319 txq->tso++;
1320 txq->tx_cso += ssi->gso_segs;
1321 } else {
1322 int len;
1323
1324 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1325 wr->op_immdlen =
1326 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1327 FW_WR_IMMDLEN_V(len));
1328
1329
1330
1331
1332
1333 cpl = (void *)(wr + 1);
1334 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1335 cntrl = hwcsum(adapter->params.chip, skb) |
1336 TXPKT_IPCSUM_DIS_F;
1337 txq->tx_cso++;
1338 } else
1339 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1340 }
1341
1342
1343
1344
1345
1346 if (skb_vlan_tag_present(skb)) {
1347 txq->vlan_ins++;
1348 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1349 }
1350
1351
1352
1353
1354 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1355 TXPKT_INTF_V(pi->port_id) |
1356 TXPKT_PF_V(0));
1357 cpl->pack = cpu_to_be16(0);
1358 cpl->len = cpu_to_be16(skb->len);
1359 cpl->ctrl1 = cpu_to_be64(cntrl);
1360
1361 #ifdef T4_TRACE
1362 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1363 "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1364 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
1365 #endif
1366
1367
1368
1369
1370
1371 if (is_eth_imm(skb)) {
1372
1373
1374
1375
1376 inline_tx_skb(skb, &txq->q, cpl + 1);
1377 dev_consume_skb_any(skb);
1378 } else {
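/*
 * Write the packet's scatter/gather list into the ring and record the
 * skb against the last descriptor so it can be freed once the hardware
 * is done with it.  The skb is orphaned now so that socket send-buffer
 * accounting isn't held up by descriptors the hardware may hold on to
 * for a long time.
 */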
1416 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1417 struct sge_txq *tq = &txq->q;
1418 int last_desc;
1419
1420
1421
1422
1423
1424
1425
1426
1427 if (unlikely((void *)sgl == (void *)tq->stat)) {
1428 sgl = (void *)tq->desc;
1429 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1430 }
1431
1432 write_sgl(skb, tq, sgl, end, 0, addr);
1433 skb_orphan(skb);
1434
1435 last_desc = tq->pidx + ndesc - 1;
1436 if (last_desc >= tq->size)
1437 last_desc -= tq->size;
1438 tq->sdesc[last_desc].skb = skb;
1439 tq->sdesc[last_desc].sgl = sgl;
1440 }
1441
1442
1443
1444
1445
1446 txq_advance(&txq->q, ndesc);
1447 netif_trans_update(dev);
1448 ring_tx_db(adapter, &txq->q, ndesc);
1449 return NETDEV_TX_OK;
1450
1451 out_free:
1456 dev_kfree_skb_any(skb);
1457 return NETDEV_TX_OK;
1458 }
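
/*
 * copy_frags - attach the page fragments of a packet gather list to an
 * skb, starting @offset bytes into the first fragment.  An extra
 * reference is taken on the last page, which we do not own.
 */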
1469 static inline void copy_frags(struct sk_buff *skb,
1470 const struct pkt_gl *gl,
1471 unsigned int offset)
1472 {
1473 int i;
1474
1475
1476 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1477 gl->frags[0].offset + offset,
1478 gl->frags[0].size - offset);
1479 skb_shinfo(skb)->nr_frags = gl->nfrags;
1480 for (i = 1; i < gl->nfrags; i++)
1481 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1482 gl->frags[i].offset,
1483 gl->frags[i].size);
1484
1485
1486 get_page(gl->frags[gl->nfrags - 1].page);
1487 }
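
/**
 * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
 * @gl: the gather list
 * @skb_len: size of the sk_buff main body if a copy is needed
 * @pull_len: amount of data to pull into the sk_buff main body
 *
 * Builds an sk_buff from the given packet gather list.  Returns the
 * sk_buff or %NULL if sk_buff allocation failed.
 */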
1498 static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1499 unsigned int skb_len,
1500 unsigned int pull_len)
1501 {
1502 struct sk_buff *skb;
1503
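/*
 * If the whole packet is small enough, copy it into a linear skb.
 * Otherwise allocate a small skb, pull @pull_len bytes into its head
 * and attach the Free List pages as fragments.
 */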
1515 if (gl->tot_len <= RX_COPY_THRES) {
1516
1517 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1518 if (unlikely(!skb))
1519 goto out;
1520 __skb_put(skb, gl->tot_len);
1521 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1522 } else {
1523 skb = alloc_skb(skb_len, GFP_ATOMIC);
1524 if (unlikely(!skb))
1525 goto out;
1526 __skb_put(skb, pull_len);
1527 skb_copy_to_linear_data(skb, gl->va, pull_len);
1528
1529 copy_frags(skb, gl, pull_len);
1530 skb->len = gl->tot_len;
1531 skb->data_len = skb->len - pull_len;
1532 skb->truesize += skb->data_len;
1533 }
1534
1535 out:
1536 return skb;
1537 }
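
/*
 * Release the pages of a packet gather list.  The final page is left
 * alone since the gather list does not own it; it remains associated
 * with the Free List.
 */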
1546 static void t4vf_pktgl_free(const struct pkt_gl *gl)
1547 {
1548 int frag;
1549
1550 frag = gl->nfrags - 1;
1551 while (frag--)
1552 put_page(gl->frags[frag].page);
1553 }
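
/**
 * do_gro - perform Generic Receive Offload ingress packet processing
 * @rxq: the ingress RX queue
 * @gl: the packet gather list
 * @pkt: the CPL_RX_PKT header
 *
 * Builds an skb around the gather list pages and feeds it to the GRO
 * machinery via napi_gro_frags().
 */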
1564 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1565 const struct cpl_rx_pkt *pkt)
1566 {
1567 struct adapter *adapter = rxq->rspq.adapter;
1568 struct sge *s = &adapter->sge;
1569 struct port_info *pi;
1570 int ret;
1571 struct sk_buff *skb;
1572
1573 skb = napi_get_frags(&rxq->rspq.napi);
1574 if (unlikely(!skb)) {
1575 t4vf_pktgl_free(gl);
1576 rxq->stats.rx_drops++;
1577 return;
1578 }
1579
1580 copy_frags(skb, gl, s->pktshift);
1581 skb->len = gl->tot_len - s->pktshift;
1582 skb->data_len = skb->len;
1583 skb->truesize += skb->data_len;
1584 skb->ip_summed = CHECKSUM_UNNECESSARY;
1585 skb_record_rx_queue(skb, rxq->rspq.idx);
1586 pi = netdev_priv(skb->dev);
1587
1588 if (pkt->vlan_ex && !pi->vlan_id) {
1589 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1590 be16_to_cpu(pkt->vlan));
1591 rxq->stats.vlan_ex++;
1592 }
1593 ret = napi_gro_frags(&rxq->rspq.napi);
1594
1595 if (ret == GRO_HELD)
1596 rxq->stats.lro_pkts++;
1597 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1598 rxq->stats.lro_merged++;
1599 rxq->stats.pkts++;
1600 rxq->stats.rx_cso++;
1601 }
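
/**
 * t4vf_ethrx_handler - process an ingress Ethernet packet
 * @rspq: the response queue that received the packet
 * @rsp: the response queue descriptor holding the RX_PKT message
 * @gl: the gather list of packet fragments
 *
 * Processes an ingress Ethernet packet: hands suitable TCP packets to
 * GRO, otherwise builds an skb, fills in checksum and VLAN state and
 * passes it to the network stack.
 */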
1611 int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1612 const struct pkt_gl *gl)
1613 {
1614 struct sk_buff *skb;
1615 const struct cpl_rx_pkt *pkt = (void *)rsp;
1616 bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1617 (rspq->netdev->features & NETIF_F_RXCSUM);
1618 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1619 struct adapter *adapter = rspq->adapter;
1620 struct sge *s = &adapter->sge;
1621 struct port_info *pi;
1622
1623
1624
1625
1626
1627 if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1628 (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1629 !pkt->ip_frag) {
1630 do_gro(rxq, gl, pkt);
1631 return 0;
1632 }
1633
1634
1635
1636
1637 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1638 if (unlikely(!skb)) {
1639 t4vf_pktgl_free(gl);
1640 rxq->stats.rx_drops++;
1641 return 0;
1642 }
1643 __skb_pull(skb, s->pktshift);
1644 skb->protocol = eth_type_trans(skb, rspq->netdev);
1645 skb_record_rx_queue(skb, rspq->idx);
1646 pi = netdev_priv(skb->dev);
1647 rxq->stats.pkts++;
1648
1649 if (csum_ok && !pkt->err_vec &&
1650 (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
1651 if (!pkt->ip_frag) {
1652 skb->ip_summed = CHECKSUM_UNNECESSARY;
1653 rxq->stats.rx_cso++;
1654 } else if (pkt->l2info & htonl(RXF_IP_F)) {
1655 __sum16 c = (__force __sum16)pkt->csum;
1656 skb->csum = csum_unfold(c);
1657 skb->ip_summed = CHECKSUM_COMPLETE;
1658 rxq->stats.rx_cso++;
1659 }
1660 } else
1661 skb_checksum_none_assert(skb);
1662
1663 if (pkt->vlan_ex && !pi->vlan_id) {
1664 rxq->stats.vlan_ex++;
1665 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1666 be16_to_cpu(pkt->vlan));
1667 }
1668
1669 netif_receive_skb(skb);
1670
1671 return 0;
1672 }
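
/*
 * Return whether a response descriptor contains a new response, i.e.
 * its generation bit matches the queue's current generation.
 */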
1682 static inline bool is_new_response(const struct rsp_ctrl *rc,
1683 const struct sge_rspq *rspq)
1684 {
1685 return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
1686 }
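
/**
 * restore_rx_bufs - put back a packet's RX buffers
 * @gl: the packet gather list
 * @fl: the SGE Free List
 * @frags: how many fragments to put back
 *
 * Called when a response handler could not process a packet (typically
 * because of a memory shortage): the pages referenced by the gather
 * list are pushed back onto the Free List, with the consumer index
 * rewound, so the packet can be delivered again later.  The buffers
 * are marked unmapped since they were unmapped when gathered.
 */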
1708 static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1709 int frags)
1710 {
1711 struct rx_sw_desc *sdesc;
1712
1713 while (frags--) {
1714 if (fl->cidx == 0)
1715 fl->cidx = fl->size - 1;
1716 else
1717 fl->cidx--;
1718 sdesc = &fl->sdesc[fl->cidx];
1719 sdesc->page = gl->frags[frags].page;
1720 sdesc->dma_addr |= RX_UNMAPPED_BUF;
1721 fl->avail++;
1722 }
1723 }
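
/*
 * Advance to the next response descriptor, wrapping at the end of the
 * queue and flipping the generation bit.
 */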
1731 static inline void rspq_next(struct sge_rspq *rspq)
1732 {
1733 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1734 if (unlikely(++rspq->cidx == rspq->size)) {
1735 rspq->cidx = 0;
1736 rspq->gen ^= 1;
1737 rspq->cur_desc = rspq->desc;
1738 }
1739 }
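
/**
 * process_responses - process responses from an SGE response queue
 * @rspq: the ingress response queue to process
 * @budget: how many responses can be processed in this round
 *
 * Processes responses from @rspq up to @budget: gathers Free List
 * fragments for packet responses, dispatches them to the queue's
 * handler, and tops up the Free List when it runs low.  Returns the
 * number of responses processed.
 */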
1754 static int process_responses(struct sge_rspq *rspq, int budget)
1755 {
1756 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1757 struct adapter *adapter = rspq->adapter;
1758 struct sge *s = &adapter->sge;
1759 int budget_left = budget;
1760
1761 while (likely(budget_left)) {
1762 int ret, rsp_type;
1763 const struct rsp_ctrl *rc;
1764
1765 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1766 if (!is_new_response(rc, rspq))
1767 break;
1768
1769
1770
1771
1772
1773 dma_rmb();
1774 rsp_type = RSPD_TYPE_G(rc->type_gen);
1775 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
1776 struct page_frag *fp;
1777 struct pkt_gl gl;
1778 const struct rx_sw_desc *sdesc;
1779 u32 bufsz, frag;
1780 u32 len = be32_to_cpu(rc->pldbuflen_qid);
1781
1782
1783
1784
1785
1786 if (len & RSPD_NEWBUF_F) {
1787
1788
1789
1790
1791
1792 if (likely(rspq->offset > 0)) {
1793 free_rx_bufs(rspq->adapter, &rxq->fl,
1794 1);
1795 rspq->offset = 0;
1796 }
1797 len = RSPD_LEN_G(len);
1798 }
1799 gl.tot_len = len;
1800
1801
1802
1803
1804 for (frag = 0, fp = gl.frags; ; frag++, fp++) {
1805 BUG_ON(frag >= MAX_SKB_FRAGS);
1806 BUG_ON(rxq->fl.avail == 0);
1807 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1808 bufsz = get_buf_size(adapter, sdesc);
1809 fp->page = sdesc->page;
1810 fp->offset = rspq->offset;
1811 fp->size = min(bufsz, len);
1812 len -= fp->size;
1813 if (!len)
1814 break;
1815 unmap_rx_buf(rspq->adapter, &rxq->fl);
1816 }
1817 gl.nfrags = frag+1;
1818
1819
1820
1821
1822
1823
1824 dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1825 get_buf_addr(sdesc),
1826 fp->size, DMA_FROM_DEVICE);
1827 gl.va = (page_address(gl.frags[0].page) +
1828 gl.frags[0].offset);
1829 prefetch(gl.va);
1830
1831
1832
1833
1834
1835 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1836 if (likely(ret == 0))
1837 rspq->offset += ALIGN(fp->size, s->fl_align);
1838 else
1839 restore_rx_bufs(&gl, &rxq->fl, frag);
1840 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
1841 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1842 } else {
1843 WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
1844 ret = 0;
1845 }
1846
1847 if (unlikely(ret)) {
1848
1849
1850
1851
1852
1853 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1854 rspq->next_intr_params =
1855 QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
1856 break;
1857 }
1858
1859 rspq_next(rspq);
1860 budget_left--;
1861 }
1862
1863
1864
1865
1866
1867
1868 if (rspq->offset >= 0 &&
1869 fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1870 __refill_fl(rspq->adapter, &rxq->fl);
1871 return budget - budget_left;
1872 }
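
/**
 * napi_rx_handler - the NAPI handler for RX processing
 * @napi: the napi instance
 * @budget: how many packets we can process in this round
 *
 * Processes up to @budget responses, then re-arms the queue's
 * interrupt hold-off parameters via the Going To Sleep doorbell.
 */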
1885 static int napi_rx_handler(struct napi_struct *napi, int budget)
1886 {
1887 unsigned int intr_params;
1888 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1889 int work_done = process_responses(rspq, budget);
1890 u32 val;
1891
1892 if (likely(work_done < budget)) {
1893 napi_complete_done(napi, work_done);
1894 intr_params = rspq->next_intr_params;
1895 rspq->next_intr_params = rspq->intr_params;
1896 } else
1897 intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
1898
1899 if (unlikely(work_done == 0))
1900 rspq->unhandled_irqs++;
1901
1902 val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1903
1904
1905
1906 if (unlikely(!rspq->bar2_addr)) {
1907 t4_write_reg(rspq->adapter,
1908 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1909 val | INGRESSQID_V((u32)rspq->cntxt_id));
1910 } else {
1911 writel(val | INGRESSQID_V(rspq->bar2_qid),
1912 rspq->bar2_addr + SGE_UDB_GTS);
1913 wmb();
1914 }
1915 return work_done;
1916 }
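
/*
 * The MSI-X interrupt handler for an SGE response queue: schedule NAPI.
 */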
1922 irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1923 {
1924 struct sge_rspq *rspq = cookie;
1925
1926 napi_schedule(&rspq->napi);
1927 return IRQ_HANDLED;
1928 }
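
/*
 * Process the indirect interrupt entries in the interrupt queue and
 * kick off NAPI for each queue indicated by the entries.
 */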
1934 static unsigned int process_intrq(struct adapter *adapter)
1935 {
1936 struct sge *s = &adapter->sge;
1937 struct sge_rspq *intrq = &s->intrq;
1938 unsigned int work_done;
1939 u32 val;
1940
1941 spin_lock(&adapter->sge.intrq_lock);
1942 for (work_done = 0; ; work_done++) {
1943 const struct rsp_ctrl *rc;
1944 unsigned int qid, iq_idx;
1945 struct sge_rspq *rspq;
1946
1947
1948
1949
1950
1951 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1952 if (!is_new_response(rc, intrq))
1953 break;
1954
1955
1956
1957
1958
1959
1960 dma_rmb();
1961 if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1962 dev_err(adapter->pdev_dev,
1963 "Unexpected INTRQ response type %d\n",
1964 RSPD_TYPE_G(rc->type_gen));
1965 continue;
1966 }
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976 qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1977 iq_idx = IQ_IDX(s, qid);
1978 if (unlikely(iq_idx >= MAX_INGQ)) {
1979 dev_err(adapter->pdev_dev,
1980 "Ingress QID %d out of range\n", qid);
1981 continue;
1982 }
1983 rspq = s->ingr_map[iq_idx];
1984 if (unlikely(rspq == NULL)) {
1985 dev_err(adapter->pdev_dev,
1986 "Ingress QID %d RSPQ=NULL\n", qid);
1987 continue;
1988 }
1989 if (unlikely(rspq->abs_id != qid)) {
1990 dev_err(adapter->pdev_dev,
1991 "Ingress QID %d refers to RSPQ %d\n",
1992 qid, rspq->abs_id);
1993 continue;
1994 }
1995
1996
1997
1998
1999
2000
2001 napi_schedule(&rspq->napi);
2002 rspq_next(intrq);
2003 }
2004
2005 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
2006
2007
2008
2009 if (unlikely(!intrq->bar2_addr)) {
2010 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
2011 val | INGRESSQID_V(intrq->cntxt_id));
2012 } else {
2013 writel(val | INGRESSQID_V(intrq->bar2_qid),
2014 intrq->bar2_addr + SGE_UDB_GTS);
2015 wmb();
2016 }
2017
2018 spin_unlock(&adapter->sge.intrq_lock);
2019
2020 return work_done;
2021 }
2022
2027 static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2028 {
2029 struct adapter *adapter = cookie;
2030
2031 process_intrq(adapter);
2032 return IRQ_HANDLED;
2033 }
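
/*
 * t4vf_intr_handler - select the top-level interrupt handler
 *
 * Returns the handler appropriate for the adapter's current interrupt
 * mode (MSI-X or MSI).
 */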
2042 irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2043 {
2044 BUG_ON((adapter->flags &
2045 (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
2046 if (adapter->flags & CXGB4VF_USING_MSIX)
2047 return t4vf_sge_intr_msix;
2048 else
2049 return t4vf_intr_msi;
2050 }
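
/**
 * sge_rx_timer_cb - RX queue check timer
 * @t: the timer
 *
 * Runs periodically to find Free Lists that were marked as starving
 * and, if they still are, kicks their NAPI instances so the lists get
 * replenished.
 */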
2063 static void sge_rx_timer_cb(struct timer_list *t)
2064 {
2065 struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
2066 struct sge *s = &adapter->sge;
2067 unsigned int i;
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2078 unsigned long m;
2079
2080 for (m = s->starving_fl[i]; m; m &= m - 1) {
2081 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2082 struct sge_fl *fl = s->egr_map[id];
2083
2084 clear_bit(id, s->starving_fl);
2085 smp_mb__after_atomic();
2086
2087
2088
2089
2090
2091
2092
2093 if (fl_starving(adapter, fl)) {
2094 struct sge_eth_rxq *rxq;
2095
2096 rxq = container_of(fl, struct sge_eth_rxq, fl);
2097 if (napi_reschedule(&rxq->rspq.napi))
2098 fl->starving++;
2099 else
2100 set_bit(id, s->starving_fl);
2101 }
2102 }
2103 }
2104
2105
2106
2107
2108 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2109 }
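
/**
 * sge_tx_timer_cb - TX queue check timer
 * @t: the timer
 *
 * Runs periodically, cycling through the Ethernet TX queues and
 * reclaiming completed descriptors, up to a global budget per
 * invocation.
 */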
2122 static void sge_tx_timer_cb(struct timer_list *t)
2123 {
2124 struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
2125 struct sge *s = &adapter->sge;
2126 unsigned int i, budget;
2127
2128 budget = MAX_TIMER_TX_RECLAIM;
2129 i = s->ethtxq_rover;
2130 do {
2131 struct sge_eth_txq *txq = &s->ethtxq[i];
2132
2133 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2134 int avail = reclaimable(&txq->q);
2135
2136 if (avail > budget)
2137 avail = budget;
2138
2139 free_tx_desc(adapter, &txq->q, avail, true);
2140 txq->q.in_use -= avail;
2141 __netif_tx_unlock(txq->txq);
2142
2143 budget -= avail;
2144 if (!budget)
2145 break;
2146 }
2147
2148 i++;
2149 if (i >= s->ethqsets)
2150 i = 0;
2151 } while (i != s->ethtxq_rover);
2152 s->ethtxq_rover = i;
2153
2154
2155
2156
2157
2158
2159 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2160 }
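
/*
 * bar2_address - return the BAR2 doorbell address for a queue
 *
 * Returns the BAR2 address (and BAR2 Queue ID via @pbar2_qid) for the
 * given queue, or NULL if the queue should use the legacy kernel
 * doorbell registers instead.
 */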
2175 static void __iomem *bar2_address(struct adapter *adapter,
2176 unsigned int qid,
2177 enum t4_bar2_qtype qtype,
2178 unsigned int *pbar2_qid)
2179 {
2180 u64 bar2_qoffset;
2181 int ret;
2182
2183 ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
2184 &bar2_qoffset, pbar2_qid);
2185 if (ret)
2186 return NULL;
2187
2188 return adapter->bar2 + bar2_qoffset;
2189 }
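
/**
 * t4vf_sge_alloc_rxq - allocate an SGE RX (response) queue
 * @adapter: the adapter
 * @rspq: pointer to the new queue's Response Queue structure
 * @iqasynch: whether this is an asynchronous event queue
 * @dev: the network device associated with the queue
 * @intr_dest: interrupt destination (overridden when using MSI)
 * @fl: pointer to the new queue's Free List structure, if any
 * @hnd: the response handler for the queue
 *
 * Allocates the descriptor rings, issues the firmware FW_IQ_CMD to
 * create the ingress queue (and optional Free List), and initializes
 * the software state, including pre-filling the Free List.
 */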
2201 int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2202 bool iqasynch, struct net_device *dev,
2203 int intr_dest,
2204 struct sge_fl *fl, rspq_handler_t hnd)
2205 {
2206 struct sge *s = &adapter->sge;
2207 struct port_info *pi = netdev_priv(dev);
2208 struct fw_iq_cmd cmd, rpl;
2209 int ret, iqandst, flsz = 0;
2210 int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING);
2211
2212
2213
2214
2215
2216
2217
2218
2219 if ((adapter->flags & CXGB4VF_USING_MSI) &&
2220 rspq != &adapter->sge.intrq) {
2221 iqandst = SGE_INTRDST_IQ;
2222 intr_dest = adapter->sge.intrq.abs_id;
2223 } else
2224 iqandst = SGE_INTRDST_PCI;
2225
2226
2227
2228
2229
2230
2231
2232 rspq->size = roundup(rspq->size, 16);
2233 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2234 0, &rspq->phys_addr, NULL, 0);
2235 if (!rspq->desc)
2236 return -ENOMEM;
2237
2238
2239
2240
2241
2242
2243
2244
2245 memset(&cmd, 0, sizeof(cmd));
2246 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2247 FW_CMD_REQUEST_F |
2248 FW_CMD_WRITE_F |
2249 FW_CMD_EXEC_F);
2250 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
2251 FW_IQ_CMD_IQSTART_F |
2252 FW_LEN16(cmd));
2253 cmd.type_to_iqandstindex =
2254 cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2255 FW_IQ_CMD_IQASYNCH_V(iqasynch) |
2256 FW_IQ_CMD_VIID_V(pi->viid) |
2257 FW_IQ_CMD_IQANDST_V(iqandst) |
2258 FW_IQ_CMD_IQANUS_V(1) |
2259 FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
2260 FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
2261 cmd.iqdroprss_to_iqesize =
2262 cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
2263 FW_IQ_CMD_IQGTSMODE_F |
2264 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
2265 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
2266 cmd.iqsize = cpu_to_be16(rspq->size);
2267 cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2268
2269 if (fl) {
2270 unsigned int chip_ver =
2271 CHELSIO_CHIP_VERSION(adapter->params.chip);
2272
2273
2274
2275
2276
2277
2278
2279
2280 if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
2281 fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
2282 fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2283 fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2284 sizeof(__be64), sizeof(struct rx_sw_desc),
2285 &fl->addr, &fl->sdesc, s->stat_len);
2286 if (!fl->desc) {
2287 ret = -ENOMEM;
2288 goto err;
2289 }
2290
2291
2292
2293
2294
2295
2296 flsz = (fl->size / FL_PER_EQ_UNIT +
2297 s->stat_len / EQ_UNIT);
2298
2299
2300
2301
2302
2303 cmd.iqns_to_fl0congen =
2304 cpu_to_be32(
2305 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2306 FW_IQ_CMD_FL0PACKEN_F |
2307 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2308 FW_IQ_CMD_FL0DATARO_V(relaxed) |
2309 FW_IQ_CMD_FL0PADEN_F);
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319 cmd.fl0dcaen_to_fl0cidxfthresh =
2320 cpu_to_be16(
2321 FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5
2322 ? FETCHBURSTMIN_128B_X
2323 : FETCHBURSTMIN_64B_T6_X) |
2324 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
2325 FETCHBURSTMAX_512B_X :
2326 FETCHBURSTMAX_256B_X));
2327 cmd.fl0size = cpu_to_be16(flsz);
2328 cmd.fl0addr = cpu_to_be64(fl->addr);
2329 }
2330
2331
2332
2333
2334
2335 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2336 if (ret)
2337 goto err;
2338
2339 netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2340 rspq->cur_desc = rspq->desc;
2341 rspq->cidx = 0;
2342 rspq->gen = 1;
2343 rspq->next_intr_params = rspq->intr_params;
2344 rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2345 rspq->bar2_addr = bar2_address(adapter,
2346 rspq->cntxt_id,
2347 T4_BAR2_QTYPE_INGRESS,
2348 &rspq->bar2_qid);
2349 rspq->abs_id = be16_to_cpu(rpl.physiqid);
2350 rspq->size--;
2351 rspq->adapter = adapter;
2352 rspq->netdev = dev;
2353 rspq->handler = hnd;
2354
2355
2356 rspq->offset = fl ? 0 : -1;
2357
2358 if (fl) {
2359 fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2360 fl->avail = 0;
2361 fl->pend_cred = 0;
2362 fl->pidx = 0;
2363 fl->cidx = 0;
2364 fl->alloc_failed = 0;
2365 fl->large_alloc_failed = 0;
2366 fl->starving = 0;
2367
2368
2369
2370
2371 fl->bar2_addr = bar2_address(adapter,
2372 fl->cntxt_id,
2373 T4_BAR2_QTYPE_EGRESS,
2374 &fl->bar2_qid);
2375
2376 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2377 }
2378
2379 return 0;
2380
2381 err:
2382
2383
2384
2385
2386 if (rspq->desc) {
2387 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2388 rspq->desc, rspq->phys_addr);
2389 rspq->desc = NULL;
2390 }
2391 if (fl && fl->desc) {
2392 kfree(fl->sdesc);
2393 fl->sdesc = NULL;
2394 dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2395 fl->desc, fl->addr);
2396 fl->desc = NULL;
2397 }
2398 return ret;
2399 }
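
/**
 * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX queue
 * @adapter: the adapter
 * @txq: pointer to the new queue's TX queue structure
 * @dev: the network device associated with the queue
 * @devq: the network TX queue associated with the new TX queue
 * @iqid: the ingress queue to which TX queue events are directed
 *
 * Allocates the descriptor ring, issues the firmware FW_EQ_ETH_CMD to
 * create the egress queue, and initializes the software state.
 */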
2410 int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2411 struct net_device *dev, struct netdev_queue *devq,
2412 unsigned int iqid)
2413 {
2414 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
2415 struct port_info *pi = netdev_priv(dev);
2416 struct fw_eq_eth_cmd cmd, rpl;
2417 struct sge *s = &adapter->sge;
2418 int ret, nentries;
2419
2420
2421
2422
2423
2424 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2425
2426
2427
2428
2429
2430 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2431 sizeof(struct tx_desc),
2432 sizeof(struct tx_sw_desc),
2433 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2434 if (!txq->q.desc)
2435 return -ENOMEM;
2436
2437
2438
2439
2440
2441
2442
2443
2444 memset(&cmd, 0, sizeof(cmd));
2445 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2446 FW_CMD_REQUEST_F |
2447 FW_CMD_WRITE_F |
2448 FW_CMD_EXEC_F);
2449 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
2450 FW_EQ_ETH_CMD_EQSTART_F |
2451 FW_LEN16(cmd));
2452 cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2453 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2454 cmd.fetchszm_to_iqid =
2455 cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
2456 FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
2457 FW_EQ_ETH_CMD_IQID_V(iqid));
2458 cmd.dcaen_to_eqsize =
2459 cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
2460 ? FETCHBURSTMIN_64B_X
2461 : FETCHBURSTMIN_64B_T6_X) |
2462 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2463 FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2464 CIDXFLUSHTHRESH_32_X) |
2465 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2466 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2467
2468
2469
2470
2471
2472 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2473 if (ret) {
2474
2475
2476
2477
2478 kfree(txq->q.sdesc);
2479 txq->q.sdesc = NULL;
2480 dma_free_coherent(adapter->pdev_dev,
2481 nentries * sizeof(struct tx_desc),
2482 txq->q.desc, txq->q.phys_addr);
2483 txq->q.desc = NULL;
2484 return ret;
2485 }
2486
2487 txq->q.in_use = 0;
2488 txq->q.cidx = 0;
2489 txq->q.pidx = 0;
2490 txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2491 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
2492 txq->q.bar2_addr = bar2_address(adapter,
2493 txq->q.cntxt_id,
2494 T4_BAR2_QTYPE_EGRESS,
2495 &txq->q.bar2_qid);
2496 txq->q.abs_id =
2497 FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
2498 txq->txq = devq;
2499 txq->tso = 0;
2500 txq->tx_cso = 0;
2501 txq->vlan_ins = 0;
2502 txq->q.stops = 0;
2503 txq->q.restarts = 0;
2504 txq->mapping_err = 0;
2505 return 0;
2506 }
2507
2511 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2512 {
2513 struct sge *s = &adapter->sge;
2514
2515 dma_free_coherent(adapter->pdev_dev,
2516 tq->size * sizeof(*tq->desc) + s->stat_len,
2517 tq->desc, tq->phys_addr);
2518 tq->cntxt_id = 0;
2519 tq->sdesc = NULL;
2520 tq->desc = NULL;
2521 }
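
/*
 * Free the resources associated with a response queue and its Free
 * List (if any): tell the firmware to release the queues, free the
 * descriptor rings and RX buffers, and tear down the NAPI instance.
 */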
2527 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2528 struct sge_fl *fl)
2529 {
2530 struct sge *s = &adapter->sge;
2531 unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2532
2533 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2534 rspq->cntxt_id, flid, 0xffff);
2535 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2536 rspq->desc, rspq->phys_addr);
2537 netif_napi_del(&rspq->napi);
2538 rspq->netdev = NULL;
2539 rspq->cntxt_id = 0;
2540 rspq->abs_id = 0;
2541 rspq->desc = NULL;
2542
2543 if (fl) {
2544 free_rx_bufs(adapter, fl, fl->avail);
2545 dma_free_coherent(adapter->pdev_dev,
2546 fl->size * sizeof(*fl->desc) + s->stat_len,
2547 fl->desc, fl->addr);
2548 kfree(fl->sdesc);
2549 fl->sdesc = NULL;
2550 fl->cntxt_id = 0;
2551 fl->desc = NULL;
2552 }
2553 }
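
/**
 * t4vf_free_sge_resources - free SGE resources
 * @adapter: the adapter
 *
 * Frees all the SGE queue resources: the Ethernet RX and TX queues,
 * the firmware event queue and the forwarded-interrupt queue.
 */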
2561 void t4vf_free_sge_resources(struct adapter *adapter)
2562 {
2563 struct sge *s = &adapter->sge;
2564 struct sge_eth_rxq *rxq = s->ethrxq;
2565 struct sge_eth_txq *txq = s->ethtxq;
2566 struct sge_rspq *evtq = &s->fw_evtq;
2567 struct sge_rspq *intrq = &s->intrq;
2568 int qs;
2569
2570 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2571 if (rxq->rspq.desc)
2572 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2573 if (txq->q.desc) {
2574 t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2575 free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2576 kfree(txq->q.sdesc);
2577 free_txq(adapter, &txq->q);
2578 }
2579 }
2580 if (evtq->desc)
2581 free_rspq_fl(adapter, evtq, NULL);
2582 if (intrq->desc)
2583 free_rspq_fl(adapter, intrq, NULL);
2584 }
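
/**
 * t4vf_sge_start - enable SGE operation
 * @adapter: the adapter
 *
 * Starts the RX and TX queue check timers.
 */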
2592 void t4vf_sge_start(struct adapter *adapter)
2593 {
2594 adapter->sge.ethtxq_rover = 0;
2595 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2596 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2597 }
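
/**
 * t4vf_sge_stop - disable SGE operation
 * @adapter: the adapter
 *
 * Stops the RX and TX queue check timers.
 */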
2607 void t4vf_sge_stop(struct adapter *adapter)
2608 {
2609 struct sge *s = &adapter->sge;
2610
2611 if (s->rx_timer.function)
2612 del_timer_sync(&s->rx_timer);
2613 if (s->tx_timer.function)
2614 del_timer_sync(&s->tx_timer);
2615 }
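
/**
 * t4vf_sge_init - initialize the SGE code
 * @adapter: the adapter
 *
 * Performs SGE initialization needed after a chip reset: validates the
 * Free List buffer sizes and packet-split mode reported by the
 * firmware, derives the page-order, padding, status-page and
 * starvation-threshold parameters, and sets up the queue check timers
 * and the forwarded-interrupt queue lock.
 */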
2626 int t4vf_sge_init(struct adapter *adapter)
2627 {
2628 struct sge_params *sge_params = &adapter->params.sge;
2629 u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
2630 u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
2631 struct sge *s = &adapter->sge;
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642 if (fl_large_pg <= fl_small_pg)
2643 fl_large_pg = 0;
2644
2645
2646
2647
2648 if (fl_small_pg != PAGE_SIZE ||
2649 (fl_large_pg & (fl_large_pg - 1)) != 0) {
2650 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2651 fl_small_pg, fl_large_pg);
2652 return -EINVAL;
2653 }
2654 if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
2655 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2656 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2657 return -EINVAL;
2658 }
2659
2660
2661
2662
2663 if (fl_large_pg)
2664 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2665 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2666 ? 128 : 64);
2667 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2668 s->fl_align = t4vf_fl_pkt_align(adapter);
2669
2670
2671
2672
2673
2674
2675
2676
2677 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
2678 case CHELSIO_T4:
2679 s->fl_starve_thres =
2680 EGRTHRESHOLD_G(sge_params->sge_congestion_control);
2681 break;
2682 case CHELSIO_T5:
2683 s->fl_starve_thres =
2684 EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2685 break;
2686 case CHELSIO_T6:
2687 default:
2688 s->fl_starve_thres =
2689 T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2690 break;
2691 }
2692 s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
2693
2694
2695
2696
2697 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
2698 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
2699
2700
2701
2702
2703 spin_lock_init(&s->intrq_lock);
2704
2705 return 0;
2706 }