/*
 * This file contains HFI1 support for VNIC SDMA functionality: it builds
 * SDMA transmit requests for VNIC packets and posts them to the SDMA engines.
 */

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE		BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED	BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN	32
#define HFI1_VNIC_SDMA_DESC_WTRMRK	64
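/*
 * struct vnic_txreq - VNIC transmit request
 * @txreq:   sdma transmit request
 * @sdma:    vnic sdma pointer
 * @skb:     skb to send
 * @pad:     pad buffer
 * @plen:    pad length
 * @pbc_val: pbc value
 */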
struct vnic_txreq {
	struct sdma_txreq       txreq;
	struct hfi1_vnic_sdma   *sdma;

	struct sk_buff          *skb;
	unsigned char           pad[HFI1_VNIC_MAX_PAD];
	u16                     plen;
	__le64                  pbc_val;
};

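/* SDMA completion callback: clean up the descriptors, then free the skb and txreq */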
static void vnic_sdma_complete(struct sdma_txreq *txreq,
			       int status)
{
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
	struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

	sdma_txclean(vnic_sdma->dd, txreq);
	dev_kfree_skb_any(tx->skb);
	kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}

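/*
 * Map the skb linear data, each page fragment, and any trailing pad
 * into SDMA descriptors for this transmit request.
 */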
static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
					   struct vnic_txreq *tx)
{
	int i, ret = 0;

	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		tx->skb->data,
		skb_headlen(tx->skb));
	if (unlikely(ret))
		goto bail_txadd;

	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];

		/* add each page fragment of the skb */
		ret = sdma_txadd_page(sde->dd,
				      &tx->txreq,
				      skb_frag_page(frag),
				      skb_frag_off(frag),
				      skb_frag_size(frag));
		if (unlikely(ret))
			goto bail_txadd;
	}

	/* add the trailing pad bytes, if any */
	if (tx->plen)
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
					tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
					tx->plen);

bail_txadd:
	return ret;
}

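/*
 * Initialize the txreq, then add the PBC followed by the ULP payload
 * as SDMA descriptors.
 */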
static int build_vnic_tx_desc(struct sdma_engine *sde,
			      struct vnic_txreq *tx,
			      u64 pbc)
{
	int ret = 0;
	u16 hdrbytes = 2 << 2; /* PBC is 8 bytes (2 dwords) */

	ret = sdma_txinit_ahg(
		&tx->txreq,
		0,
		hdrbytes + tx->skb->len + tx->plen,
		0,
		0,
		NULL,
		0,
		vnic_sdma_complete);
	if (unlikely(ret))
		goto bail_txadd;

	/* add pbc */
	tx->pbc_val = cpu_to_le64(pbc);
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		&tx->pbc_val,
		hdrbytes);
	if (unlikely(ret))
		goto bail_txadd;

	/* add the ulp payload */
	ret = build_vnic_ulp_payload(sde, tx);
bail_txadd:
	return ret;
}

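/* Record the pad length (minus the ICRC tail) in the last pad byte */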
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
	pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}

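/*
 * hfi1_vnic_send_dma - send a vnic packet over an SDMA engine
 *
 * Builds the PBC and payload descriptors for @skb and posts them on the
 * SDMA engine backing transmit queue @q_idx.  Returns 0 on success and
 * -EBUSY when no descriptors are available; in that case the skb is not
 * freed and the queue stays deferred until the wakeup callback runs.
 * Other errors free the skb.
 */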
int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
		       struct hfi1_vnic_vport_info *vinfo,
		       struct sk_buff *skb, u64 pbc, u8 plen)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
	struct sdma_engine *sde = vnic_sdma->sde;
	struct vnic_txreq *tx;
	int ret = -ECOMM;

	if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
		goto tx_err;

	if (unlikely(!sde || !sdma_running(sde)))
		goto tx_err;

	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
	if (unlikely(!tx)) {
		ret = -ENOMEM;
		goto tx_err;
	}

	tx->sdma = vnic_sdma;
	tx->skb = skb;
	hfi1_vnic_update_pad(tx->pad, plen);
	tx->plen = plen;
	ret = build_vnic_tx_desc(sde, tx, pbc);
	if (unlikely(ret))
		goto free_desc;

	ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
			      &tx->txreq, vnic_sdma->pkts_sent);

	/* on -ECOMM the sdma completion callback will clean up the request */
	if (unlikely(ret && unlikely(ret != -ECOMM)))
		goto free_desc;

	if (!ret) {
		vnic_sdma->pkts_sent = true;
		iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
	}
	return ret;

free_desc:
	sdma_txclean(dd, &tx->txreq);
	kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
	if (ret != -EBUSY)
		dev_kfree_skb_any(skb);
	else
		vnic_sdma->pkts_sent = false;
	return ret;
}
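/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * Called when no SDMA descriptors are available for a vnic packet.
 * If the engine has not made progress, mark the queue deferred and put
 * it on the engine's dmawait list; the wakeup callback marks it active
 * again once descriptors free up.
 */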
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
				struct iowait_work *wait,
				struct sdma_txreq *txreq,
				uint seq,
				bool pkts_sent)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait->iow, struct hfi1_vnic_sdma, wait);

	write_seqlock(&sde->waitlock);
	if (sdma_progress(sde, seq, txreq)) {
		write_sequnlock(&sde->waitlock);
		return -EAGAIN;
	}

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
	if (list_empty(&vnic_sdma->wait.list)) {
		iowait_get_priority(wait->iow);
		iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
	}
	write_sequnlock(&sde->waitlock);
	return -EBUSY;
}
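/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * Called when SDMA descriptors become available again.  Mark the queue
 * active and, if the corresponding netdev subqueue was stopped, wake it
 * so transmission can resume.
 */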
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
	if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
		netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}

inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
				       u8 q_idx)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

	return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}
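
/*
 * Illustrative only (not part of this driver): a transmit handler would
 * typically check hfi1_vnic_sdma_write_avail() before queueing a packet
 * and stop the subqueue when hfi1_vnic_send_dma() returns -EBUSY, e.g.:
 *
 *	if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx)) {
 *		netif_stop_subqueue(vinfo->netdev, q_idx);
 *		return NETDEV_TX_BUSY;
 *	}
 *	err = hfi1_vnic_send_dma(dd, q_idx, vinfo, skb, pbc, plen);
 *	if (err == -EBUSY)
 *		netif_stop_subqueue(vinfo->netdev, q_idx);
 *
 * The wakeup callback above re-enables the subqueue once descriptors
 * become available again.
 */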

void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

		iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
			    hfi1_vnic_sdma_sleep,
			    hfi1_vnic_sdma_wakeup, NULL, NULL);
		vnic_sdma->sde = &vinfo->dd->per_sdma[i];
		vnic_sdma->dd = vinfo->dd;
		vnic_sdma->vinfo = vinfo;
		vnic_sdma->q_idx = i;
		vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

		/* add a free-descriptor watermark so the queue is woken early */
		if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
			struct iowait_work *work;

			INIT_LIST_HEAD(&vnic_sdma->stx.list);
			vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
			work = iowait_get_ib_work(&vnic_sdma->wait);
			list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
		}
	}
}

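/*
 * Create the per-device kmem cache from which vnic_txreq structures
 * are allocated on the transmit path.
 */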
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
	char buf[HFI1_VNIC_TXREQ_NAME_LEN];

	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
	dd->vnic.txreq_cache = kmem_cache_create(buf,
						 sizeof(struct vnic_txreq),
						 0, SLAB_HWCACHE_ALIGN,
						 NULL);
	if (!dd->vnic.txreq_cache)
		return -ENOMEM;
	return 0;
}

void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
	kmem_cache_destroy(dd->vnic.txreq_cache);
	dd->vnic.txreq_cache = NULL;
}