// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <net/ip6_checksum.h>

#include "net_driver.h"
#include "tx_common.h"
#include "nic_common.h"
#include "mcdi_functions.h"
#include "ef100_regs.h"
#include "io.h"
#include "ef100_tx.h"
#include "ef100_nic.h"

int ef100_tx_probe(struct efx_tx_queue *tx_queue)
{
	/* Allocate an extra descriptor for the QMDA status completion entry */
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 2) *
				    sizeof(efx_oword_t),
				    GFP_KERNEL);
}

void ef100_tx_init(struct efx_tx_queue *tx_queue)
{
	/* must be the inverse of lookup in efx_get_tx_channel */
	tx_queue->core_txq =
		netdev_get_tx_queue(tx_queue->efx->net_dev,
				    tx_queue->channel->channel -
				    tx_queue->efx->tx_channel_offset);

	/* EF100 only ever performs TSO with descriptor-based ("version 3")
	 * offload, and has no TXQ type separation, so this value is purely
	 * informational.
	 */
	tx_queue->tso_version = 3;
	if (efx_mcdi_tx_init(tx_queue))
		netdev_WARN(tx_queue->efx->net_dev,
			    "failed to initialise TXQ %d\n", tx_queue->queue);
}

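/* Check whether this skb can be handled by the hardware's TSO engine, given
 * the limits the NIC advertises. If it can, reserve the insert buffer that
 * will carry the TSO descriptor, recording the header length in it.
 */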
static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct ef100_nic_data *nic_data;
	struct efx_tx_buffer *buffer;
	size_t header_len;
	u32 mss;

	nic_data = efx->nic_data;

	if (!skb_is_gso_tcp(skb))
		return false;
	if (!(efx->net_dev->features & NETIF_F_TSO))
		return false;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(mss < 4)) {
		WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss);
		return false;
	}

	header_len = efx_tx_tso_header_length(skb);
	if (header_len > nic_data->tso_max_hdr_len)
		return false;

	if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) {
		/* net_dev->gso_max_segs should've caught this */
		WARN_ON_ONCE(1);
		return false;
	}

	if (skb->data_len / mss > nic_data->tso_max_frames)
		return false;

	/* net_dev->gso_max_size should've caught this */
	if (WARN_ON_ONCE(skb->data_len > nic_data->tso_max_payload_len))
		return false;

	/* Reserve an empty buffer for the TSO V3 descriptor.
	 * Convey the length of the header since we already know it.
	 */
	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
	buffer->flags = EFX_TX_BUF_TSO_V3 | EFX_TX_BUF_CONT;
	buffer->len = header_len;
	buffer->unmap_len = 0;
	buffer->skb = skb;
	++tx_queue->insert_count;
	return true;
}

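/* Look up the descriptor at @index in the TX ring, or return NULL if the
 * ring buffer has not been allocated.
 */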
static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	if (likely(tx_queue->txd.buf.addr))
		return ((efx_oword_t *)tx_queue->txd.buf.addr) + index;
	else
		return NULL;
}

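/* Tell the hardware about newly written descriptors by writing the ring's
 * producer index to the TX doorbell register.
 */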
static void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	tx_queue->xmit_pending = false;

	if (unlikely(tx_queue->notify_count == tx_queue->write_count))
		return;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;

	EFX_POPULATE_DWORD_1(reg, ERF_GZ_TX_RING_PIDX, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_GZ_TX_RING_DOORBELL, tx_queue->queue);
	tx_queue->notify_count = tx_queue->write_count;
}

static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue)
{
	ef100_notify_tx_desc(tx_queue);
	++tx_queue->pushes;
}

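/* Add checksum-offload fields to a SEND descriptor for a CHECKSUM_PARTIAL
 * skb. The start and result offsets are given to the hardware in 16-bit
 * words, hence the shifts.
 */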
static void ef100_set_tx_csum_partial(const struct sk_buff *skb,
				      struct efx_tx_buffer *buffer, efx_oword_t *txd)
{
	efx_oword_t csum;
	int csum_start;

	if (!skb || skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	/* skb->csum_start has the offset from head, but we need the offset
	 * from data.
	 */
	csum_start = skb_checksum_start_offset(skb);
	EFX_POPULATE_OWORD_3(csum,
			     ESF_GZ_TX_SEND_CSO_PARTIAL_EN, 1,
			     ESF_GZ_TX_SEND_CSO_PARTIAL_START_W,
			     csum_start >> 1,
			     ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W,
			     skb->csum_offset >> 1);
	EFX_OR_OWORD(*txd, *txd, csum);
}

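/* Add hardware VLAN-tag insertion fields to a SEND descriptor */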
static void ef100_set_tx_hw_vlan(const struct sk_buff *skb, efx_oword_t *txd)
{
	u16 vlan_tci = skb_vlan_tag_get(skb);
	efx_oword_t vlan;

	EFX_POPULATE_OWORD_2(vlan,
			     ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1,
			     ESF_GZ_TX_SEND_VLAN_INSERT_TCI, vlan_tci);
	EFX_OR_OWORD(*txd, *txd, vlan);
}

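/* Build a SEND descriptor for one DMA buffer, adding checksum-offload and
 * VLAN-insertion fields if the netdev features call for them.
 */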
static void ef100_make_send_desc(struct efx_nic *efx,
				 const struct sk_buff *skb,
				 struct efx_tx_buffer *buffer, efx_oword_t *txd,
				 unsigned int segment_count)
{
	/* TX send descriptor */
	EFX_POPULATE_OWORD_3(*txd,
			     ESF_GZ_TX_SEND_NUM_SEGS, segment_count,
			     ESF_GZ_TX_SEND_LEN, buffer->len,
			     ESF_GZ_TX_SEND_ADDR, buffer->dma_addr);

	if (likely(efx->net_dev->features & NETIF_F_HW_CSUM))
		ef100_set_tx_csum_partial(skb, buffer, txd);
	if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
	    skb && skb_vlan_tag_present(skb))
		ef100_set_tx_hw_vlan(skb, txd);
}

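/* Build the TSO descriptor that precedes the header and payload SEG
 * descriptors of a TSO transmission.
 */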
static void ef100_make_tso_desc(struct efx_nic *efx,
				const struct sk_buff *skb,
				struct efx_tx_buffer *buffer, efx_oword_t *txd,
				unsigned int segment_count)
{
	bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
	unsigned int len, ip_offset, tcp_offset, payload_segs;
	u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
	unsigned int outer_ip_offset, outer_l4_offset;
	u16 vlan_tci = skb_vlan_tag_get(skb);
	u32 mss = skb_shinfo(skb)->gso_size;
	bool encap = skb->encapsulation;
	bool udp_encap = false;
	u16 vlan_enable = 0;
	struct tcphdr *tcp;
	bool outer_csum;
	u32 paylen;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
		mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
	if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX)
		vlan_enable = skb_vlan_tag_present(skb);

	len = skb->len - buffer->len;
	/* The segment count includes the TSO descriptor itself and the
	 * header descriptor; neither carries payload.
	 */
	payload_segs = segment_count - 2;
	if (encap) {
		outer_ip_offset = skb_network_offset(skb);
		outer_l4_offset = skb_transport_offset(skb);
		ip_offset = skb_inner_network_offset(skb);
		tcp_offset = skb_inner_transport_offset(skb);
		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))
			udp_encap = true;
	} else {
		ip_offset = skb_network_offset(skb);
		tcp_offset = skb_transport_offset(skb);
		outer_ip_offset = outer_l4_offset = 0;
	}
	outer_csum = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM;

	/* subtract TCP payload length from inner checksum */
	tcp = (void *)skb->data + tcp_offset;
	paylen = skb->len - tcp_offset;
	csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));

	EFX_POPULATE_OWORD_19(*txd,
			      ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO,
			      ESF_GZ_TX_TSO_MSS, mss,
			      ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1,
			      ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs,
			      ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1,
			      ESF_GZ_TX_TSO_PAYLOAD_LEN, len,
			      ESF_GZ_TX_TSO_CSO_OUTER_L4, outer_csum,
			      ESF_GZ_TX_TSO_CSO_INNER_L4, 1,
			      ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1,
			      ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1,
			      ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid,
			      ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
			      ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_ip_offset >> 1,
			      ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1,
			      ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial,
			      ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial,
			      ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid :
								     ESE_GZ_TX_DESC_IP4_ID_NO_OP,
			      ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
			      ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
			      );
}

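/* Write out TX descriptors for everything between the queue's write_count
 * and insert_count: a prefix (override) descriptor if sending via a
 * representor, then either a single SEND descriptor or a TSO descriptor
 * followed by SEG descriptors.
 */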
static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
				      const struct sk_buff *skb,
				      unsigned int segment_count,
				      struct efx_rep *efv)
{
	unsigned int old_write_count = tx_queue->write_count;
	unsigned int new_write_count = old_write_count;
	struct efx_tx_buffer *buffer;
	unsigned int next_desc_type;
	unsigned int write_ptr;
	efx_oword_t *txd;
	unsigned int nr_descs = tx_queue->insert_count - old_write_count;

	if (unlikely(nr_descs == 0))
		return;

	if (segment_count)
		next_desc_type = ESE_GZ_TX_DESC_TYPE_TSO;
	else
		next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND;

	if (unlikely(efv)) {
		/* Create TX override descriptor */
		write_ptr = new_write_count & tx_queue->ptr_mask;
		txd = ef100_tx_desc(tx_queue, write_ptr);
		++new_write_count;

		tx_queue->packet_write_count = new_write_count;
		EFX_POPULATE_OWORD_3(*txd,
				     ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX,
				     ESF_GZ_TX_PREFIX_EGRESS_MPORT, efv->mport,
				     ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1);
		nr_descs--;
	}

	/* if it's a raw write (such as XDP) then always SEND single frames */
	if (!skb)
		nr_descs = 1;

	do {
		write_ptr = new_write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = ef100_tx_desc(tx_queue, write_ptr);
		++new_write_count;

		/* Create TX descriptor ring entry */
		tx_queue->packet_write_count = new_write_count;

		switch (next_desc_type) {
		case ESE_GZ_TX_DESC_TYPE_SEND:
			ef100_make_send_desc(tx_queue->efx, skb,
					     buffer, txd, nr_descs);
			break;
		case ESE_GZ_TX_DESC_TYPE_TSO:
			/* TX TSO descriptor */
			WARN_ON_ONCE(!(buffer->flags & EFX_TX_BUF_TSO_V3));
			ef100_make_tso_desc(tx_queue->efx, skb,
					    buffer, txd, nr_descs);
			break;
		default:
			/* TX segment descriptor */
			EFX_POPULATE_OWORD_3(*txd,
					     ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG,
					     ESF_GZ_TX_SEG_LEN, buffer->len,
					     ESF_GZ_TX_SEG_ADDR, buffer->dma_addr);
		}

		next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
				       ESE_GZ_TX_DESC_TYPE_SEND;

		if (unlikely(efv))
			buffer->flags |= EFX_TX_BUF_EFV;

	} while (new_write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	tx_queue->write_count = new_write_count;

	/* The write_count update above must be ordered before any later
	 * reads of read_count in the fill-level checks; store-load ordering
	 * needs a full barrier, not just wmb().
	 */
	smp_mb();
}

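/* Push out descriptors for a raw write, i.e. one with no skb (such as the
 * XDP transmit path), then ring the doorbell.
 */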
void ef100_tx_write(struct efx_tx_queue *tx_queue)
{
	ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL);
	ef100_tx_push_buffers(tx_queue);
}

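/* Handle a TX completion event: it reports the number of completed
 * descriptors for a queue label. Compute the ring index of the last
 * completed descriptor and hand it to efx_xmit_done().
 */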
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
{
	unsigned int tx_done =
		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC);
	unsigned int qlabel =
		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_Q_LABEL);
	struct efx_tx_queue *tx_queue =
		efx_channel_get_tx_queue(channel, qlabel);
	unsigned int tx_index = (tx_queue->read_count + tx_done - 1) &
				tx_queue->ptr_mask;

	efx_xmit_done(tx_queue, tx_index);
}

/* Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and creates the
 * descriptors needed to transmit it.
 *
 * Returns 0 on success, error code otherwise. In case of an error this
 * function will free the SKB.
 */
int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	return __ef100_enqueue_skb(tx_queue, skb, NULL);
}

int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			struct efx_rep *efv)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	struct efx_nic *efx = tx_queue->efx;
	bool xmit_more = netdev_xmit_more();
	unsigned int fill_level;
	unsigned int segments;
	int rc;

	if (!tx_queue->buffer || !tx_queue->ptr_mask) {
		netif_stop_queue(efx->net_dev);
		dev_kfree_skb_any(skb);
		return -ENODEV;
	}

	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0;	/* Don't use TSO for a single segment. */
	if (segments && !ef100_tx_can_tso(tx_queue, skb)) {
		rc = efx_tx_tso_fallback(tx_queue, skb);
		tx_queue->tso_fallbacks++;
		if (rc)
			goto err;
		else
			return 0;
	}

	if (unlikely(efv)) {
		struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue);

		/* Drop representor packets if the queue is stopped.
		 * We currently don't assert backoff to representors so this is
		 * to make sure representor traffic can't starve the main
		 * net device.
		 * And, of course, if there are no TX descriptors left.
		 */
		if (netif_tx_queue_stopped(tx_queue->core_txq) ||
		    unlikely(efx_tx_buffer_in_use(buffer))) {
			atomic64_inc(&efv->stats.tx_errors);
			rc = -ENOSPC;
			goto err;
		}

		/* Also drop the packet (rather than stopping the queue) if
		 * transmitting it could take the queue past the stop
		 * threshold.
		 */
		fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
		fill_level += efx_tx_max_skb_descs(efx);
		if (fill_level > efx->txq_stop_thresh) {
			struct efx_tx_queue *txq2;

			/* Refresh cached fill level and re-check */
			efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
				txq2->old_read_count = READ_ONCE(txq2->read_count);

			fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
			fill_level += efx_tx_max_skb_descs(efx);
			if (fill_level > efx->txq_stop_thresh) {
				atomic64_inc(&efv->stats.tx_errors);
				rc = -ENOSPC;
				goto err;
			}
		}

		buffer->flags = EFX_TX_BUF_OPTION | EFX_TX_BUF_EFV;
		tx_queue->insert_count++;
	}

	/* Map for DMA and create descriptors */
	rc = efx_tx_map_data(tx_queue, skb, segments);
	if (rc)
		goto err;
	ef100_tx_make_descriptors(tx_queue, skb, segments, efv);

	fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
	if (fill_level > efx->txq_stop_thresh) {
		struct efx_tx_queue *txq2;

		/* Because of checks above, representor traffic should
		 * not be able to stop the queue.
		 */
		WARN_ON(efv);

		netif_tx_stop_queue(tx_queue->core_txq);
		/* Re-read after a memory barrier in case we've raced with
		 * the completion path. Otherwise there's a danger we'll never
		 * restart the queue if all completions have just happened.
		 */
		smp_mb();
		efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
			txq2->old_read_count = READ_ONCE(txq2->read_count);
		fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
		if (fill_level < efx->txq_stop_thresh)
			netif_tx_start_queue(tx_queue->core_txq);
	}

	tx_queue->xmit_pending = true;

	/* If xmit_more then we don't need to push the doorbell, unless there
	 * are 256 descriptors already queued in which case we have to push to
	 * ensure we never push more than 256 at once.
	 * Representor (efv) packets skip the core TXQ's byte-queue accounting
	 * entirely, so they are always pushed immediately.
	 */
	if (unlikely(efv) ||
	    __netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
	    tx_queue->write_count - tx_queue->notify_count > 255)
		ef100_tx_push_buffers(tx_queue);

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets += segments;
	} else {
		tx_queue->tx_packets++;
	}
	return 0;

err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	if (!IS_ERR_OR_NULL(skb))
		dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue, then we need to push here to get the previous packets
	 * out.
	 */
	if (tx_queue->xmit_pending && !xmit_more)
		ef100_tx_push_buffers(tx_queue);
	return rc;
}