#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
#include "ef10_regs.h"
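/* Firmware-assisted TCP segmentation offload for EF10-family NICs
 * ("TSO v1").  The GSO super-packet is split into gso_size-sized
 * segments; for each segment we emit a TSO option descriptor carrying
 * that segment's TCP flags, IPv4 ID and sequence number, followed by a
 * descriptor pointing at the original packet headers and then the
 * payload descriptors, so that the NIC can rewrite the headers for
 * each segment on transmission.
 */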
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
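/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address
 * @header_unmap_len: Header DMA mapped length
 *
 * The state used during segmentation.  It is put into this data
 * structure just to make it easy to pass between functions.
 */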
struct tso_state {
        /* Output position */
        unsigned int out_len;
        unsigned int seqnum;
        u16 ipv4_id;
        unsigned int packet_space;

        /* Input position */
        dma_addr_t dma_addr;
        unsigned int in_len;
        unsigned int unmap_len;
        dma_addr_t unmap_addr;

        __be16 protocol;
        unsigned int ip_off;
        unsigned int tcp_off;
        unsigned int header_len;
        unsigned int ip_base_len;
        dma_addr_t header_dma_addr;
        unsigned int header_unmap_len;
};

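/* Prefetch the software descriptor and the hardware descriptor that the
 * next insertion into the TX queue will touch.
 */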
static inline void prefetch_ptr(struct efx_tx_queue *tx_queue)
{
        unsigned int insert_ptr = efx_tx_queue_get_insert_index(tx_queue);
        char *ptr;

        ptr = (char *) (tx_queue->buffer + insert_ptr);
        prefetch(ptr);
        prefetch(ptr + 0x80);

        ptr = (char *) (((efx_qword_t *)tx_queue->txd.buf.addr) + insert_ptr);
        prefetch(ptr);
        prefetch(ptr + 0x80);
}
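/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:     Efx TX queue
 * @dma_addr:     DMA address of fragment
 * @len:          Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Split the fragment at the controller's DMA length limit and push the
 * resulting descriptors onto the TX queue.
 */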
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                                dma_addr_t dma_addr, unsigned int len,
                                struct efx_tx_buffer **final_buffer)
{
        struct efx_tx_buffer *buffer;
        unsigned int dma_len;

        EFX_WARN_ON_ONCE_PARANOID(len <= 0);

        while (1) {
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
                ++tx_queue->insert_count;

                EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count -
                                          tx_queue->read_count >=
                                          tx_queue->efx->txq_entries);

                buffer->dma_addr = dma_addr;

                dma_len = tx_queue->efx->type->tx_limit_len(tx_queue,
                                                            dma_addr, len);

                /* If there's space for everything, this is our last buffer */
                if (dma_len >= len)
                        break;

                buffer->len = dma_len;
                buffer->flags = EFX_TX_BUF_CONT;
                dma_addr += dma_len;
                len -= dma_len;
        }

        EFX_WARN_ON_ONCE_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
}
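/* Verify our assumptions about the SKB and the conditions under which
 * TSO is attempted, and return the network protocol number (looking
 * through any VLAN tag).
 */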
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
        __be16 protocol = skb->protocol;

        EFX_WARN_ON_ONCE_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
                                  protocol);
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

                protocol = veh->h_vlan_encapsulated_proto;
        }

        if (protocol == htons(ETH_P_IP)) {
                EFX_WARN_ON_ONCE_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
        } else {
                EFX_WARN_ON_ONCE_PARANOID(protocol != htons(ETH_P_IPV6));
                EFX_WARN_ON_ONCE_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
        }
        EFX_WARN_ON_ONCE_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) +
                                   (tcp_hdr(skb)->doff << 2u)) >
                                  skb_headlen(skb));

        return protocol;
}
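/* Parse the SKB header and initialise the segmentation state, mapping
 * the linear area of the SKB for DMA.
 */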
static int tso_start(struct tso_state *st, struct efx_nic *efx,
                     struct efx_tx_queue *tx_queue,
                     const struct sk_buff *skb)
{
        struct device *dma_dev = &efx->pci_dev->dev;
        unsigned int header_len, in_len;
        dma_addr_t dma_addr;

        st->ip_off = skb_network_header(skb) - skb->data;
        st->tcp_off = skb_transport_header(skb) - skb->data;
        header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
        in_len = skb_headlen(skb) - header_len;
        st->header_len = header_len;
        st->in_len = in_len;
        if (st->protocol == htons(ETH_P_IP)) {
                st->ip_base_len = st->header_len - st->ip_off;
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
        } else {
                st->ip_base_len = st->header_len - st->tcp_off;
                st->ipv4_id = 0;
        }
        st->seqnum = ntohl(tcp_hdr(skb)->seq);

        EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->urg);
        EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->syn);
        EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->rst);

        st->out_len = skb->len - header_len;

        dma_addr = dma_map_single(dma_dev, skb->data,
                                  skb_headlen(skb), DMA_TO_DEVICE);
        st->header_dma_addr = dma_addr;
        st->header_unmap_len = skb_headlen(skb);
        st->dma_addr = dma_addr + header_len;
        st->unmap_len = 0;

        return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}

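/* Map the next payload fragment for DMA and make it the current input. */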
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
                            skb_frag_t *frag)
{
        st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
                                          skb_frag_size(frag), DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
                st->unmap_len = skb_frag_size(frag);
                st->in_len = skb_frag_size(frag);
                st->dma_addr = st->unmap_addr;
                return 0;
        }
        return -ENOMEM;
}
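/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb:      Socket buffer
 * @st:       TSO state
 *
 * Form descriptors for the current fragment, until we reach the end of
 * the fragment or end-of-packet.
 */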
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
                                          const struct sk_buff *skb,
                                          struct tso_state *st)
{
        struct efx_tx_buffer *buffer;
        int n;

        if (st->in_len == 0)
                return;
        if (st->packet_space == 0)
                return;

        EFX_WARN_ON_ONCE_PARANOID(st->in_len <= 0);
        EFX_WARN_ON_ONCE_PARANOID(st->packet_space <= 0);

        n = min(st->in_len, st->packet_space);

        st->packet_space -= n;
        st->out_len -= n;
        st->in_len -= n;

        efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

        if (st->out_len == 0) {
                /* Transfer ownership of the skb */
                buffer->skb = skb;
                buffer->flags = EFX_TX_BUF_SKB;
        } else if (st->packet_space != 0) {
                buffer->flags = EFX_TX_BUF_CONT;
        }

        if (st->in_len == 0) {
                /* Transfer ownership of the DMA mapping */
                buffer->unmap_len = st->unmap_len;
                buffer->dma_offset = buffer->unmap_len - buffer->len;
                st->unmap_len = 0;
        }

        st->dma_addr += n;
}
/* Byte offset of the TCP flags within the TCP header */
#define TCP_FLAGS_OFFSET 13
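/**
 * tso_start_new_packet - begin a new output segment
 * @tx_queue: Efx TX queue
 * @skb:      Socket buffer
 * @st:       TSO state
 *
 * Emit a TSO option descriptor carrying this segment's TCP flags, IPv4
 * ID and sequence number, followed by a descriptor for the original
 * headers, then advance the sequence number and IPv4 ID for the next
 * segment.  Return 0 on success or a negative error code on failure.
 */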
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                const struct sk_buff *skb,
                                struct tso_state *st)
{
        struct efx_tx_buffer *buffer =
                efx_tx_queue_get_insert_buffer(tx_queue);
        bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
        u8 tcp_flags_mask, tcp_flags;

        if (!is_last) {
                st->packet_space = skb_shinfo(skb)->gso_size;
                tcp_flags_mask = 0x09; /* mask out FIN and PSH */
        } else {
                st->packet_space = st->out_len;
                tcp_flags_mask = 0x00;
        }

        if (WARN_ON(!st->header_unmap_len))
                return -EINVAL;

        /* Send the original headers with a TSO option descriptor
         * in front
         */
        tcp_flags = ((u8 *)tcp_hdr(skb))[TCP_FLAGS_OFFSET] & ~tcp_flags_mask;

        buffer->flags = EFX_TX_BUF_OPTION;
        buffer->len = 0;
        buffer->unmap_len = 0;
        EFX_POPULATE_QWORD_5(buffer->option,
                             ESF_DZ_TX_DESC_IS_OPT, 1,
                             ESF_DZ_TX_OPTION_TYPE,
                             ESE_DZ_TX_OPTION_DESC_TSO,
                             ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
                             ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
                             ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
        ++tx_queue->insert_count;

        /* The headers were mapped in tso_start(); unmap them only when
         * the last segment has been written out.
         */
        buffer = efx_tx_queue_get_insert_buffer(tx_queue);
        buffer->dma_addr = st->header_dma_addr;
        buffer->len = st->header_len;
        if (is_last) {
                buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
                buffer->unmap_len = st->header_unmap_len;
                buffer->dma_offset = 0;
                /* Ensure we only unmap the headers once, in case of a
                 * later DMA mapping error and rollback.
                 */
                st->header_unmap_len = 0;
        } else {
                buffer->flags = EFX_TX_BUF_CONT;
                buffer->unmap_len = 0;
        }
        ++tx_queue->insert_count;

        st->seqnum += skb_shinfo(skb)->gso_size;

        /* Each segment consumes the next consecutive IPv4 ID */
        ++st->ipv4_id;

        return 0;
}
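/**
 * efx_enqueue_skb_tso - segment and enqueue a TSO socket buffer
 * @tx_queue:    Efx TX queue
 * @skb:         Socket buffer
 * @data_mapped: Set to true once the payload has been DMA-mapped
 *
 * Segment @skb and push the resulting descriptors onto @tx_queue.
 * Returns 0 on success, -EINVAL if the queue does not support TSO v1,
 * or another negative error code on failure.
 */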
int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                        struct sk_buff *skb,
                        bool *data_mapped)
{
        struct efx_nic *efx = tx_queue->efx;
        int frag_i, rc;
        struct tso_state state;

        if (tx_queue->tso_version != 1)
                return -EINVAL;

        prefetch(skb->data);

        /* Find the packet protocol and sanity-check it */
        state.protocol = efx_tso_check_protocol(skb);

        EFX_WARN_ON_ONCE_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        rc = tso_start(&state, efx, tx_queue, skb);
        if (rc)
                goto fail;

        if (likely(state.in_len == 0)) {
                /* Grab the first payload fragment. */
                EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1);
                frag_i = 0;
                rc = tso_get_fragment(&state, efx,
                                      skb_shinfo(skb)->frags + frag_i);
                if (rc)
                        goto fail;
        } else {
                /* Payload starts in the header area. */
                frag_i = -1;
        }

        rc = tso_start_new_packet(tx_queue, skb, &state);
        if (rc)
                goto fail;

        prefetch_ptr(tx_queue);

        while (1) {
                tso_fill_packet_with_fragment(tx_queue, skb, &state);

                /* Move onto the next fragment? */
                if (state.in_len == 0) {
                        if (++frag_i >= skb_shinfo(skb)->nr_frags)
                                /* End of payload reached. */
                                break;
                        rc = tso_get_fragment(&state, efx,
                                              skb_shinfo(skb)->frags + frag_i);
                        if (rc)
                                goto fail;
                }

                /* Start a new output segment? */
                if (state.packet_space == 0) {
                        rc = tso_start_new_packet(tx_queue, skb, &state);
                        if (rc)
                                goto fail;
                }
        }

        *data_mapped = true;

        return 0;

fail:
        if (rc == -ENOMEM)
                netif_err(efx, tx_err, efx->net_dev,
                          "Out of memory for TSO headers, or DMA mapping error\n");
        else
                netif_err(efx, tx_err, efx->net_dev, "TSO failed, rc = %d\n", rc);

        /* Unmap the fragment we were in the middle of writing out */
        if (state.unmap_len) {
                dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
                               state.unmap_len, DMA_TO_DEVICE);
        }

        /* Unmap the header DMA mapping set up in tso_start() */
        if (state.header_unmap_len)
                dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
                                 state.header_unmap_len, DMA_TO_DEVICE);

        return rc;
}