#include <linux/filter.h>

#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
#include "ice_lib.h"
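
/**
 * ice_release_rx_desc - update ring state and notify hardware of new Rx descriptors
 * @rx_ring: ring to bump
 * @val: new value for next_to_use
 */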
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}
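
/**
 * ice_ptype_to_htype - map a decoded Rx packet type to a kernel hash type
 * @ptype: the ptype value from the Rx descriptor
 *
 * Returns the appropriate hash type (L2/L3/L4) to be passed to skb_set_hash().
 */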
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return PKT_HASH_TYPE_L2;

	return PKT_HASH_TYPE_NONE;
}
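
/**
 * ice_rx_hash - set the RSS hash in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */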
static void
ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u16 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
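
/**
 * ice_rx_csum - validate hardware checksum results and mark the skb
 * @ring: the Rx ring the packet arrived on
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 */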
static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}
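
/**
 * ice_process_skb_fields - populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, protocol, and timestamp fields
 * within the skb.
 */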
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);

	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}
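
/**
 * ice_receive_skb - send a completed packet up the stack
 * @rx_ring: Rx ring the packet arrived on
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for the packet
 *
 * Pushes the VLAN tag into the skb if VLAN Rx offload is enabled and hands
 * the packet to the GRO layer via napi_gro_receive().
 */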
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	netdev_features_t features = rx_ring->netdev->features;
	bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
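
/**
 * ice_clean_xdp_irq - reclaim completed Tx buffers on an XDP ring
 * @xdp_ring: XDP ring to clean
 */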
static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	u16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *next_dd_desc;
	u16 next_dd = xdp_ring->next_dd;
	struct ice_tx_buf *tx_buf;
	int i;

	next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
	if (!(next_dd_desc->cmd_type_offset_bsz &
	      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
		return;

	for (i = 0; i < tx_thresh; i++) {
		tx_buf = &xdp_ring->tx_buf[ntc];

		total_bytes += tx_buf->bytecount;
		/* normally tx_buf->gso_segs would be counted, but at this
		 * point it is always 1 for XDP frames
		 */
		total_pkts++;

		page_frag_free(tx_buf->raw_buf);
		dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buf, len, 0);
		tx_buf->raw_buf = NULL;

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	next_dd_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
	if (xdp_ring->next_dd > xdp_ring->count)
		xdp_ring->next_dd = tx_thresh - 1;
	xdp_ring->next_to_clean = ntc;
	ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
}
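
/**
 * ice_xmit_xdp_ring - submit a single packet to the XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 */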
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
		ice_clean_xdp_irq(xdp_ring);

	if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
						      size, 0);

	xdp_ring->xdp_tx_active++;
	i++;
	if (i == xdp_ring->count) {
		i = 0;
		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
		tx_desc->cmd_type_offset_bsz |=
			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
		xdp_ring->next_rs = tx_thresh - 1;
	}
	xdp_ring->next_to_use = i;

	if (i > xdp_ring->next_rs) {
		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
		tx_desc->cmd_type_offset_bsz |=
			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
		xdp_ring->next_rs += tx_thresh;
	}

	return ICE_XDP_TX;
}
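
/**
 * ice_xmit_xdp_buff - convert an XDP buffer to a frame and send it on the XDP ring
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_TX on success, ICE_XDP_CONSUMED if the conversion or
 * transmit fails.
 */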
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}
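
/**
 * ice_finalize_xdp_rx - finalize XDP actions at the end of an Rx napi poll
 * @xdp_ring: XDP ring used for the XDP_TX action
 * @xdp_res: OR'ed result of all XDP verdicts in the napi batch
 *
 * Flushes pending XDP_REDIRECT maps and bumps the XDP ring tail if at least
 * one frame was queued for XDP_TX.
 */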
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		ice_xdp_ring_update_tail(xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
	}
}