0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include "net_driver.h"
0012 #include "ef100_rx.h"
0013 #include "rx_common.h"
0014 #include "efx.h"
0015 #include "nic_common.h"
0016 #include "mcdi_functions.h"
0017 #include "ef100_regs.h"
0018 #include "ef100_nic.h"
0019 #include "io.h"
0020
0021
/* Extract a field from the EF100 Rx packet prefix.
 *
 * The prefix is treated as an array of little-endian 32-bit words; each
 * ESF_GZ_RX_PREFIX_* field is described by a bit offset (_LBN) and a bit
 * width (_WIDTH).  PREFIX_OFFSET_W() gives the word index, PREFIX_OFFSET_B()
 * the bit offset within that word.  Fields are assumed not to straddle a
 * 32-bit word boundary and to be narrower than 32 bits (the 1UL shift in
 * PREFIX_WIDTH_MASK() would be UB at width 32 on 32-bit longs).
 */
#define PREFIX_OFFSET_W(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN / 32)
#define PREFIX_OFFSET_B(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN % 32)
#define PREFIX_WIDTH_MASK(_f) ((1UL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1)
#define PREFIX_WORD(_p, _f) le32_to_cpu((__force __le32)(_p)[PREFIX_OFFSET_W(_f)])
#define PREFIX_FIELD(_p, _f) ((PREFIX_WORD(_p, _f) >> PREFIX_OFFSET_B(_f)) & \
			      PREFIX_WIDTH_MASK(_f))

/* Synthesise LBN/WIDTH for the NT_OR_INNER_L3_CLASS subfield of CLASS so it
 * can be extracted straight from the prefix with PREFIX_FIELD().
 */
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_LBN \
	(ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN)
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH \
	ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH
0033
0034 bool ef100_rx_buf_hash_valid(const u8 *prefix)
0035 {
0036 return PREFIX_FIELD(prefix, RSS_HASH_VALID);
0037 }
0038
0039 static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
0040 {
0041 u16 rxclass;
0042 u8 l2status;
0043
0044 rxclass = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, CLASS));
0045 l2status = PREFIX_FIELD(&rxclass, HCLASS_L2_STATUS);
0046
0047 if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
0048
0049 return false;
0050
0051 if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
0052 channel->n_rx_eth_crc_err++;
0053 return true;
0054 }
0055
/* Deliver the packet recorded in channel->rx_pkt_index.
 *
 * Parses the EF100 Rx prefix that the hardware places immediately before
 * the frame (L2/FCS status, length, ingress m-port, frame checksum),
 * steers representor traffic when SR-IOV is enabled, and hands everything
 * else to GRO.  Always clears channel->rx_pkt_n_frags on exit.
 */
void __ef100_rx_packet(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
						     channel->rx_pkt_index);
	struct efx_nic *efx = channel->efx;
	struct ef100_nic_data *nic_data;
	u8 *eh = efx_rx_buf_va(rx_buf);
	__wsum csum = 0;
	u16 ing_port;
	u32 *prefix;

	/* The Rx prefix sits immediately before the Ethernet header */
	prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);

	/* Drop frames with L2 errors unless the user asked to see them */
	if (ef100_has_fcs_error(channel, prefix) &&
	    unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
		goto out;

	rx_buf->len = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, LENGTH));
	if (rx_buf->len <= sizeof(struct ethhdr)) {
		if (net_ratelimit())
			netif_err(channel->efx, rx_err, channel->efx->net_dev,
				  "RX packet too small (%d)\n", rx_buf->len);
		++channel->n_rx_frm_trunc;
		goto out;
	}

	ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT));

	nic_data = efx->nic_data;

	/* Traffic arriving on an ingress m-port other than our own belongs
	 * to a VF representor, if one is registered for that m-port.
	 */
	if (nic_data->have_mport && ing_port != nic_data->base_mport) {
#ifdef CONFIG_SFC_SRIOV
		struct efx_rep *efv;

		rcu_read_lock();
		efv = efx_ef100_find_rep_by_mport(efx, ing_port);
		if (efv) {
			/* Only deliver if the representor is administratively up */
			if (efv->net_dev->flags & IFF_UP)
				efx_ef100_rep_rx_packet(efv, rx_buf);
			rcu_read_unlock();
			/* NOTE(review): the representor path appears to copy
			 * the data rather than take ownership of this buffer
			 * (we free it below either way) - confirm against
			 * efx_ef100_rep_rx_packet().
			 */
			goto free_rx_buffer;
		}
		rcu_read_unlock();
#endif
		if (net_ratelimit())
			netif_warn(efx, drv, efx->net_dev,
				   "Unrecognised ing_port %04x (base %04x), dropping\n",
				   ing_port, nic_data->base_mport);
		channel->n_rx_mport_bad++;
		goto free_rx_buffer;
	}

	if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
		if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
			/* NOTE(review): class value 1 is counted as an IP
			 * header checksum error here - confirm the class
			 * encoding against the EF100 prefix definitions.
			 */
			++channel->n_rx_ip_hdr_chksum_err;
		} else {
			/* Pass the hardware frame checksum up for GRO */
			u16 sum = be16_to_cpu((__force __be16)PREFIX_FIELD(prefix, CSUM_FRAME));

			csum = (__force __wsum) sum;
		}
	}

	if (channel->type->receive_skb) {
		/* receive_skb channel types are not supported on this path */
		WARN_ON_ONCE(1);
		goto free_rx_buffer;
	}

	efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
	goto out;

free_rx_buffer:
	efx_free_rx_buffers(rx_queue, rx_buf, 1);
out:
	channel->rx_pkt_n_frags = 0;
}
0138
/* Begin handling one received buffer: sync it for CPU access, skip the
 * hardware prefix, and stash its identity on the channel so that the next
 * efx_rx_flush_packet() delivers it via __ef100_rx_packet().
 */
static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index);
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;

	++rx_queue->rx_packets;

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x\n",
		   efx_rx_queue_index(rx_queue), index);

	/* Make the DMA'd data visible to the CPU before touching it */
	efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);

	prefetch(efx_rx_buf_va(rx_buf));

	/* Point the buffer at the frame, past the Rx prefix */
	rx_buf->page_offset += efx->rx_prefix_size;

	efx_recycle_rx_pages(channel, rx_buf, 1);

	/* Flush any packet stashed by a previous call, then record this one */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = 1;
	channel->rx_pkt_index = index;
}
0163
0164 void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event)
0165 {
0166 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
0167 unsigned int n_packets =
0168 EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_NUM_PKT);
0169 int i;
0170
0171 WARN_ON_ONCE(!n_packets);
0172 if (n_packets > 1)
0173 ++channel->n_rx_merge_events;
0174
0175 channel->irq_mod_score += 2 * n_packets;
0176
0177 for (i = 0; i < n_packets; ++i) {
0178 ef100_rx_packet(rx_queue,
0179 rx_queue->removed_count & rx_queue->ptr_mask);
0180 ++rx_queue->removed_count;
0181 }
0182 }
0183
0184 void ef100_rx_write(struct efx_rx_queue *rx_queue)
0185 {
0186 struct efx_rx_buffer *rx_buf;
0187 unsigned int idx;
0188 efx_qword_t *rxd;
0189 efx_dword_t rxdb;
0190
0191 while (rx_queue->notified_count != rx_queue->added_count) {
0192 idx = rx_queue->notified_count & rx_queue->ptr_mask;
0193 rx_buf = efx_rx_buffer(rx_queue, idx);
0194 rxd = efx_rx_desc(rx_queue, idx);
0195
0196 EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, rx_buf->dma_addr);
0197
0198 ++rx_queue->notified_count;
0199 }
0200
0201 wmb();
0202 EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX,
0203 rx_queue->added_count & rx_queue->ptr_mask);
0204 efx_writed_page(rx_queue->efx, &rxdb,
0205 ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue));
0206 }