/* SPDX-License-Identifier: GPL-2.0-only */
/* Driver for Solarflare network controllers and boards */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "efx_common.h"
#include "filter.h"

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);

/* TX */
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev);
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
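/* Dispatch to the NIC-type-specific TX enqueue routine; INDIRECT_CALL_2()
 * lets the EF100 and common paths be called directly, avoiding a retpoline.
 */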
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
                               ef100_enqueue_skb, __efx_enqueue_skb,
                               tx_queue, skb);
}
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data);
extern unsigned int efx_piobuf_size;

/* RX */
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags);
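/* Deliver the packet currently held on this channel, if there is one. */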
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
        if (channel->rx_pkt_n_frags)
                INDIRECT_CALL_2(channel->efx->type->rx_packet,
                                __ef100_rx_packet, __efx_rx_packet,
                                channel);
}
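/* NIC types that cannot signal an invalid hash in the RX prefix (i.e. have
 * no rx_buf_hash_valid method) always report the hash as valid.
 */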
static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
        if (efx->type->rx_buf_hash_valid)
                return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
                                       ef100_rx_buf_hash_valid,
                                       prefix);
        return true;
}

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS        100

/* The smallest rxq_entries and txq_entries that the driver supports.
 * The RX minimum is somewhat arbitrary; the TX minimum must leave room
 * for the descriptors of at least two maximally-fragmented skbs.
 */
#define EFX_RXQ_MIN_ENT         128U
#define EFX_TXQ_MIN_ENT(efx)    (2 * efx_tx_max_skb_descs(efx))

/* EF10 architecture NICs steal a bit of the DMAQ size for other purposes
 * when counting TxQ entries, so the maximum TX queue size is halved there.
 */
#define EFX_TXQ_MAX_ENT(efx)    (EFX_WORKAROUND_EF10(efx) ? \
                                 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)

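/* RSS is in use if receive traffic is spread over more than one queue. */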
static inline bool efx_rss_enabled(struct efx_nic *efx)
{
        return efx->rss_spread > 1;
}

/* Filters */

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * depending on their priority and on @replace_equal.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
                                           struct efx_filter_spec *spec,
                                           bool replace_equal)
{
        return efx->type->filter_insert(efx, spec, replace_equal);
}

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
                                            enum efx_filter_priority priority,
                                            u32 filter_id)
{
        return efx->type->filter_remove_safe(efx, priority, filter_id);
}

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store the filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
                           enum efx_filter_priority priority,
                           u32 filter_id, struct efx_filter_spec *spec)
{
        return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
                                           enum efx_filter_priority priority)
{
        return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
        return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
                                        enum efx_filter_priority priority,
                                        u32 *buf, u32 size)
{
        return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}

/* RSS contexts */
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
        return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                            unsigned int rx_usecs, bool rx_adaptive,
                            bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
                size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
        return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
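/* Number of VIs allocated to each VF */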
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
        return 1 << efx->vi_scale;
}
#endif

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
        struct net_device *dev = efx->net_dev;

        /* We must stop reps (which use our TX) before we stop ourselves. */
        efx_detach_reps(efx);

        /* Lock/freeze all TX queues so that we can be sure the
         * TX scheduler is stopped when we're done and before
         * netif_device_present() becomes false.
         */
        netif_tx_lock_bh(dev);
        netif_device_detach(dev);
        netif_tx_unlock_bh(dev);
}

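/* Reattach the net device (and its representors) only once it is safe to
 * do so, i.e. the interface is not disabled and no reset is pending.
 */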
static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
        if ((efx->state != STATE_DISABLED) && !efx->reset_pending) {
                netif_device_attach(efx->net_dev);
                if (efx->state == STATE_NET_UP)
                        efx_attach_reps(efx);
        }
}

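/* Best-effort check that a rw_semaphore is held for writing: if a read
 * lock can be taken, the semaphore was not write-locked.
 */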
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
        if (WARN_ON(down_read_trylock(sem))) {
                up_read(sem);
                return false;
        }
        return true;
}

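/* Transmit a batch of XDP frames; if @flush is set, push the new
 * descriptors out to the NIC once they have all been enqueued.
 */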
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
                       bool flush);

#endif /* EFX_EFX_H */