0001
0002
0003
0004
0005
0006
0007
0008 #ifndef EF4_EFX_H
0009 #define EF4_EFX_H
0010
0011 #include "net_driver.h"
0012 #include "filter.h"
0013
0014
0015
0016 #define EF4_MEM_BAR 2
0017 #define EF4_MEM_VF_BAR 0
0018
0019 int ef4_net_open(struct net_device *net_dev);
0020 int ef4_net_stop(struct net_device *net_dev);
0021
0022
0023 int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue);
0024 void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue);
0025 void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue);
0026 void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue);
0027 void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue);
0028 netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
0029 struct net_device *net_dev);
0030 netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
0031 void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
0032 int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
0033 void *type_data);
0034 unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx);
0035 extern bool ef4_separate_tx_channels;
0036
0037
0038 void ef4_set_default_rx_indir_table(struct ef4_nic *efx);
0039 void ef4_rx_config_page_split(struct ef4_nic *efx);
0040 int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue);
0041 void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue);
0042 void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue);
0043 void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue);
0044 void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic);
0045 void ef4_rx_slow_fill(struct timer_list *t);
0046 void __ef4_rx_packet(struct ef4_channel *channel);
0047 void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
0048 unsigned int n_frags, unsigned int len, u16 flags);
0049 static inline void ef4_rx_flush_packet(struct ef4_channel *channel)
0050 {
0051 if (channel->rx_pkt_n_frags)
0052 __ef4_rx_packet(channel);
0053 }
0054 void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue);
0055
0056 #define EF4_MAX_DMAQ_SIZE 4096UL
0057 #define EF4_DEFAULT_DMAQ_SIZE 1024UL
0058 #define EF4_MIN_DMAQ_SIZE 512UL
0059
0060 #define EF4_MAX_EVQ_SIZE 16384UL
0061 #define EF4_MIN_EVQ_SIZE 512UL
0062
0063
0064 #define EF4_TSO_MAX_SEGS 100
0065
0066
0067
0068
0069
0070 #define EF4_RXQ_MIN_ENT 128U
0071 #define EF4_TXQ_MIN_ENT(efx) (2 * ef4_tx_max_skb_descs(efx))
0072
/* Return true if RSS is spreading receive flows over more than one
 * RX queue (i.e. rss_spread > 1).
 */
static inline bool ef4_rss_enabled(struct ef4_nic *efx)
{
	return efx->rss_spread > 1;
}
0077
0078
0079
0080 void ef4_mac_reconfigure(struct ef4_nic *efx);
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
/**
 * ef4_filter_insert_filter - add or replace a filter
 * @efx: NIC on which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Whether the new filter may replace an existing one
 *	of equal match/priority
 *
 * Thin wrapper that dispatches to the NIC-type filter_insert
 * operation.  Return: the op's s32 result — presumably the new filter
 * ID on success or a negative error code (kernel convention); confirm
 * against the bound filter_insert implementation.
 */
static inline s32 ef4_filter_insert_filter(struct ef4_nic *efx,
					   struct ef4_filter_spec *spec,
					   bool replace_equal)
{
	return efx->type->filter_insert(efx, spec, replace_equal);
}
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
/**
 * ef4_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter to remove
 * @filter_id: ID of filter, as returned by ef4_filter_insert_filter()
 *
 * Dispatches to the NIC-type filter_remove_safe operation.  Return:
 * the op's int result — presumably 0 or a negative error code; confirm
 * against the bound implementation.
 */
static inline int ef4_filter_remove_id_safe(struct ef4_nic *efx,
					    enum ef4_filter_priority priority,
					    u32 filter_id)
{
	return efx->type->filter_remove_safe(efx, priority, filter_id);
}
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
/**
 * ef4_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter to retrieve
 * @filter_id: ID of filter, as returned by ef4_filter_insert_filter()
 * @spec: Buffer in which to store the filter specification
 *
 * Dispatches to the NIC-type filter_get_safe operation, which fills
 * in @spec on success.  Return: the op's int result.
 */
static inline int
ef4_filter_get_filter_safe(struct ef4_nic *efx,
			   enum ef4_filter_priority priority,
			   u32 filter_id, struct ef4_filter_spec *spec)
{
	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
0147
/* Count RX filters in use at or below @priority; dispatches to the
 * NIC-type filter_count_rx_used operation.
 */
static inline u32 ef4_filter_count_rx_used(struct ef4_nic *efx,
					   enum ef4_filter_priority priority)
{
	return efx->type->filter_count_rx_used(efx, priority);
}
/* Upper bound (exclusive) on RX filter IDs for this NIC type;
 * dispatches to the NIC-type filter_get_rx_id_limit operation.
 */
static inline u32 ef4_filter_get_rx_id_limit(struct ef4_nic *efx)
{
	return efx->type->filter_get_rx_id_limit(efx);
}
/* Fill @buf (capacity @size) with the IDs of RX filters at or below
 * @priority, via the NIC-type filter_get_rx_ids operation.  Return:
 * the op's s32 result — presumably the number of IDs written or a
 * negative error code; confirm against the bound implementation.
 */
static inline s32 ef4_filter_get_rx_ids(struct ef4_nic *efx,
					enum ef4_filter_priority priority,
					u32 *buf, u32 size)
{
	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
0163 #ifdef CONFIG_RFS_ACCEL
0164 int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
0165 u16 rxq_index, u32 flow_id);
0166 bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned quota);
0167 static inline void ef4_filter_rfs_expire(struct ef4_channel *channel)
0168 {
0169 if (channel->rfs_filters_added >= 60 &&
0170 __ef4_filter_rfs_expire(channel->efx, 100))
0171 channel->rfs_filters_added -= 60;
0172 }
0173 #define ef4_filter_rfs_enabled() 1
0174 #else
0175 static inline void ef4_filter_rfs_expire(struct ef4_channel *channel) {}
0176 #define ef4_filter_rfs_enabled() 0
0177 #endif
0178 bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec);
0179
0180
0181 int ef4_channel_dummy_op_int(struct ef4_channel *channel);
0182 void ef4_channel_dummy_op_void(struct ef4_channel *channel);
0183 int ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries);
0184
0185
0186 int ef4_reconfigure_port(struct ef4_nic *efx);
0187 int __ef4_reconfigure_port(struct ef4_nic *efx);
0188
0189
0190 extern const struct ethtool_ops ef4_ethtool_ops;
0191
0192
0193 int ef4_reset(struct ef4_nic *efx, enum reset_type method);
0194 void ef4_reset_down(struct ef4_nic *efx, enum reset_type method);
0195 int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok);
0196 int ef4_try_recovery(struct ef4_nic *efx);
0197
0198
0199 void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
0200 unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
0201 unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
0202 int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
0203 unsigned int rx_usecs, bool rx_adaptive,
0204 bool rx_may_override_tx);
0205 void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
0206 unsigned int *rx_usecs, bool *rx_adaptive);
0207 void ef4_stop_eventq(struct ef4_channel *channel);
0208 void ef4_start_eventq(struct ef4_channel *channel);
0209
0210
0211 int ef4_port_dummy_op_int(struct ef4_nic *efx);
0212 void ef4_port_dummy_op_void(struct ef4_nic *efx);
0213
0214
0215 void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats);
0216
0217
0218 #ifdef CONFIG_SFC_FALCON_MTD
0219 int ef4_mtd_add(struct ef4_nic *efx, struct ef4_mtd_partition *parts,
0220 size_t n_parts, size_t sizeof_part);
/* Probe the NIC for MTD (flash) partitions via the NIC-type mtd_probe
 * operation.
 */
static inline int ef4_mtd_probe(struct ef4_nic *efx)
{
	return efx->type->mtd_probe(efx);
}
0225 void ef4_mtd_rename(struct ef4_nic *efx);
0226 void ef4_mtd_remove(struct ef4_nic *efx);
0227 #else
/* Stubs used when MTD support is not built in: probe reports success
 * with nothing to do, rename/remove are no-ops.
 */
static inline int ef4_mtd_probe(struct ef4_nic *efx) { return 0; }
static inline void ef4_mtd_rename(struct ef4_nic *efx) {}
static inline void ef4_mtd_remove(struct ef4_nic *efx) {}
0231 #endif
0232
0233 static inline void ef4_schedule_channel(struct ef4_channel *channel)
0234 {
0235 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
0236 "channel %d scheduling NAPI poll on CPU%d\n",
0237 channel->channel, raw_smp_processor_id());
0238
0239 napi_schedule(&channel->napi_str);
0240 }
0241
/* IRQ-context variant of ef4_schedule_channel(): records the CPU that
 * took the interrupt (used by the event self-test) before scheduling
 * NAPI.
 */
static inline void ef4_schedule_channel_irq(struct ef4_channel *channel)
{
	channel->event_test_cpu = raw_smp_processor_id();
	ef4_schedule_channel(channel);
}
0247
0248 void ef4_link_status_changed(struct ef4_nic *efx);
0249 void ef4_link_set_advertising(struct ef4_nic *efx, u32);
0250 void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8);
0251
static inline void ef4_device_detach_sync(struct ef4_nic *efx)
{
	struct net_device *dev = efx->net_dev;

	/* Perform the detach while holding the BH TX lock so that
	 * transmit paths serialised on that lock observe the device as
	 * present or detached atomically — presumably so the TX
	 * scheduler is quiesced before netif_device_present() reads
	 * false; confirm against callers.
	 */
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock_bh(dev);
}
0264
0265 static inline bool ef4_rwsem_assert_write_locked(struct rw_semaphore *sem)
0266 {
0267 if (WARN_ON(down_read_trylock(sem))) {
0268 up_read(sem);
0269 return false;
0270 }
0271 return true;
0272 }
0273
0274 #endif