/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EF4_EFX_H
#define EF4_EFX_H

#include "net_driver.h"
#include "filter.h"

/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
/* All VFs use BAR 0/1 for memory */
#define EF4_MEM_BAR 2
#define EF4_MEM_VF_BAR 0
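
/* Illustrative sketch only, not part of the driver API: how a probe path
 * might size the memory BAR named above.  This assumes struct ef4_nic
 * exposes its PCI device as @pci_dev (as declared in net_driver.h) and
 * that <linux/pci.h> is reachable through the includes above.
 */
static inline resource_size_t ef4_example_mem_bar_len(struct ef4_nic *efx)
{
	/* BAR 2 carries the memory-mapped registers on all controllers */
	return pci_resource_len(efx->pci_dev, EF4_MEM_BAR);
}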

int ef4_net_open(struct net_device *net_dev);
int ef4_net_stop(struct net_device *net_dev);

/* TX */
int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue);
void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue);
void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue);
void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue);
void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue);
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev);
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data);
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx);
extern bool ef4_separate_tx_channels;

/* RX */
void ef4_set_default_rx_indir_table(struct ef4_nic *efx);
void ef4_rx_config_page_split(struct ef4_nic *efx);
int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic);
void ef4_rx_slow_fill(struct timer_list *t);
void __ef4_rx_packet(struct ef4_channel *channel);
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags);
static inline void ef4_rx_flush_packet(struct ef4_channel *channel)
{
	if (channel->rx_pkt_n_frags)
		__ef4_rx_packet(channel);
}
void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue);

#define EF4_MAX_DMAQ_SIZE 4096UL
#define EF4_DEFAULT_DMAQ_SIZE 1024UL
#define EF4_MIN_DMAQ_SIZE 512UL

#define EF4_MAX_EVQ_SIZE 16384UL
#define EF4_MIN_EVQ_SIZE 512UL

/* Maximum number of TCP segments we support for soft-TSO */
#define EF4_TSO_MAX_SEGS	100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EF4_RXQ_MIN_ENT		128U
#define EF4_TXQ_MIN_ENT(efx)	(2 * ef4_tx_max_skb_descs(efx))
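
/* Illustrative sketch only, not part of the driver API: clamping a
 * user-requested TX descriptor count to the limits above, in the style
 * of an ethtool set_ringparam handler.  @requested is assumed to be
 * non-zero and pre-validated by the caller; roundup_pow_of_two()
 * (<linux/log2.h>) and min_t()/max_t() are assumed to be in scope via
 * net_driver.h.
 */
static inline u32 ef4_example_clamp_txq_entries(struct ef4_nic *efx,
						u32 requested)
{
	/* Hardware rings are power-of-two sized */
	u32 entries = roundup_pow_of_two(requested);

	/* Leave room for at least two worst-case TSO skbs ... */
	entries = max_t(u32, entries, EF4_TXQ_MIN_ENT(efx));
	/* ... but never exceed what a hardware ring can hold */
	return min_t(u32, entries, EF4_MAX_DMAQ_SIZE);
}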

static inline bool ef4_rss_enabled(struct ef4_nic *efx)
{
	return efx->rss_spread > 1;
}

/* Filters */

void ef4_mac_reconfigure(struct ef4_nic *efx);

/**
 * ef4_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows.
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 *    is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !ef4_filter_is_mc_recipient(@spec), or the NIC does not
 *    support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 ef4_filter_insert_filter(struct ef4_nic *efx,
					   struct ef4_filter_spec *spec,
					   bool replace_equal)
{
	return efx->type->filter_insert(efx, spec, replace_equal);
}
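
/* Illustrative sketch only, not part of the driver API: inserting a
 * filter while tolerating the -EPERM case documented above, where a
 * higher-priority filter with the same match already exists.  The spec
 * is assumed to have been populated elsewhere (e.g. with the helpers in
 * filter.h).
 */
static inline int ef4_example_try_insert(struct ef4_nic *efx,
					 struct ef4_filter_spec *spec)
{
	s32 rc = ef4_filter_insert_filter(efx, spec, false);

	/* For this hypothetical caller, losing to a higher-priority
	 * filter is not an error worth propagating.
	 */
	if (rc == -EPERM)
		return 0;

	return rc < 0 ? rc : 0;
}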

/**
 * ef4_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @ef4_filter_insert_filter
 * @filter_id: ID of filter, as returned by @ef4_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int ef4_filter_remove_id_safe(struct ef4_nic *efx,
					    enum ef4_filter_priority priority,
					    u32 filter_id)
{
	return efx->type->filter_remove_safe(efx, priority, filter_id);
}

/**
 * ef4_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @ef4_filter_insert_filter
 * @filter_id: ID of filter, as returned by @ef4_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
ef4_filter_get_filter_safe(struct ef4_nic *efx,
			   enum ef4_filter_priority priority,
			   u32 filter_id, struct ef4_filter_spec *spec)
{
	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
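
/* Illustrative sketch only, not part of the driver API: handling a
 * filter ID supplied from userland (e.g. via an ethtool ioctl).  Both
 * helpers above range-check @filter_id, so no extra validation is
 * needed here.  EF4_FILTER_PRI_MANUAL is assumed to be defined in
 * filter.h.
 */
static inline int ef4_example_del_user_filter(struct ef4_nic *efx,
					      u32 filter_id)
{
	struct ef4_filter_spec spec;
	int rc;

	/* Confirm the ID refers to a live manually-inserted filter ... */
	rc = ef4_filter_get_filter_safe(efx, EF4_FILTER_PRI_MANUAL,
					filter_id, &spec);
	if (rc)
		return rc;

	/* ... then remove it at the same priority */
	return ef4_filter_remove_id_safe(efx, EF4_FILTER_PRI_MANUAL,
					 filter_id);
}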

static inline u32 ef4_filter_count_rx_used(struct ef4_nic *efx,
					   enum ef4_filter_priority priority)
{
	return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 ef4_filter_get_rx_id_limit(struct ef4_nic *efx)
{
	return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 ef4_filter_get_rx_ids(struct ef4_nic *efx,
					enum ef4_filter_priority priority,
					u32 *buf, u32 size)
{
	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
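
/* Illustrative sketch only, not part of the driver API: enumerating the
 * RX filter IDs in use at a given priority.  kcalloc()/kfree() and
 * GFP_KERNEL are assumed to be available (e.g. via <linux/slab.h>
 * pulled in through net_driver.h).
 */
static inline s32 ef4_example_list_rx_filters(struct ef4_nic *efx,
					      enum ef4_filter_priority priority)
{
	u32 count = ef4_filter_count_rx_used(efx, priority);
	u32 *ids;
	s32 rc;

	ids = kcalloc(count, sizeof(*ids), GFP_KERNEL);
	if (!ids)
		return -ENOMEM;

	/* Returns the number of IDs written to the buffer, or a
	 * negative error code.
	 */
	rc = ef4_filter_get_rx_ids(efx, priority, ids, count);

	kfree(ids);
	return rc;
}
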
#ifdef CONFIG_RFS_ACCEL
int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id);
bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned quota);
static inline void ef4_filter_rfs_expire(struct ef4_channel *channel)
{
	if (channel->rfs_filters_added >= 60 &&
	    __ef4_filter_rfs_expire(channel->efx, 100))
		channel->rfs_filters_added -= 60;
}
#define ef4_filter_rfs_enabled() 1
#else
static inline void ef4_filter_rfs_expire(struct ef4_channel *channel) {}
#define ef4_filter_rfs_enabled() 0
#endif
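
/* Illustrative sketch only, not part of the driver API: the expiry
 * helper above is meant to be called periodically, for example from a
 * channel's NAPI poll routine.  When CONFIG_RFS_ACCEL is not set it
 * compiles to an empty function, so the call site needs no #ifdef.
 */
static inline void ef4_example_poll_housekeeping(struct ef4_channel *channel)
{
	if (ef4_filter_rfs_enabled())
		ef4_filter_rfs_expire(channel);
}
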
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec);

/* Channels */
int ef4_channel_dummy_op_int(struct ef4_channel *channel);
void ef4_channel_dummy_op_void(struct ef4_channel *channel);
int ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries);

/* Ports */
int ef4_reconfigure_port(struct ef4_nic *efx);
int __ef4_reconfigure_port(struct ef4_nic *efx);

/* Ethtool support */
extern const struct ethtool_ops ef4_ethtool_ops;

/* Reset handling */
int ef4_reset(struct ef4_nic *efx, enum reset_type method);
void ef4_reset_down(struct ef4_nic *efx, enum reset_type method);
int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok);
int ef4_try_recovery(struct ef4_nic *efx);

/* Global */
void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx);
void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive);
void ef4_stop_eventq(struct ef4_channel *channel);
void ef4_start_eventq(struct ef4_channel *channel);

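/* Illustrative sketch only, not part of the driver API: an ethtool-style
 * get_coalesce handler could report the current interrupt moderation
 * through the accessor above.  @tx_usecs and @rx_usecs are hypothetical
 * output parameters; the adaptive flag is discarded here.
 */
static inline void ef4_example_report_moderation(struct ef4_nic *efx,
						 unsigned int *tx_usecs,
						 unsigned int *rx_usecs)
{
	bool rx_adaptive;

	/* Values are exchanged in microseconds; conversion to and from
	 * hardware ticks uses the ef4_usecs_to_ticks()/
	 * ef4_ticks_to_usecs() helpers declared above.
	 */
	ef4_get_irq_moderation(efx, tx_usecs, rx_usecs, &rx_adaptive);
}
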
/* Dummy PHY ops for PHY drivers */
int ef4_port_dummy_op_int(struct ef4_nic *efx);
void ef4_port_dummy_op_void(struct ef4_nic *efx);

/* Update the generic software stats in the passed stats array */
void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_FALCON_MTD
int ef4_mtd_add(struct ef4_nic *efx, struct ef4_mtd_partition *parts,
		size_t n_parts, size_t sizeof_part);
static inline int ef4_mtd_probe(struct ef4_nic *efx)
{
	return efx->type->mtd_probe(efx);
}
void ef4_mtd_rename(struct ef4_nic *efx);
void ef4_mtd_remove(struct ef4_nic *efx);
#else
static inline int ef4_mtd_probe(struct ef4_nic *efx) { return 0; }
static inline void ef4_mtd_rename(struct ef4_nic *efx) {}
static inline void ef4_mtd_remove(struct ef4_nic *efx) {}
#endif

static inline void ef4_schedule_channel(struct ef4_channel *channel)
{
	netif_vdbg(channel->efx, intr, channel->efx->net_dev,
		   "channel %d scheduling NAPI poll on CPU%d\n",
		   channel->channel, raw_smp_processor_id());

	napi_schedule(&channel->napi_str);
}

static inline void ef4_schedule_channel_irq(struct ef4_channel *channel)
{
	channel->event_test_cpu = raw_smp_processor_id();
	ef4_schedule_channel(channel);
}
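
/* Illustrative sketch only, not part of the driver API: a per-channel
 * MSI interrupt handler would typically just hand the channel to NAPI
 * via ef4_schedule_channel_irq().  The handler name and the dev_id
 * convention are hypothetical, and <linux/interrupt.h> is assumed to be
 * in scope for irqreturn_t/IRQ_HANDLED.
 */
static inline irqreturn_t ef4_example_msi_handler(int irq, void *dev_id)
{
	struct ef4_channel *channel = dev_id;

	/* Records the interrupting CPU and schedules NAPI poll */
	ef4_schedule_channel_irq(channel);
	return IRQ_HANDLED;
}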

void ef4_link_status_changed(struct ef4_nic *efx);
void ef4_link_set_advertising(struct ef4_nic *efx, u32);
void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8);

static inline void ef4_device_detach_sync(struct ef4_nic *efx)
{
	struct net_device *dev = efx->net_dev;

	/* Lock/freeze all TX queues so that we can be sure the
	 * TX scheduler is stopped when we're done and before
	 * netif_device_present() becomes false.
	 */
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock_bh(dev);
}
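
/* Illustrative sketch only, not part of the driver API: the detach
 * helper above is typically bracketed around a reconfiguration that
 * must not race with the TX path, with netif_device_attach()
 * re-enabling the queues afterwards.  Error handling is simplified.
 */
static inline int ef4_example_reconfigure_detached(struct ef4_nic *efx)
{
	int rc;

	ef4_device_detach_sync(efx);
	rc = ef4_reconfigure_port(efx);
	netif_device_attach(efx->net_dev);
	return rc;
}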

static inline bool ef4_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
	if (WARN_ON(down_read_trylock(sem))) {
		up_read(sem);
		return false;
	}
	return true;
}
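
/* Illustrative sketch only, not part of the driver API: a typical caller
 * uses the assertion above to document and check that a shared
 * structure's semaphore is already held for writing before modifying it.
 * The semaphore and the value being updated are hypothetical.
 */
static inline void ef4_example_update_locked(struct rw_semaphore *sem,
					     u32 *shared_value, u32 new_value)
{
	/* Emits a warning if the caller forgot to take the write lock:
	 * down_read_trylock() only succeeds when no writer holds it.
	 */
	ef4_rwsem_assert_write_locked(sem);
	*shared_value = new_value;
}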

#endif /* EF4_EFX_H */