/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "efx_common.h"
#include "filter.h"

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);

/* TX */
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                struct net_device *net_dev);
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
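/* Hand an skb to the NIC-type specific TX enqueue handler.  The
 * INDIRECT_CALL_2() wrapper turns the indirect call into a direct call
 * when the handler is one of the two common implementations, avoiding a
 * retpoline on affected CPUs.
 */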
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
    return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
                   ef100_enqueue_skb, __efx_enqueue_skb,
                   tx_queue, skb);
}
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
         void *type_data);
extern unsigned int efx_piobuf_size;

/* RX */
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
           unsigned int n_frags, unsigned int len, u16 flags);
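/* If a received packet is pending on @channel, hand it to the NIC-type
 * specific receive handler for delivery up the stack.
 */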
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
    if (channel->rx_pkt_n_frags)
        INDIRECT_CALL_2(channel->efx->type->rx_packet,
                __ef100_rx_packet, __efx_rx_packet,
                channel);
}
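/* Check whether the RX hash in the packet prefix is valid.  NIC types that
 * do not provide a validity check always report the hash as valid.
 */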
static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
    if (efx->type->rx_buf_hash_valid)
        return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
                       ef100_rx_buf_hash_valid,
                       prefix);
    return true;
}

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS    100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EFX_RXQ_MIN_ENT     128U
#define EFX_TXQ_MIN_ENT(efx)    (2 * efx_tx_max_skb_descs(efx))

/* All EF10 architecture NICs steal one bit of the DMAQ size for various
 * other purposes when counting TxQ entries, so we halve the queue size.
 */
#define EFX_TXQ_MAX_ENT(efx)    (EFX_WORKAROUND_EF10(efx) ? \
                 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)

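/* RSS is considered enabled when receive flows are spread over more than
 * one channel.
 */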
static inline bool efx_rss_enabled(struct efx_nic *efx)
{
    return efx->rss_spread > 1;
}

/* Filters */

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *  existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows.
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 *    is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
 *    support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
                       struct efx_filter_spec *spec,
                       bool replace_equal)
{
    return efx->type->filter_insert(efx, spec, replace_equal);
}

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
                        enum efx_filter_priority priority,
                        u32 filter_id)
{
    return efx->type->filter_remove_safe(efx, priority, filter_id);
}

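/* Illustrative sketch only (not part of the driver API contract): insert a
 * manual-priority RX filter and later remove it by ID.  Helper names follow
 * filter.h; rxq_index, daddr and dport stand in for caller-supplied values,
 * and the exact spec setup will vary with the match required.
 *
 *     struct efx_filter_spec spec;
 *     s32 rc;
 *
 *     efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
 *     rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, daddr, dport);
 *     if (!rc)
 *         rc = efx_filter_insert_filter(efx, &spec, false);
 *     if (rc >= 0)
 *         efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, rc);
 */
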
/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
               enum efx_filter_priority priority,
               u32 filter_id, struct efx_filter_spec *spec)
{
    return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}

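/* Thin wrappers around the NIC-type specific filter table query operations */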
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
                       enum efx_filter_priority priority)
{
    return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
    return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
                    enum efx_filter_priority priority,
                    u32 *buf, u32 size)
{
    return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}

/* RSS contexts */
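/* An RSS context is active once the NIC has assigned it a context ID */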
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
    return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                unsigned int rx_usecs, bool rx_adaptive,
                bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                unsigned int *rx_usecs, bool *rx_adaptive);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
        size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
    return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
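/* Size of a VF in VIs: each VF gets 1 << vi_scale virtual interfaces */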
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
    return 1 << efx->vi_scale;
}
#endif

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
    struct net_device *dev = efx->net_dev;

    /* We must stop reps (which use our TX) before we stop ourselves. */
    efx_detach_reps(efx);

    /* Lock/freeze all TX queues so that we can be sure the
     * TX scheduler is stopped when we're done and before
     * netif_device_present() becomes false.
     */
    netif_tx_lock_bh(dev);
    netif_device_detach(dev);
    netif_tx_unlock_bh(dev);
}

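/* Re-attach the net device (and any representors, if the interface is up)
 * unless the device is disabled or a reset is pending.
 */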
static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
    if ((efx->state != STATE_DISABLED) && !efx->reset_pending) {
        netif_device_attach(efx->net_dev);
        if (efx->state == STATE_NET_UP)
            efx_attach_reps(efx);
    }
}

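/* Assert that @sem is held for writing: returns true if so.  If a read lock
 * can be taken (i.e. no writer holds the lock), warn, drop the read lock
 * again and return false.
 */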
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
    if (WARN_ON(down_read_trylock(sem))) {
        up_read(sem);
        return false;
    }
    return true;
}

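/* Transmit a batch of @n XDP frames from @xdpfs; @flush pushes the queued
 * descriptors to the NIC.  Returns the number of frames sent on success or
 * a negative error code.
 */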
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
               bool flush);

#endif /* EFX_EFX_H */