Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * This file is part of the Chelsio T4 Ethernet driver for Linux.
0003  *
0004  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
0005  *
0006  * This software is available to you under a choice of one of two
0007  * licenses.  You may choose to be licensed under the terms of the GNU
0008  * General Public License (GPL) Version 2, available from the file
0009  * COPYING in the main directory of this source tree, or the
0010  * OpenIB.org BSD license below:
0011  *
0012  *     Redistribution and use in source and binary forms, with or
0013  *     without modification, are permitted provided that the following
0014  *     conditions are met:
0015  *
0016  *      - Redistributions of source code must retain the above
0017  *        copyright notice, this list of conditions and the following
0018  *        disclaimer.
0019  *
0020  *      - Redistributions in binary form must reproduce the above
0021  *        copyright notice, this list of conditions and the following
0022  *        disclaimer in the documentation and/or other materials
0023  *        provided with the distribution.
0024  *
0025  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
0026  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
0027  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
0028  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
0029  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
0030  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
0031  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0032  * SOFTWARE.
0033  */
0034 
0035 #ifndef __CXGB4_ULD_H
0036 #define __CXGB4_ULD_H
0037 
0038 #include <linux/cache.h>
0039 #include <linux/spinlock.h>
0040 #include <linux/skbuff.h>
0041 #include <linux/inetdevice.h>
0042 #include <linux/atomic.h>
0043 #include <net/tls.h>
0044 #include "cxgb4.h"
0045 
#define MAX_ULD_QSETS 16    /* upper bound on queue sets a ULD may request */
#define MAX_ULD_NPORTS 4    /* sizes the per-port kTLS stats array below */

/* ulp_mem_io + ulptx_idata + payload + padding */
#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
0051 
/* CPL message priority levels.  Only two actual levels exist (0 and 1);
 * several logical message classes deliberately share a level.  The chosen
 * value ends up in the low bit of the queue mapping (see set_wr_txq()).
 */
enum {
    CPL_PRIORITY_DATA     = 0,  /* data messages */
    CPL_PRIORITY_SETUP    = 1,  /* connection setup messages */
    CPL_PRIORITY_TEARDOWN = 0,  /* connection teardown messages */
    CPL_PRIORITY_LISTEN   = 1,  /* listen start/stop messages */
    CPL_PRIORITY_ACK      = 1,  /* RX ACK messages */
    CPL_PRIORITY_CONTROL  = 1   /* control messages */
};
0061 
/* Initialize the firmware work-request header of *w for a TP work request:
 * opcode, immediate-data length derived from sizeof(*w), total length in
 * 16-byte units, and the flow id (the connection's tid).
 */
#define INIT_TP_WR(w, tid) do { \
    (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | \
                  FW_WR_IMMDLEN_V(sizeof(*w) - sizeof(w->wr))); \
    (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*w), 16)) | \
                   FW_WR_FLOWID_V(tid)); \
    (w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
0069 
/* As INIT_TP_WR(), but also stamp the embedded CPL opcode/tid word. */
#define INIT_TP_WR_CPL(w, cpl, tid) do { \
    INIT_TP_WR(w, tid); \
    OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
} while (0)
0074 
/* Initialize the firmware work-request header of *w for a ULPTX work
 * request of wrlen bytes; atomic selects the FW_WR "atomic" flag.
 */
#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
    (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | \
                  FW_WR_ATOMIC_V(atomic)); \
    (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(wrlen, 16)) | \
                   FW_WR_FLOWID_V(tid)); \
    (w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
0082 
/* Special asynchronous notification message */
#define CXGB4_MSG_AN ((void *)1)
/* Map a ULD type to its Tx queue class: crypto traffic gets its own class,
 * everything else shares the generic offload class.
 */
#define TX_ULD(uld)(((uld) != CXGB4_ULD_CRYPTO) ? CXGB4_TX_OFLD :\
              CXGB4_TX_CRYPTO)
0087 
/* One server (listening) TID table entry. */
struct serv_entry {
    void *data;
};

/* Active-open TID table entry: carries the ULD's data pointer while in use,
 * or links the free list (see tid_info.afree) while idle.
 */
union aopen_entry {
    void *data;
    union aopen_entry *next;
};

/* One ETHOFLD TID table entry. */
struct eotid_entry {
    void *data;
};
0100 
0101 /*
0102  * Holds the size, base address, free list start, etc of the TID, server TID,
0103  * and active-open TID tables.  The tables themselves are allocated dynamically.
0104  */
struct tid_info {
    /* Connection TID table: one opaque ULD pointer per active TID. */
    void **tid_tab;
    unsigned int tid_base;   /* subtracted from absolute TIDs (lookup_tid) */
    unsigned int ntids;

    /* Server (listening) TID table and its allocation bitmap. */
    struct serv_entry *stid_tab;
    unsigned long *stid_bmap;
    unsigned int nstids;
    unsigned int stid_base;

    /* Hash-filter region: TIDs >= hash_base live in the hash table,
     * not the TCAM (see cxgb4_insert_tid()).
     */
    unsigned int nhash;
    unsigned int hash_base;

    /* Active-open TID table; free entries are chained through
     * aopen_entry.next starting at afree below.
     */
    union aopen_entry *atid_tab;
    unsigned int natids;
    unsigned int atid_base;

    /* High-priority filter TIDs. */
    struct filter_entry *hpftid_tab;
    unsigned long *hpftid_bmap;
    unsigned int nhpftids;
    unsigned int hpftid_base;

    /* Normal filter TIDs. */
    struct filter_entry *ftid_tab;
    unsigned long *ftid_bmap;
    unsigned int nftids;
    unsigned int ftid_base;
    unsigned int aftid_base;
    unsigned int aftid_end;
    /* Server filter region */
    unsigned int sftid_base;
    unsigned int nsftids;

    spinlock_t atid_lock ____cacheline_aligned_in_smp;
    union aopen_entry *afree;    /* head of the free atid list */
    unsigned int atids_in_use;

    spinlock_t stid_lock;
    unsigned int stids_in_use;
    unsigned int v6_stids_in_use;
    unsigned int sftids_in_use;

    /* ETHOFLD range */
    struct eotid_entry *eotid_tab;
    unsigned long *eotid_bmap;
    unsigned int eotid_base;
    unsigned int neotids;

    /* TIDs in the TCAM */
    atomic_t tids_in_use;
    /* TIDs in the HASH */
    atomic_t hash_tids_in_use;
    atomic_t conns_in_use;
    /* ETHOFLD TIDs used for rate limiting */
    atomic_t eotids_in_use;

    /* lock for setting/clearing filter bitmap */
    spinlock_t ftid_lock;

    /* Highest TC priority among hash TIDs -- TODO confirm against users */
    unsigned int tc_hash_tids_max_prio;
};
0165 
0166 static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
0167 {
0168     tid -= t->tid_base;
0169     return tid < t->ntids ? t->tid_tab[tid] : NULL;
0170 }
0171 
0172 static inline bool tid_out_of_range(const struct tid_info *t, unsigned int tid)
0173 {
0174     return ((tid - t->tid_base) >= t->ntids);
0175 }
0176 
0177 static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
0178 {
0179     return atid < t->natids ? t->atid_tab[atid].data : NULL;
0180 }
0181 
0182 static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
0183 {
0184     /* Is it a server filter TID? */
0185     if (t->nsftids && (stid >= t->sftid_base)) {
0186         stid -= t->sftid_base;
0187         stid += t->nstids;
0188     } else {
0189         stid -= t->stid_base;
0190     }
0191 
0192     return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
0193 }
0194 
0195 static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
0196                     unsigned int tid, unsigned short family)
0197 {
0198     t->tid_tab[tid - t->tid_base] = data;
0199     if (t->hash_base && (tid >= t->hash_base)) {
0200         if (family == AF_INET6)
0201             atomic_add(2, &t->hash_tids_in_use);
0202         else
0203             atomic_inc(&t->hash_tids_in_use);
0204     } else {
0205         if (family == AF_INET6)
0206             atomic_add(2, &t->tids_in_use);
0207         else
0208             atomic_inc(&t->tids_in_use);
0209     }
0210     atomic_inc(&t->conns_in_use);
0211 }
0212 
0213 static inline struct eotid_entry *cxgb4_lookup_eotid(struct tid_info *t,
0214                              u32 eotid)
0215 {
0216     return eotid < t->neotids ? &t->eotid_tab[eotid] : NULL;
0217 }
0218 
0219 static inline int cxgb4_get_free_eotid(struct tid_info *t)
0220 {
0221     int eotid;
0222 
0223     eotid = find_first_zero_bit(t->eotid_bmap, t->neotids);
0224     if (eotid >= t->neotids)
0225         eotid = -1;
0226 
0227     return eotid;
0228 }
0229 
0230 static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
0231 {
0232     set_bit(eotid, t->eotid_bmap);
0233     t->eotid_tab[eotid].data = data;
0234     atomic_inc(&t->eotids_in_use);
0235 }
0236 
0237 static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
0238 {
0239     clear_bit(eotid, t->eotid_bmap);
0240     t->eotid_tab[eotid].data = NULL;
0241     atomic_dec(&t->eotids_in_use);
0242 }
0243 
/* TID allocation/release helpers implemented by the core cxgb4 driver. */
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
              unsigned short family);
0251 struct in6_addr;
0252 
/* Offloaded server (listener) creation/removal, IPv4 and IPv6 variants,
 * plus the server-filter equivalents.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
            __be32 sip, __be16 sport, __be16 vlan,
            unsigned int queue);
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
             const struct in6_addr *sip, __be16 sport,
             unsigned int queue);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
            unsigned int queue, bool ipv6);
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                   __be32 sip, __be16 sport, __be16 vlan,
                   unsigned int queue,
                   unsigned char port, unsigned char mask);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
                   unsigned int queue, bool ipv6);
0267 
/* Filter operation context to allow callers of cxgb4_set_filter() and
 * cxgb4_del_filter() to wait for an asynchronous completion.  The driver
 * fills in @result (and @tid) before signalling @completion.
 */
struct filter_ctx {
    struct completion completion;   /* completion rendezvous */
    void *closure;          /* caller's opaque information */
    int result;         /* result of operation */
    u32 tid;            /* to store tid */
};
0277 
/* Shared kTLS state; presumably counts active kTLS users so teardown can be
 * gated on the last reference -- verify against the chcr/ch_ktls driver.
 */
struct chcr_ktls {
    refcount_t ktls_refcount;
};
0281 
0282 struct ch_filter_specification;
0283 
/* Filter management API.  The __-prefixed variants take a filter_ctx so the
 * caller can wait for the asynchronous completion (see struct filter_ctx).
 */
int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
            u32 tc_prio);
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
               struct ch_filter_specification *fs,
               struct filter_ctx *ctx);
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
               struct ch_filter_specification *fs,
               struct filter_ctx *ctx);
int cxgb4_set_filter(struct net_device *dev, int filter_id,
             struct ch_filter_specification *fs);
int cxgb4_del_filter(struct net_device *dev, int filter_id,
             struct ch_filter_specification *fs);
int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
                  u64 *hitcnt, u64 *bytecnt, bool hash);
0298 
/* Encode the target Tx queue and CPL priority in the skb's queue mapping:
 * the low bit carries the priority, the remaining bits select the queue.
 */
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
    int mapping = (queue << 1) | prio;

    skb_set_queue_mapping(skb, mapping);
}
0303 
/* Identifiers for the upper-layer drivers that may attach to the adapter. */
enum cxgb4_uld {
    CXGB4_ULD_INIT,
    CXGB4_ULD_RDMA,
    CXGB4_ULD_ISCSI,
    CXGB4_ULD_ISCSIT,
    CXGB4_ULD_CRYPTO,
    CXGB4_ULD_IPSEC,
    CXGB4_ULD_TLS,
    CXGB4_ULD_KTLS,
    CXGB4_ULD_MAX       /* number of ULD types, not a valid type itself */
};
0315 
/* Tx queue classes; TX_ULD() maps a ULD type onto one of these. */
enum cxgb4_tx_uld {
    CXGB4_TX_OFLD,
    CXGB4_TX_CRYPTO,
    CXGB4_TX_MAX
};
0321 
/* Kinds of Tx queue the driver maintains. */
enum cxgb4_txq_type {
    CXGB4_TXQ_ETH,
    CXGB4_TXQ_ULD,
    CXGB4_TXQ_CTRL,
    CXGB4_TXQ_MAX
};
0328 
/* Adapter states reported to ULDs via the state_change() callback. */
enum cxgb4_state {
    CXGB4_STATE_UP,
    CXGB4_STATE_START_RECOVERY,
    CXGB4_STATE_DOWN,
    CXGB4_STATE_DETACH,
    CXGB4_STATE_FATAL_ERROR
};
0336 
/* Doorbell-FIFO events delivered to ULDs via the control() callback. */
enum cxgb4_control {
    CXGB4_CONTROL_DB_FULL,
    CXGB4_CONTROL_DB_EMPTY,
    CXGB4_CONTROL_DB_DROP,
};
0342 
0343 struct adapter;
0344 struct pci_dev;
0345 struct l2t_data;
0346 struct net_device;
0347 struct pkt_gl;
0348 struct tp_tcp_stats;
0349 struct t4_lro_mgr;
0350 
/* A contiguous range [start, start + size) of hardware resource indices. */
struct cxgb4_range {
    unsigned int start;
    unsigned int size;
};
0355 
struct cxgb4_virt_res {                      /* virtualized HW resources */
    struct cxgb4_range ddp;
    struct cxgb4_range iscsi;
    struct cxgb4_range stag;
    struct cxgb4_range rq;
    struct cxgb4_range srq;
    struct cxgb4_range pbl;
    struct cxgb4_range qp;
    struct cxgb4_range cq;
    struct cxgb4_range ocq;              /* on-chip queues; see OCQ_WIN_OFFSET() */
    struct cxgb4_range key;
    unsigned int ncrypto_fc;
    struct cxgb4_range ppod_edram;
};
0370 
0371 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
/* Per-port kTLS Tx statistics, updated with atomic64 operations. */
struct ch_ktls_port_stats_debug {
    atomic64_t ktls_tx_connection_open;
    atomic64_t ktls_tx_connection_fail;
    atomic64_t ktls_tx_connection_close;
    atomic64_t ktls_tx_encrypted_packets;
    atomic64_t ktls_tx_encrypted_bytes;
    atomic64_t ktls_tx_ctx;
    atomic64_t ktls_tx_ooo;
    atomic64_t ktls_tx_skip_no_sync_data;
    atomic64_t ktls_tx_drop_no_sync_data;
    atomic64_t ktls_tx_drop_bypass_req;
};

/* Adapter-wide kTLS Tx statistics, including the per-port array above. */
struct ch_ktls_stats_debug {
    struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS];
    atomic64_t ktls_tx_send_records;
    atomic64_t ktls_tx_end_pkts;
    atomic64_t ktls_tx_start_pkts;
    atomic64_t ktls_tx_middle_pkts;
    atomic64_t ktls_tx_retransmit_pkts;
    atomic64_t ktls_tx_complete_pkts;
    atomic64_t ktls_tx_trimmed_pkts;
    atomic64_t ktls_tx_fallback;
};
0396 #endif
0397 
/* Crypto (chcr) request/completion counters. */
struct chcr_stats_debug {
    atomic_t cipher_rqst;
    atomic_t digest_rqst;
    atomic_t aead_rqst;
    atomic_t complete;
    atomic_t error;
    atomic_t fallback;      /* requests handed to the software path */
    atomic_t tls_pdu_tx;
    atomic_t tls_pdu_rx;
    atomic_t tls_key;
};
0409 
0410 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
/* Inline-IPsec counters. */
struct ch_ipsec_stats_debug {
    atomic_t ipsec_cnt;
};
0414 #endif
0415 
/* Byte offset of the on-chip queue window within PCI BAR2: the window sits
 * at the very end of the BAR, its size rounded up to a power of two.
 */
#define OCQ_WIN_OFFSET(pdev, vres) \
    (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
0418 
0419 /*
0420  * Block of information the LLD provides to ULDs attaching to a device.
0421  */
0422 struct cxgb4_lld_info {
0423     struct pci_dev *pdev;                /* associated PCI device */
0424     struct l2t_data *l2t;                /* L2 table */
0425     struct tid_info *tids;               /* TID table */
0426     struct net_device **ports;           /* device ports */
0427     const struct cxgb4_virt_res *vr;     /* assorted HW resources */
0428     const unsigned short *mtus;          /* MTU table */
0429     const unsigned short *rxq_ids;       /* the ULD's Rx queue ids */
0430     const unsigned short *ciq_ids;       /* the ULD's concentrator IQ ids */
0431     unsigned short nrxq;                 /* # of Rx queues */
0432     unsigned short ntxq;                 /* # of Tx queues */
0433     unsigned short nciq;             /* # of concentrator IQ */
0434     unsigned char nchan:4;               /* # of channels */
0435     unsigned char nports:4;              /* # of ports */
0436     unsigned char wr_cred;               /* WR 16-byte credits */
0437     unsigned char adapter_type;          /* type of adapter */
0438     unsigned char fw_api_ver;            /* FW API version */
0439     unsigned char crypto;                /* crypto support */
0440     unsigned int fw_vers;                /* FW version */
0441     unsigned int iscsi_iolen;            /* iSCSI max I/O length */
0442     unsigned int cclk_ps;                /* Core clock period in psec */
0443     unsigned short udb_density;          /* # of user DB/page */
0444     unsigned short ucq_density;          /* # of user CQs/page */
0445     unsigned int sge_host_page_size;     /* SGE host page size */
0446     unsigned short filt_mode;            /* filter optional components */
0447     unsigned short tx_modq[NCHAN];       /* maps each tx channel to a */
0448                          /* scheduler queue */
0449     void __iomem *gts_reg;               /* address of GTS register */
0450     void __iomem *db_reg;                /* address of kernel doorbell */
0451     int dbfifo_int_thresh;           /* doorbell fifo int threshold */
0452     unsigned int sge_ingpadboundary;     /* SGE ingress padding boundary */
0453     unsigned int sge_egrstatuspagesize;  /* SGE egress status page size */
0454     unsigned int sge_pktshift;           /* Padding between CPL and */
0455                          /* packet data */
0456     unsigned int pf;             /* Physical Function we're using */
0457     bool enable_fw_ofld_conn;            /* Enable connection through fw */
0458                          /* WR */
0459     unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
0460     unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
0461     bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
0462     unsigned int iscsi_tagmask;      /* iscsi ddp tag mask */
0463     unsigned int iscsi_pgsz_order;       /* iscsi ddp page size orders */
0464     unsigned int iscsi_llimit;       /* chip's iscsi region llimit */
0465     unsigned int ulp_crypto;             /* crypto lookaside support */
0466     void **iscsi_ppm;            /* iscsi page pod manager */
0467     int nodeid;              /* device numa node id */
0468     bool fr_nsmr_tpte_wr_support;        /* FW supports FR_NSMR_TPTE_WR */
0469     bool write_w_imm_support;         /* FW supports WRITE_WITH_IMMEDIATE */
0470     bool write_cmpl_support;             /* FW supports WRITE_CMPL WR */
0471 };
0472 
/* Descriptor a ULD registers with cxgb4_register_uld(): its identity, queue
 * requirements, and the callbacks the LLD invokes on its behalf.
 */
struct cxgb4_uld_info {
    char name[IFNAMSIZ];
    void *handle;           /* opaque per-ULD state -- TODO confirm owner */
    unsigned int nrxq;
    unsigned int rxq_size;
    unsigned int ntxq;
    bool ciq;               /* ULD wants concentrator IQs */
    bool lro;               /* ULD supports LRO delivery */
    /* Attach to a device described by the LLD info block. */
    void *(*add)(const struct cxgb4_lld_info *p);
    /* Deliver one ingress CPL response and its packet gather list. */
    int (*rx_handler)(void *handle, const __be64 *rsp,
              const struct pkt_gl *gl);
    /* Notify the ULD of an adapter state transition (enum cxgb4_state). */
    int (*state_change)(void *handle, enum cxgb4_state new_state);
    /* Deliver control events (enum cxgb4_control), with variadic args. */
    int (*control)(void *handle, enum cxgb4_control control, ...);
    /* LRO variants of Rx delivery and flush. */
    int (*lro_rx_handler)(void *handle, const __be64 *rsp,
                  const struct pkt_gl *gl,
                  struct t4_lro_mgr *lro_mgr,
                  struct napi_struct *napi);
    void (*lro_flush)(struct t4_lro_mgr *);
    /* Transmit-path hook for offloaded skbs. */
    int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
    const struct tlsdev_ops *tlsdev_ops;
#endif
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
    const struct xfrmdev_ops *xfrmdev_ops;
#endif
};
0499 
0500 static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
0501 {
0502     return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
0503 }
0504 
/* ULD registration with the core driver. */
void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
/* Transmit helpers for offload/crypto traffic. */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
               const void *src, unsigned int len);
int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
/* Port/channel attribute accessors. */
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_e2cchan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid);
unsigned int cxgb4_port_idx(const struct net_device *dev);
/* MTU table search helpers. */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
                unsigned int *idx);
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
                    unsigned short header_size,
                    unsigned short data_size_max,
                    unsigned short data_size_align,
                    unsigned int *mtu_idxp);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
             struct tp_tcp_stats *v6);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
              const unsigned int *pgsz_order);
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
                   unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev);

/* Compute the BAR2 offset and qid for direct access to an SGE queue. */
enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS };
int cxgb4_bar2_sge_qregs(struct net_device *dev,
             unsigned int qid,
             enum cxgb4_bar2_qtype qtype,
             int user,
             u64 *pbar2_qoffset,
             unsigned int *pbar2_qid);
0543 
0544 #endif  /* !__CXGB4_ULD_H */