#ifndef _QED_LL2_IF_H
#define _QED_LL2_IF_H

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>

enum qed_ll2_conn_type {
	QED_LL2_TYPE_FCOE,
	QED_LL2_TYPE_TCP_ULP,
	QED_LL2_TYPE_TEST,
	QED_LL2_TYPE_OOO,
	QED_LL2_TYPE_RESERVED2,
	QED_LL2_TYPE_ROCE,
	QED_LL2_TYPE_IWARP,
	QED_LL2_TYPE_RESERVED3,
	MAX_QED_LL2_CONN_TYPE
};

enum qed_ll2_rx_conn_type {
	QED_LL2_RX_TYPE_LEGACY,
	QED_LL2_RX_TYPE_CTX,
	MAX_QED_LL2_RX_CONN_TYPE
};

enum qed_ll2_roce_flavor_type {
	QED_LL2_ROCE,
	QED_LL2_RROCE,
	MAX_QED_LL2_ROCE_FLAVOR_TYPE
};

enum qed_ll2_tx_dest {
	QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
	QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
	QED_LL2_TX_DEST_DROP, /* Light L2 Drop the TX packet */
	QED_LL2_TX_DEST_MAX
};

enum qed_ll2_error_handle {
	QED_LL2_DROP_PACKET,
	QED_LL2_DO_NOTHING,
	QED_LL2_ASSERT,
};

struct qed_ll2_stats {
	u64 gsi_invalid_hdr;
	u64 gsi_invalid_pkt_length;
	u64 gsi_unsupported_pkt_typ;
	u64 gsi_crcchksm_error;

	u64 packet_too_big_discard;
	u64 no_buff_discard;

	u64 rcv_ucast_bytes;
	u64 rcv_mcast_bytes;
	u64 rcv_bcast_bytes;
	u64 rcv_ucast_pkts;
	u64 rcv_mcast_pkts;
	u64 rcv_bcast_pkts;

	u64 sent_ucast_bytes;
	u64 sent_mcast_bytes;
	u64 sent_bcast_bytes;
	u64 sent_ucast_pkts;
	u64 sent_mcast_pkts;
	u64 sent_bcast_pkts;
};

struct qed_ll2_comp_rx_data {
	void *cookie;
	dma_addr_t rx_buf_addr;
	u16 parse_flags;
	u16 err_flags;
	u16 vlan;
	bool b_last_packet;
	u8 connection_handle;

	union {
		u16 packet_length;
		u16 data_length;
	} length;

	u32 opaque_data_0;
	u32 opaque_data_1;

	/* GSI only */
	u32 src_qp;
	u16 qp_id;

	union {
		u8 placement_offset;
		u8 data_length_error;
	} u;
};

typedef
void (*qed_ll2_complete_rx_packet_cb)(void *cxt,
				      struct qed_ll2_comp_rx_data *data);

typedef
void (*qed_ll2_release_rx_packet_cb)(void *cxt,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t rx_buf_addr,
				     bool b_last_packet);

typedef
void (*qed_ll2_complete_tx_packet_cb)(void *cxt,
				      u8 connection_handle,
				      void *cookie,
				      dma_addr_t first_frag_addr,
				      bool b_last_fragment,
				      bool b_last_packet);

typedef
void (*qed_ll2_release_tx_packet_cb)(void *cxt,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet);

typedef
void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle,
			    u32 opaque_data_0, u32 opaque_data_1);

struct qed_ll2_cbs {
	qed_ll2_complete_rx_packet_cb rx_comp_cb;
	qed_ll2_release_rx_packet_cb rx_release_cb;
	qed_ll2_complete_tx_packet_cb tx_comp_cb;
	qed_ll2_release_tx_packet_cb tx_release_cb;
	qed_ll2_slowpath_cb slowpath_cb;
	void *cookie;
};
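
/*
 * Example (illustrative sketch, not part of this interface): a protocol
 * driver fills struct qed_ll2_cbs with its completion/release handlers and
 * a context cookie before acquiring a connection. Every "my_*" name below
 * is hypothetical.
 *
 *	static void my_rx_comp(void *cxt, struct qed_ll2_comp_rx_data *data)
 *	{
 *		struct my_drv *drv = cxt;
 *
 *		// Pass the buffer (data->cookie, data->rx_buf_addr,
 *		// data->length.packet_length bytes) up the stack, then
 *		// post a fresh Rx buffer so the ring stays full.
 *	}
 *
 *	static void my_rx_release(void *cxt, u8 connection_handle, void *cookie,
 *				  dma_addr_t rx_buf_addr, bool b_last_packet)
 *	{
 *		// Unmap and free the Rx buffer posted with this cookie.
 *	}
 *
 *	static void my_tx_comp(void *cxt, u8 connection_handle, void *cookie,
 *			       dma_addr_t first_frag_addr, bool b_last_fragment,
 *			       bool b_last_packet)
 *	{
 *		// Tx completed (or flushed): unmap first_frag_addr, free cookie.
 *	}
 *
 *	static void my_fill_cbs(struct my_drv *drv, struct qed_ll2_cbs *cbs)
 *	{
 *		cbs->rx_comp_cb = my_rx_comp;
 *		cbs->rx_release_cb = my_rx_release;
 *		cbs->tx_comp_cb = my_tx_comp;
 *		cbs->tx_release_cb = my_tx_comp;	// same cleanup either way
 *		cbs->slowpath_cb = NULL;		// not used in this sketch
 *		cbs->cookie = drv;	// passed back as @cxt in every callback
 *	}
 */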

struct qed_ll2_acquire_data_inputs {
	enum qed_ll2_rx_conn_type rx_conn_type;
	enum qed_ll2_conn_type conn_type;
	u16 mtu;
	u16 rx_num_desc;
	u16 rx_num_ooo_buffers;
	u8 rx_drop_ttl0_flg;
	u8 rx_vlan_removal_en;
	u16 tx_num_desc;
	u8 tx_max_bds_per_packet;
	u8 tx_tc;
	enum qed_ll2_tx_dest tx_dest;
	enum qed_ll2_error_handle ai_err_packet_too_big;
	enum qed_ll2_error_handle ai_err_no_buf;
	bool secondary_queue;
	u8 gsi_enable;
};

struct qed_ll2_acquire_data {
	struct qed_ll2_acquire_data_inputs input;
	const struct qed_ll2_cbs *cbs;

	/* Output container for LL2 connection's handle */
	u8 *p_connection_handle;
};
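
/*
 * Example (illustrative sketch): minimal set of inputs for acquiring a
 * legacy Rx/Tx connection. The filled structure is handed to the qed
 * core's connection-acquire path; the ring sizes chosen here and the
 * "cbs" variable (filled as in the callback sketch above) are
 * illustrative only.
 *
 *	struct qed_ll2_acquire_data data;
 *	u8 handle;
 *
 *	memset(&data, 0, sizeof(data));
 *	data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
 *	data.input.conn_type = QED_LL2_TYPE_TEST;
 *	data.input.mtu = 1500;
 *	data.input.rx_num_desc = 32;
 *	data.input.tx_num_desc = 32;
 *	data.input.tx_max_bds_per_packet = 8;
 *	data.input.tx_tc = 0;
 *	data.input.tx_dest = QED_LL2_TX_DEST_NW;
 *	data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
 *	data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
 *	data.cbs = &cbs;
 *	data.p_connection_handle = &handle;	// written on successful acquire
 */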

struct qed_ll2_tx_pkt_info {
	void *cookie;
	dma_addr_t first_frag;
	enum qed_ll2_tx_dest tx_dest;
	enum qed_ll2_roce_flavor_type qed_roce_flavor;
	u16 vlan;
	u16 l4_hdr_offset_w;	/* from start of packet */
	u16 first_frag_len;
	u8 num_of_bds;
	u8 bd_flags;
	bool enable_ip_cksum;
	bool enable_l4_cksum;
	bool calc_ip_len;
	bool remove_stag;
};
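
/*
 * Example (illustrative sketch): describing a linearly mapped skb as a Tx
 * packet. "mapping" is assumed to be a DMA address the caller already
 * obtained for the skb's head; additional fragments would be posted
 * through the core's fragment path and are not shown here.
 *
 *	struct qed_ll2_tx_pkt_info pkt;
 *
 *	memset(&pkt, 0, sizeof(pkt));
 *	pkt.cookie = skb;			// echoed back in the Tx completion
 *	pkt.first_frag = mapping;		// DMA address of skb->data
 *	pkt.first_frag_len = skb_headlen(skb);
 *	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
 *	pkt.tx_dest = QED_LL2_TX_DEST_NW;	// transmit toward the network
 *	pkt.vlan = 0;				// no VLAN insertion
 *	pkt.l4_hdr_offset_w = 0;		// only relevant with L4 csum offload
 */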

#define QED_LL2_UNUSED_HANDLE	(0xff)

struct qed_ll2_cb_ops {
	int (*rx_cb)(void *, struct sk_buff *, u32, u32);
	int (*tx_cb)(void *, struct sk_buff *, bool);
};

struct qed_ll2_params {
	u16 mtu;
	bool drop_ttl0_packets;
	bool rx_vlan_stripping;
	u8 tx_tc;
	bool frags_mapped;
	u8 ll2_mac_address[ETH_ALEN];
};
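
/*
 * Example (illustrative sketch): configuration passed to the ->start() op
 * declared below. "dev_mac" stands in for whatever MAC address source the
 * protocol driver uses; all values are illustrative.
 *
 *	struct qed_ll2_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.mtu = 1500;
 *	params.drop_ttl0_packets = true;
 *	params.rx_vlan_stripping = true;
 *	params.tx_tc = 0;
 *	ether_addr_copy(params.ll2_mac_address, dev_mac);
 */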

enum qed_ll2_xmit_flags {
	/* FIP discovery packet */
	QED_LL2_XMIT_FLAGS_FIP_DISCOVERY
};

struct qed_ll2_ops {
/**
 * @brief start - initializes ll2
 *
 * @param cdev
 * @param params - protocol driver configuration for the ll2.
 *
 * @return 0 on success, otherwise error value.
 */
	int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);

/**
 * @brief stop - stops the ll2
 *
 * @param cdev
 *
 * @return 0 on success, otherwise error value.
 */
	int (*stop)(struct qed_dev *cdev);

/**
 * @brief start_xmit - transmits an skb over the ll2 interface
 *
 * @param cdev
 * @param skb
 * @param xmit_flags - transmit options defined by enum qed_ll2_xmit_flags.
 *
 * @return 0 on success, otherwise error value.
 */
	int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
			  unsigned long xmit_flags);

/**
 * @brief register_cb_ops - protocol driver registers the callbacks for
 * Rx/Tx packets. Should be called before `start'.
 *
 * @param cdev
 * @param ops - the callback functions to register for Rx / Tx.
 * @param cookie - to be passed to the callback functions.
 *
 * @return 0 on success, otherwise error value.
 */
	void (*register_cb_ops)(struct qed_dev *cdev,
				const struct qed_ll2_cb_ops *ops,
				void *cookie);

/**
 * @brief get_stats - get LL2 related statistics
 *
 * @param cdev
 * @param stats - pointer to struct that would be filled with stats.
 *
 * @return 0 on success, error otherwise.
 */
	int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
};
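
/*
 * Example (illustrative sketch): typical call order for a protocol driver
 * that obtained a pointer to these ops (named "ll2" here) from the qed
 * core. Error handling is omitted; everything other than the ops and the
 * QED_LL2_XMIT_FLAGS_FIP_DISCOVERY flag is hypothetical.
 *
 *	unsigned long xmit_flags = 0;
 *
 *	ll2->register_cb_ops(cdev, &my_cb_ops, my_ctx);	// before start()
 *	ll2->start(cdev, &params);			// params as sketched above
 *
 *	ll2->start_xmit(cdev, skb, 0);			// regular frame
 *
 *	set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags);
 *	ll2->start_xmit(cdev, fip_skb, xmit_flags);	// FIP discovery frame
 *
 *	ll2->get_stats(cdev, &stats);			// fills struct qed_ll2_stats
 *	ll2->stop(cdev);
 */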

#ifdef CONFIG_QED_LL2
int qed_ll2_alloc_if(struct qed_dev *);
void qed_ll2_dealloc_if(struct qed_dev *);
#else
static const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = NULL,
	.stop = NULL,
	.start_xmit = NULL,
	.register_cb_ops = NULL,
	.get_stats = NULL,
};

static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
}
#endif
#endif