#ifndef _MANA_H
#define _MANA_H

#include <linux/bpf.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* MANA protocol version, advertised to the hardware in MANA_QUERY_DEV_CONFIG */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
        TRI_STATE_UNKNOWN = -1,
        TRI_STATE_FALSE = 0,
        TRI_STATE_TRUE = 1
};

/* Number of entries in the RSS indirection table: a power of two, so a
 * hash can be masked straight into a slot index.
 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
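
/*
 * Illustrative sketch, not part of the upstream driver: because the table
 * size is a power of two, an RSS hash selects an indirection-table slot
 * with a simple mask. The helper name is hypothetical.
 */
static inline u32 mana_example_indir_slot(u32 rss_hash)
{
        return rss_hash & MANA_INDIRECT_TABLE_MASK;
}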

/* RSS hash key size in bytes */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

#define ADAPTER_MTU_SIZE 1500
/* MTU plus the 14-byte Ethernet header */
#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)

#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Per-queue counters; syncp guards tearing-free 64-bit reads on 32-bit
 * architectures.
 */
struct mana_stats_rx {
        u64 packets;
        u64 bytes;
        u64 xdp_drop;
        u64 xdp_tx;
        u64 xdp_redirect;
        struct u64_stats_sync syncp;
};

struct mana_stats_tx {
        u64 packets;
        u64 bytes;
        u64 xdp_xmit;
        struct u64_stats_sync syncp;
};
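
/*
 * Illustrative sketch, not part of the upstream driver: datapath code bumps
 * these counters inside a u64_stats_update_begin()/end() pair so that
 * 64-bit readers never observe torn values. The helper name is hypothetical.
 */
static inline void mana_example_count_tx(struct mana_stats_tx *tx_stats,
                                         unsigned int bytes)
{
        u64_stats_update_begin(&tx_stats->syncp);
        tx_stats->packets++;
        tx_stats->bytes += bytes;
        u64_stats_update_end(&tx_stats->syncp);
}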

struct mana_txq {
        struct gdma_queue *gdma_sq;

        union {
                u32 gdma_txq_id;
                struct {
                        u32 reserved1 : 10;
                        u32 vsq_frame : 14;
                        u32 reserved2 : 8;
                };
        };

        /* The SQ's vPort offset, programmed into each TX OOB */
        u16 vp_offset;

        struct net_device *ndev;

        /* skbs posted to hardware but not yet completed */
        struct sk_buff_head pending_skbs;
        struct netdev_queue *net_txq;

        atomic_t pending_sends;

        struct mana_stats_tx stats;
};

/* Per-skb DMA bookkeeping: one mapping for the linear part plus one per
 * fragment. The driver keeps this at the head of the skb buffer, hence the
 * MANA_HEADROOM reservation below.
 */
struct mana_skb_head {
        dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

        u32 size[MAX_SKB_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
        MANA_SHORT_PKT_FMT = 0,
        MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
        u32 pkt_fmt : 2;
        u32 is_outer_ipv4 : 1;
        u32 is_outer_ipv6 : 1;
        u32 comp_iphdr_csum : 1;
        u32 comp_tcp_csum : 1;
        u32 comp_udp_csum : 1;
        u32 suppress_txcqe_gen : 1;
        u32 vcq_num : 24;

        u32 trans_off : 10;
        u32 vsq_frame : 14;
        u32 short_vp_offset : 8;
};

struct mana_tx_long_oob {
        u32 is_encap : 1;
        u32 inner_is_ipv6 : 1;
        u32 inner_tcp_opt : 1;
        u32 inject_vlan_pri_tag : 1;
        u32 reserved1 : 12;
        u32 pcp : 3;
        u32 dei : 1;
        u32 vlan_id : 12;

        u32 inner_frame_offset : 10;
        u32 inner_ip_rel_offset : 6;
        u32 long_vp_offset : 12;
        u32 reserved2 : 4;

        u32 reserved3;
        u32 reserved4;
};

struct mana_tx_oob {
        struct mana_tx_short_oob s_oob;
        struct mana_tx_long_oob l_oob;
};
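
/*
 * Illustrative sketch, not part of the upstream driver: a minimal
 * short-format OOB for a TCP/IPv4 frame with checksum offload, assuming
 * trans_off carries the transport-header offset (as mana_start_xmit()
 * derives from the skb). The helper name is hypothetical.
 */
static inline void mana_example_short_oob(struct mana_tx_oob *oob,
                                          u32 transport_offset)
{
        oob->s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
        oob->s_oob.is_outer_ipv4 = 1;
        oob->s_oob.comp_iphdr_csum = 1;
        oob->s_oob.comp_tcp_csum = 1;
        oob->s_oob.trans_off = transport_offset;
}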

enum mana_cq_type {
        MANA_CQ_TYPE_RX,
        MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
        CQE_INVALID = 0,
        CQE_RX_OKAY = 1,
        CQE_RX_COALESCED_4 = 2,
        CQE_RX_OBJECT_FENCE = 3,
        CQE_RX_TRUNCATED = 4,

        CQE_TX_OKAY = 32,
        CQE_TX_SA_DROP = 33,
        CQE_TX_MTU_DROP = 34,
        CQE_TX_INVALID_OOB = 35,
        CQE_TX_INVALID_ETH_TYPE = 36,
        CQE_TX_HDR_PROCESSING_ERROR = 37,
        CQE_TX_VF_DISABLED = 38,
        CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
        CQE_TX_VPORT_DISABLED = 40,
        CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
        u32 cqe_type : 6;
        u32 client_type : 2;
        u32 vendor_err : 24;
};

#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
        (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
         NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
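
/*
 * Illustrative sketch, not part of the upstream driver: the 9-bit
 * rx_hashtype in the RX completion is a set of NDIS_HASH_* bits, so an L4
 * match maps to PKT_HASH_TYPE_L4 and an L3-only match to PKT_HASH_TYPE_L3.
 * The helper name is hypothetical.
 */
static inline enum pkt_hash_types mana_example_hash_type(u32 rx_hashtype)
{
        if (rx_hashtype & MANA_HASH_L4)
                return PKT_HASH_TYPE_L4;
        if (rx_hashtype & MANA_HASH_L3)
                return PKT_HASH_TYPE_L3;
        return PKT_HASH_TYPE_NONE;
}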

struct mana_rxcomp_perpkt_info {
        u32 pkt_len : 16;
        u32 reserved1 : 16;
        u32 reserved2;
        u32 pkt_hash;
};

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* RX completion OOB: one CQE can describe up to MANA_RXCOMP_OOB_NUM_PPI
 * coalesced packets (see CQE_RX_COALESCED_4).
 */
struct mana_rxcomp_oob {
        struct mana_cqe_header cqe_hdr;

        u32 rx_vlan_id : 12;
        u32 rx_vlantag_present : 1;
        u32 rx_outer_iphdr_csum_succeed : 1;
        u32 rx_outer_iphdr_csum_fail : 1;
        u32 reserved1 : 1;
        u32 rx_hashtype : 9;
        u32 rx_iphdr_csum_succeed : 1;
        u32 rx_iphdr_csum_fail : 1;
        u32 rx_tcp_csum_succeed : 1;
        u32 rx_tcp_csum_fail : 1;
        u32 rx_udp_csum_succeed : 1;
        u32 rx_udp_csum_fail : 1;
        u32 reserved2 : 1;

        struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

        u32 rx_wqe_offset;
};
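
/*
 * Illustrative sketch, not part of the upstream driver: a frame whose IP
 * header and TCP or UDP checksum both validated can be marked
 * CHECKSUM_UNNECESSARY on the skb. The helper name is hypothetical.
 */
static inline bool mana_example_csum_ok(const struct mana_rxcomp_oob *oob)
{
        return oob->rx_iphdr_csum_succeed &&
               (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed);
}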

struct mana_tx_comp_oob {
        struct mana_cqe_header cqe_hdr;

        u32 tx_data_offset;

        u32 tx_sgl_offset : 5;
        u32 tx_wqe_offset : 27;

        u32 reserved[12];
};

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
        struct gdma_queue *gdma_cq;

        /* Cache the CQ id, used to verify that each CQE arrives on the
         * right CQ.
         */
        u32 gdma_id;

        /* Type of the CQ: TX or RX */
        enum mana_cq_type type;

        /* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
         * Only and must be non-NULL if type is MANA_CQ_TYPE_RX.
         */
        struct mana_rxq *rxq;

        /* Pointer to the mana_txq that is pushing TX CQEs to the queue.
         * Only and must be non-NULL if type is MANA_CQ_TYPE_TX.
         */
        struct mana_txq *txq;

        /* Buffer the CQE handlers drain completions into */
        struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

        /* NAPI state: work_done and budget are updated by the CQE handlers
         * while the queue is being polled.
         */
        struct napi_struct napi;
        int work_done;
        int budget;
};
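
/*
 * Illustrative sketch, not part of the upstream driver: the shape of a NAPI
 * poll handler over this struct. work_done/budget let the CQE handlers
 * account their progress; draining the GDMA CQ and re-arming the completion
 * interrupt are elided. The function name is hypothetical.
 */
static inline int mana_example_poll(struct napi_struct *napi, int budget)
{
        struct mana_cq *cq = container_of(napi, struct mana_cq, napi);

        cq->work_done = 0;
        cq->budget = budget;

        /* ... pull up to budget completions from cq->gdma_cq into
         * cq->gdma_comp_buf and dispatch them here ...
         */

        if (cq->work_done < budget && napi_complete_done(napi, cq->work_done)) {
                /* all work done: the completion interrupt would be
                 * re-armed here
                 */
        }

        return min(cq->work_done, budget);
}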

#define GDMA_MAX_RQE_SGES 15

struct mana_recv_buf_oob {
        /* A valid GDMA work request representing the data buffer */
        struct gdma_wqe_request wqe_req;

        void *buf_va;
        dma_addr_t buf_dma_addr;

        /* SGL of the buffer posted as part of the work request */
        u32 num_sge;
        struct gdma_sge sgl[GDMA_MAX_RQE_SGES];

        /* Stores the result of mana_gd_post_work_request:
         * wqe_inf.wqe_size_in_bu is needed to progress the work queue when
         * the WQE is consumed.
         */
        struct gdma_posted_wqe_info wqe_inf;
};

struct mana_rxq {
        struct gdma_queue *gdma_rq;

        /* Cache the gdma receive queue id */
        u32 gdma_id;

        /* Index of this RX queue among the port's RX queues */
        u32 rxq_idx;

        /* Size of each receive buffer */
        u32 datasize;

        mana_handle_t rxobj;

        struct mana_cq rx_cq;

        struct completion fence_event;

        struct net_device *ndev;

        /* Total number of receive buffers to be allocated */
        u32 num_rx_buf;

        u32 buf_index;

        struct mana_stats_rx stats;

        struct bpf_prog __rcu *bpf_prog;
        struct xdp_rxq_info xdp_rxq;
        struct page *xdp_save_page;
        bool xdp_flush;
        int xdp_rc; /* XDP redirect return code */

        /* MUST BE THE LAST MEMBER:
         * Each receive buffer has an associated mana_recv_buf_oob.
         */
        struct mana_recv_buf_oob rx_oobs[];
};

struct mana_tx_qp {
        struct mana_txq txq;

        struct mana_cq tx_cq;

        mana_handle_t tx_object;
};

struct mana_ethtool_stats {
        u64 stop_queue;
        u64 wake_queue;
};

struct mana_context {
        struct gdma_dev *gdma_dev;

        u16 num_ports;

        struct mana_eq *eqs;

        struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
        struct mana_context *ac;
        struct net_device *ndev;

        u8 mac_addr[ETH_ALEN];

        enum TRI_STATE rss_state;

        mana_handle_t default_rxobj;
        bool tx_shortform_allowed;
        u16 tx_vp_offset;

        struct mana_tx_qp *tx_qp;

        /* Indirection table for RX, used by RSS */
        u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

        /* Indirection table containing RxObject handles */
        mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

        /* Hash key used by RSS */
        u8 hashkey[MANA_HASH_KEY_SIZE];

        /* Points to an array of num_queues RQ pointers */
        struct mana_rxq **rxqs;

        struct bpf_prog *bpf_prog;

        /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively */
        unsigned int max_queues;
        unsigned int num_queues;

        mana_handle_t port_handle;
        mana_handle_t pf_filter_handle;

        u16 port_idx;

        bool port_is_up;
        bool port_st_save; /* Saved port state */

        struct mana_ethtool_stats eth_stats;
};

int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
                    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
                  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
                 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);

extern const struct ethtool_ops mana_ethtool_ops;
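
/*
 * Illustrative sketch, not part of the upstream driver: mana_bpf() and
 * mana_xdp_xmit() match the .ndo_bpf and .ndo_xdp_xmit signatures, so XDP
 * support can be wired up directly in a netdev ops table like this. The
 * variable name is hypothetical.
 */
static const struct net_device_ops mana_example_ndev_ops __maybe_unused = {
        .ndo_bpf = mana_bpf,
        .ndo_xdp_xmit = mana_xdp_xmit,
};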

struct mana_obj_spec {
        u32 queue_index;
        u64 gdma_region;
        u32 queue_size;
        u32 attached_eq;
        u32 modr_ctx_id;
};

enum mana_command_code {
        MANA_QUERY_DEV_CONFIG = 0x20001,
        MANA_QUERY_GF_STAT = 0x20002,
        MANA_CONFIG_VPORT_TX = 0x20003,
        MANA_CREATE_WQ_OBJ = 0x20004,
        MANA_DESTROY_WQ_OBJ = 0x20005,
        MANA_FENCE_RQ = 0x20006,
        MANA_CONFIG_VPORT_RX = 0x20007,
        MANA_QUERY_VPORT_CONFIG = 0x20008,

        /* Privileged commands for the PF mode */
        MANA_REGISTER_FILTER = 0x28000,
        MANA_DEREGISTER_FILTER = 0x28001,
        MANA_REGISTER_HW_PORT = 0x28003,
        MANA_DEREGISTER_HW_PORT = 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
        struct gdma_req_hdr hdr;

        /* MANA NIC driver capability flags */
        u64 mn_drv_cap_flags1;
        u64 mn_drv_cap_flags2;
        u64 mn_drv_cap_flags3;
        u64 mn_drv_cap_flags4;

        u32 proto_major_ver;
        u32 proto_minor_ver;
        u32 proto_micro_ver;

        u32 reserved;
};

struct mana_query_device_cfg_resp {
        struct gdma_resp_hdr hdr;

        u64 pf_cap_flags1;
        u64 pf_cap_flags2;
        u64 pf_cap_flags3;
        u64 pf_cap_flags4;

        u16 max_num_vports;
        u16 reserved;
        u32 max_num_eqs;
};

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
        struct gdma_req_hdr hdr;
        u32 vport_index;
};

struct mana_query_vport_cfg_resp {
        struct gdma_resp_hdr hdr;
        u32 max_num_sq;
        u32 max_num_rq;
        u32 num_indirection_ent;
        u32 reserved1;
        u8 mac_addr[6];
        u8 reserved2[2];
        mana_handle_t vport;
};

/* Configure vPort */
struct mana_config_vport_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        u32 pdid;
        u32 doorbell_pageid;
};

struct mana_config_vport_resp {
        struct gdma_resp_hdr hdr;
        u16 tx_vport_offset;
        u8 short_form_allowed;
        u8 reserved;
};

/* Create WQ Object */
struct mana_create_wqobj_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        u32 wq_type;
        u32 reserved;
        u64 wq_gdma_region;
        u64 cq_gdma_region;
        u32 wq_size;
        u32 cq_size;
        u32 cq_moderation_ctx_id;
        u32 cq_parent_qid;
};

struct mana_create_wqobj_resp {
        struct gdma_resp_hdr hdr;
        u32 wq_id;
        u32 cq_id;
        mana_handle_t wq_obj;
};

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
        struct gdma_req_hdr hdr;
        u32 wq_type;
        u32 reserved;
        mana_handle_t wq_obj_handle;
};

struct mana_destroy_wqobj_resp {
        struct gdma_resp_hdr hdr;
};

/* Fence RQ */
struct mana_fence_rq_req {
        struct gdma_req_hdr hdr;
        mana_handle_t wq_obj_handle;
};

struct mana_fence_rq_resp {
        struct gdma_resp_hdr hdr;
};

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        u16 num_indir_entries;
        u16 indir_tab_offset;
        u32 rx_enable;
        u32 rss_enable;
        u8 update_default_rxobj;
        u8 update_hashkey;
        u8 update_indir_tab;
        u8 reserved;
        mana_handle_t default_rxobj;
        u8 hashkey[MANA_HASH_KEY_SIZE];
};

struct mana_cfg_rx_steer_resp {
        struct gdma_resp_hdr hdr;
};
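
/*
 * Illustrative sketch, not part of the upstream driver: the indirection
 * table (num_indir_entries mana_handle_t values) travels immediately after
 * the fixed struct, at indir_tab_offset, so the message is sized to include
 * it. The helper name is hypothetical.
 */
static inline u32 mana_example_steer_req_size(u16 num_indir_entries)
{
        return sizeof(struct mana_cfg_rx_steer_req) +
               num_indir_entries * sizeof(mana_handle_t);
}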

/* Register HW vPort */
struct mana_register_hw_vport_req {
        struct gdma_req_hdr hdr;
        u16 attached_gfid;
        u8 is_pf_default_vport;
        u8 reserved1;
        u8 allow_all_ether_types;
        u8 reserved2;
        u8 reserved3;
        u8 reserved4;
};

struct mana_register_hw_vport_resp {
        struct gdma_resp_hdr hdr;
        mana_handle_t hw_vport_handle;
};

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
        struct gdma_req_hdr hdr;
        mana_handle_t hw_vport_handle;
};

struct mana_deregister_hw_vport_resp {
        struct gdma_resp_hdr hdr;
};

/* Register filter */
struct mana_register_filter_req {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        u8 mac_addr[6];
        u8 reserved1;
        u8 reserved2;
        u8 reserved3;
        u8 reserved4;
        u16 reserved5;
        u32 reserved6;
        u32 reserved7;
        u32 reserved8;
};

struct mana_register_filter_resp {
        struct gdma_resp_hdr hdr;
        mana_handle_t filter_handle;
};

/* Deregister filter */
struct mana_deregister_filter_req {
        struct gdma_req_hdr hdr;
        mana_handle_t filter_handle;
};

struct mana_deregister_filter_resp {
        struct gdma_resp_hdr hdr;
};

#define MANA_MAX_NUM_QUEUES 64

/* The largest vp_offset that still fits the 8-bit short_vp_offset field */
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
        struct gdma_wqe_request wqe_req;
        struct gdma_sge sgl_array[5];
        struct gdma_sge *sgl_ptr;

        struct mana_tx_oob tx_oob;

        struct gdma_posted_wqe_info wqe_info;
};

#endif /* _MANA_H */