0001 /* SPDX-License-Identifier: ISC */
0002 /*
0003  * Copyright (c) 2005-2011 Atheros Communications Inc.
0004  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
0005  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
0006  */
0007 
0008 #ifndef _HTT_H_
0009 #define _HTT_H_
0010 
0011 #include <linux/bug.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/dmapool.h>
0014 #include <linux/hashtable.h>
0015 #include <linux/kfifo.h>
0016 #include <net/mac80211.h>
0017 
0018 #include "htc.h"
0019 #include "hw.h"
0020 #include "rx_desc.h"
0021 
0022 enum htt_dbg_stats_type {
0023     HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
0024     HTT_DBG_STATS_RX_REORDER    = 1 << 1,
0025     HTT_DBG_STATS_RX_RATE_INFO  = 1 << 2,
0026     HTT_DBG_STATS_TX_PPDU_LOG   = 1 << 3,
0027     HTT_DBG_STATS_TX_RATE_INFO  = 1 << 4,
0028     /* bits 5-23 currently reserved */
0029 
0030     HTT_DBG_NUM_STATS /* keep this last */
0031 };
0032 
0033 enum htt_h2t_msg_type { /* host-to-target */
0034     HTT_H2T_MSG_TYPE_VERSION_REQ        = 0,
0035     HTT_H2T_MSG_TYPE_TX_FRM             = 1,
0036     HTT_H2T_MSG_TYPE_RX_RING_CFG        = 2,
0037     HTT_H2T_MSG_TYPE_STATS_REQ          = 3,
0038     HTT_H2T_MSG_TYPE_SYNC               = 4,
0039     HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
0040     HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
0041 
0042     /* This command is used for sending management frames in HTT < 3.0.
0043      * HTT >= 3.0 uses TX_FRM for everything.
0044      */
0045     HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
0046     HTT_H2T_MSG_TYPE_TX_FETCH_RESP      = 11,
0047 
0048     HTT_H2T_NUM_MSGS /* keep this last */
0049 };
0050 
0051 struct htt_cmd_hdr {
0052     u8 msg_type;
0053 } __packed;
0054 
0055 struct htt_ver_req {
0056     u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
0057 } __packed;
0058 
0059 /*
0060  * HTT tx MSDU descriptor
0061  *
0062  * The HTT tx MSDU descriptor is created by the host HTT SW for each
0063  * tx MSDU.  The HTT tx MSDU descriptor contains the information that
0064  * the target firmware needs for the FW's tx processing, particularly
0065  * for creating the HW msdu descriptor.
0066  * The same HTT tx descriptor is used for HL and LL systems, though
0067  * a few fields within the tx descriptor are used only by LL or
0068  * only by HL.
0069  * The HTT tx descriptor is defined in two manners: by a struct with
0070  * bitfields, and by a series of [dword offset, bit mask, bit shift]
0071  * definitions.
0072  * The target should use the struct def, for simplicity and clarity,
0073  * but the host shall use the bit-mask + bit-shift defs, to be endian-
0074  * neutral.  Specifically, the host shall use the get/set macros built
0075  * around the mask + shift defs.
0076  */
0077 struct htt_data_tx_desc_frag {
0078     union {
0079         struct double_word_addr {
0080             __le32 paddr;
0081             __le32 len;
0082         } __packed dword_addr;
0083         struct triple_word_addr {
0084             __le32 paddr_lo;
0085             __le16 paddr_hi;
0086             __le16 len_16;
0087         } __packed tword_addr;
0088     } __packed;
0089 } __packed;
0090 
0091 struct htt_msdu_ext_desc {
0092     __le32 tso_flag[3];
0093     __le16 ip_identification;
0094     u8 flags;
0095     u8 reserved;
0096     struct htt_data_tx_desc_frag frags[6];
0097 };
0098 
0099 struct htt_msdu_ext_desc_64 {
0100     __le32 tso_flag[5];
0101     __le16 ip_identification;
0102     u8 flags;
0103     u8 reserved;
0104     struct htt_data_tx_desc_frag frags[6];
0105 };
0106 
0107 #define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE     BIT(0)
0108 #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
0109 #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
0110 #define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
0111 #define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)
0112 
0113 #define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
0114                  | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
0115                  | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
0116                  | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
0117                  | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
0118 
0119 #define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64      BIT(16)
0120 #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64      BIT(17)
0121 #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64      BIT(18)
0122 #define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64      BIT(19)
0123 #define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64      BIT(20)
0124 #define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64       BIT(21)
0125 
0126 #define HTT_MSDU_CHECKSUM_ENABLE_64  (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
0127                      | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
0128                      | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
0129                      | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
0130                      | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
0131 
0132 enum htt_data_tx_desc_flags0 {
0133     HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
0134     HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
0135     HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT      = 1 << 2,
0136     HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY     = 1 << 3,
0137     HTT_DATA_TX_DESC_FLAGS0_RSVD0           = 1 << 4
0138 #define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
0139 #define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
0140 };
0141 
0142 enum htt_data_tx_desc_flags1 {
0143 #define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
0144 #define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
0145 #define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB  0
0146 #define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
0147 #define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
0148 #define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB  6
0149     HTT_DATA_TX_DESC_FLAGS1_POSTPONED        = 1 << 11,
0150     HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH    = 1 << 12,
0151     HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
0152     HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
0153     HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE      = 1 << 15
0154 };
0155 
0156 #define HTT_TX_CREDIT_DELTA_ABS_M      0xffff0000
0157 #define HTT_TX_CREDIT_DELTA_ABS_S      16
0158 #define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
0159         (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)
0160 
0161 #define HTT_TX_CREDIT_SIGN_BIT_M       0x00000100
0162 #define HTT_TX_CREDIT_SIGN_BIT_S       8
0163 #define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
0164         (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)
0165 
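/*
 * Illustrative sketch (not part of the original header): one plausible way a
 * host could fold the sign bit and the absolute delta extracted by the two
 * GET macros above into a signed credit adjustment.  The helper name is
 * hypothetical.
 */
static inline int htt_tx_credit_delta_example(u32 word)
{
    int delta = HTT_TX_CREDIT_DELTA_ABS_GET(word);

    /* the sign bit selects whether credits are being removed */
    if (HTT_TX_CREDIT_SIGN_BIT_GET(word))
        delta = -delta;

    return delta;
}
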
0166 enum htt_data_tx_ext_tid {
0167     HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
0168     HTT_DATA_TX_EXT_TID_MGMT                = 17,
0169     HTT_DATA_TX_EXT_TID_INVALID             = 31
0170 };
0171 
0172 #define HTT_INVALID_PEERID 0xFFFF
0173 
0174 /*
0175  * htt_data_tx_desc - used for data tx path
0176  *
0177  * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
0178  *       ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
0179  *                for special kinds of tids
0180  *       postponed: only for HL hosts. indicates if this is a resend
0181  *                  (HL hosts manage queues on the host)
0182  *       more_in_batch: only for HL hosts. indicates if more packets are
0183  *                      pending. this allows target to wait and aggregate
0184  *       freq: 0 means home channel of given vdev. intended for offchannel
0185  */
0186 struct htt_data_tx_desc {
0187     u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
0188     __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
0189     __le16 len;
0190     __le16 id;
0191     __le32 frags_paddr;
0192     union {
0193         __le32 peerid;
0194         struct {
0195             __le16 peerid;
0196             __le16 freq;
0197         } __packed offchan_tx;
0198     } __packed;
0199     u8 prefetch[0]; /* start of frame, for FW classification engine */
0200 } __packed;
0201 
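/*
 * Illustrative sketch (assumption, not part of the protocol definition):
 * packing vdev_id and ext_tid into flags1 with the mask + LSB defines in an
 * endian-neutral way, as the descriptor comment above recommends for hosts.
 * The helper name is hypothetical.
 */
static inline __le16 htt_data_tx_desc_flags1_example(u16 vdev_id, u16 ext_tid)
{
    u16 flags1 = 0;

    flags1 |= (vdev_id << HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB) &
          HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK;
    flags1 |= (ext_tid << HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB) &
          HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK;

    return __cpu_to_le16(flags1);
}
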
0202 struct htt_data_tx_desc_64 {
0203     u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
0204     __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
0205     __le16 len;
0206     __le16 id;
0207     __le64 frags_paddr;
0208     union {
0209         __le32 peerid;
0210         struct {
0211             __le16 peerid;
0212             __le16 freq;
0213         } __packed offchan_tx;
0214     } __packed;
0215     u8 prefetch[0]; /* start of frame, for FW classification engine */
0216 } __packed;
0217 
0218 enum htt_rx_ring_flags {
0219     HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
0220     HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
0221     HTT_RX_RING_FLAGS_PPDU_START   = 1 << 2,
0222     HTT_RX_RING_FLAGS_PPDU_END     = 1 << 3,
0223     HTT_RX_RING_FLAGS_MPDU_START   = 1 << 4,
0224     HTT_RX_RING_FLAGS_MPDU_END     = 1 << 5,
0225     HTT_RX_RING_FLAGS_MSDU_START   = 1 << 6,
0226     HTT_RX_RING_FLAGS_MSDU_END     = 1 << 7,
0227     HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
0228     HTT_RX_RING_FLAGS_FRAG_INFO    = 1 << 9,
0229     HTT_RX_RING_FLAGS_UNICAST_RX   = 1 << 10,
0230     HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
0231     HTT_RX_RING_FLAGS_CTRL_RX      = 1 << 12,
0232     HTT_RX_RING_FLAGS_MGMT_RX      = 1 << 13,
0233     HTT_RX_RING_FLAGS_NULL_RX      = 1 << 14,
0234     HTT_RX_RING_FLAGS_PHY_DATA_RX  = 1 << 15
0235 };
0236 
0237 #define HTT_RX_RING_SIZE_MIN 128
0238 #define HTT_RX_RING_SIZE_MAX 2048
0239 #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
0240 #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
0241 #define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
0242 
0243 struct htt_rx_ring_rx_desc_offsets {
0244     /* the following offsets are in 4-byte units */
0245     __le16 mac80211_hdr_offset;
0246     __le16 msdu_payload_offset;
0247     __le16 ppdu_start_offset;
0248     __le16 ppdu_end_offset;
0249     __le16 mpdu_start_offset;
0250     __le16 mpdu_end_offset;
0251     __le16 msdu_start_offset;
0252     __le16 msdu_end_offset;
0253     __le16 rx_attention_offset;
0254     __le16 frag_info_offset;
0255 } __packed;
0256 
0257 struct htt_rx_ring_setup_ring32 {
0258     __le32 fw_idx_shadow_reg_paddr;
0259     __le32 rx_ring_base_paddr;
0260     __le16 rx_ring_len; /* in 4-byte words */
0261     __le16 rx_ring_bufsize; /* rx skb size - in bytes */
0262     __le16 flags; /* %HTT_RX_RING_FLAGS_ */
0263     __le16 fw_idx_init_val;
0264 
0265     struct htt_rx_ring_rx_desc_offsets offsets;
0266 } __packed;
0267 
0268 struct htt_rx_ring_setup_ring64 {
0269     __le64 fw_idx_shadow_reg_paddr;
0270     __le64 rx_ring_base_paddr;
0271     __le16 rx_ring_len; /* in 4-byte words */
0272     __le16 rx_ring_bufsize; /* rx skb size - in bytes */
0273     __le16 flags; /* %HTT_RX_RING_FLAGS_ */
0274     __le16 fw_idx_init_val;
0275 
0276     struct htt_rx_ring_rx_desc_offsets offsets;
0277 } __packed;
0278 
0279 struct htt_rx_ring_setup_hdr {
0280     u8 num_rings; /* supported values: 1, 2 */
0281     __le16 rsvd0;
0282 } __packed;
0283 
0284 struct htt_rx_ring_setup_32 {
0285     struct htt_rx_ring_setup_hdr hdr;
0286     struct htt_rx_ring_setup_ring32 rings[];
0287 } __packed;
0288 
0289 struct htt_rx_ring_setup_64 {
0290     struct htt_rx_ring_setup_hdr hdr;
0291     struct htt_rx_ring_setup_ring64 rings[];
0292 } __packed;
0293 
0294 /*
0295  * htt_stats_req - request target to send specified statistics
0296  *
0297  * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
0298  * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
0299  *  so make sure it's little-endian.
0300  * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
0301  *  so make sure it's little-endian.
0302  * @cfg_val: stat_type specific configuration
0303  * @stat_type: see %htt_dbg_stats_type
0304  * @cookie_lsb: used for confirmation message from target->host
0305  * @cookie_msb: ditto as %cookie
0306  */
0307 struct htt_stats_req {
0308     u8 upload_types[3];
0309     u8 rsvd0;
0310     u8 reset_types[3];
0311     struct {
0312         u8 mpdu_bytes;
0313         u8 mpdu_num_msdus;
0314         u8 msdu_bytes;
0315     } __packed;
0316     u8 stat_type;
0317     __le32 cookie_lsb;
0318     __le32 cookie_msb;
0319 } __packed;
0320 
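/*
 * Illustrative sketch (assumption): writing a %htt_dbg_stats_type bitmask
 * into the 24-bit little-endian upload_types[] array, as the kernel-doc
 * above asks for.  The helper name is hypothetical.
 */
static inline void htt_stats_req_set_upload_types_example(struct htt_stats_req *req,
                              u32 mask)
{
    req->upload_types[0] = mask & 0xff;         /* least significant byte first */
    req->upload_types[1] = (mask >> 8) & 0xff;
    req->upload_types[2] = (mask >> 16) & 0xff;
}
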
0321 #define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
0322 #define HTT_STATS_BIT_MASK GENMASK(16, 0)
0323 
0324 /*
0325  * htt_oob_sync_req - request out-of-band sync
0326  *
0327  * The HTT SYNC tells the target to suspend processing of subsequent
0328  * HTT host-to-target messages until some other target agent locally
0329  * informs the target HTT FW that the current sync counter is equal to
0330  * or greater than (in a modulo sense) the sync counter specified in
0331  * the SYNC message.
0332  *
0333  * This allows other host-target components to synchronize their operation
0334  * with HTT, e.g. to ensure that tx frames don't get transmitted until a
0335  * security key has been downloaded to and activated by the target.
0336  * In the absence of any explicit synchronization counter value
0337  * specification, the target HTT FW will use zero as the default current
0338  * sync value.
0339  *
0340  * The HTT target FW will suspend its host->target message processing as long
0341  * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
0342  */
0343 struct htt_oob_sync_req {
0344     u8 sync_count;
0345     __le16 rsvd0;
0346 } __packed;
0347 
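/*
 * Illustrative sketch (assumption): the modulo-256 comparison described in
 * the comment above, as the target firmware would evaluate it.  Host->target
 * message processing stays suspended while this returns true.  The helper
 * name is hypothetical.
 */
static inline bool htt_oob_sync_is_suspended_example(u8 in_band, u8 out_of_band)
{
    u8 diff = (in_band - out_of_band) & 0xff;

    return diff > 0 && diff < 128;
}
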
0348 struct htt_aggr_conf {
0349     u8 max_num_ampdu_subframes;
0350     /* amsdu_subframes is limited by 0x1F mask */
0351     u8 max_num_amsdu_subframes;
0352 } __packed;
0353 
0354 struct htt_aggr_conf_v2 {
0355     u8 max_num_ampdu_subframes;
0356     /* amsdu_subframes is limited by 0x1F mask */
0357     u8 max_num_amsdu_subframes;
0358     u8 reserved;
0359 } __packed;
0360 
0361 #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
0362 struct htt_mgmt_tx_desc_qca99x0 {
0363     __le32 rate;
0364 } __packed;
0365 
0366 struct htt_mgmt_tx_desc {
0367     u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
0368     __le32 msdu_paddr;
0369     __le32 desc_id;
0370     __le32 len;
0371     __le32 vdev_id;
0372     u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
0373     union {
0374         struct htt_mgmt_tx_desc_qca99x0 qca99x0;
0375     } __packed;
0376 } __packed;
0377 
0378 enum htt_mgmt_tx_status {
0379     HTT_MGMT_TX_STATUS_OK    = 0,
0380     HTT_MGMT_TX_STATUS_RETRY = 1,
0381     HTT_MGMT_TX_STATUS_DROP  = 2
0382 };
0383 
0384 /*=== target -> host messages ===============================================*/
0385 
0386 enum htt_main_t2h_msg_type {
0387     HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF             = 0x0,
0388     HTT_MAIN_T2H_MSG_TYPE_RX_IND                   = 0x1,
0389     HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH                 = 0x2,
0390     HTT_MAIN_T2H_MSG_TYPE_PEER_MAP                 = 0x3,
0391     HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP               = 0x4,
0392     HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA                 = 0x5,
0393     HTT_MAIN_T2H_MSG_TYPE_RX_DELBA                 = 0x6,
0394     HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND             = 0x7,
0395     HTT_MAIN_T2H_MSG_TYPE_PKTLOG                   = 0x8,
0396     HTT_MAIN_T2H_MSG_TYPE_STATS_CONF               = 0x9,
0397     HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND              = 0xa,
0398     HTT_MAIN_T2H_MSG_TYPE_SEC_IND                  = 0xb,
0399     HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND           = 0xd,
0400     HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND        = 0xe,
0401     HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND     = 0xf,
0402     HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND                = 0x10,
0403     HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND   = 0x11,
0404     HTT_MAIN_T2H_MSG_TYPE_TEST,
0405     /* keep this last */
0406     HTT_MAIN_T2H_NUM_MSGS
0407 };
0408 
0409 enum htt_10x_t2h_msg_type {
0410     HTT_10X_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
0411     HTT_10X_T2H_MSG_TYPE_RX_IND                    = 0x1,
0412     HTT_10X_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
0413     HTT_10X_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
0414     HTT_10X_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
0415     HTT_10X_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
0416     HTT_10X_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
0417     HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
0418     HTT_10X_T2H_MSG_TYPE_PKTLOG                    = 0x8,
0419     HTT_10X_T2H_MSG_TYPE_STATS_CONF                = 0x9,
0420     HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
0421     HTT_10X_T2H_MSG_TYPE_SEC_IND                   = 0xb,
0422     HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc,
0423     HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
0424     HTT_10X_T2H_MSG_TYPE_TEST                      = 0xe,
0425     HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE               = 0xf,
0426     HTT_10X_T2H_MSG_TYPE_AGGR_CONF                 = 0x11,
0427     HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD            = 0x12,
0428     HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0x13,
0429     /* keep this last */
0430     HTT_10X_T2H_NUM_MSGS
0431 };
0432 
0433 enum htt_tlv_t2h_msg_type {
0434     HTT_TLV_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
0435     HTT_TLV_T2H_MSG_TYPE_RX_IND                    = 0x1,
0436     HTT_TLV_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
0437     HTT_TLV_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
0438     HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
0439     HTT_TLV_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
0440     HTT_TLV_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
0441     HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
0442     HTT_TLV_T2H_MSG_TYPE_PKTLOG                    = 0x8,
0443     HTT_TLV_T2H_MSG_TYPE_STATS_CONF                = 0x9,
0444     HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
0445     HTT_TLV_T2H_MSG_TYPE_SEC_IND                   = 0xb,
0446     HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc, /* deprecated */
0447     HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
0448     HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0xe,
0449     HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND      = 0xf,
0450     HTT_TLV_T2H_MSG_TYPE_RX_PN_IND                 = 0x10,
0451     HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND    = 0x11,
0452     HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND       = 0x12,
0453     /* 0x13 reserved */
0454     HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE       = 0x14,
0455     HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE               = 0x15,
0456     HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR           = 0x16,
0457     HTT_TLV_T2H_MSG_TYPE_TEST,
0458     /* keep this last */
0459     HTT_TLV_T2H_NUM_MSGS
0460 };
0461 
0462 enum htt_10_4_t2h_msg_type {
0463     HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
0464     HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
0465     HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
0466     HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
0467     HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
0468     HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
0469     HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
0470     HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
0471     HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
0472     HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
0473     HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
0474     HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
0475     HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
0476     HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
0477     HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
0478     HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
0479     HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
0480     HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
0481     HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
0482     HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
0483     HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
0484     HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
0485     HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
0486     HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM       = 0x17,
0487     HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
0488     /* 0x19 to 0x2f are reserved */
0489     HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND     = 0x30,
0490     HTT_10_4_T2H_MSG_TYPE_PEER_STATS         = 0x31,
0491     /* keep this last */
0492     HTT_10_4_T2H_NUM_MSGS
0493 };
0494 
0495 enum htt_t2h_msg_type {
0496     HTT_T2H_MSG_TYPE_VERSION_CONF,
0497     HTT_T2H_MSG_TYPE_RX_IND,
0498     HTT_T2H_MSG_TYPE_RX_FLUSH,
0499     HTT_T2H_MSG_TYPE_PEER_MAP,
0500     HTT_T2H_MSG_TYPE_PEER_UNMAP,
0501     HTT_T2H_MSG_TYPE_RX_ADDBA,
0502     HTT_T2H_MSG_TYPE_RX_DELBA,
0503     HTT_T2H_MSG_TYPE_TX_COMPL_IND,
0504     HTT_T2H_MSG_TYPE_PKTLOG,
0505     HTT_T2H_MSG_TYPE_STATS_CONF,
0506     HTT_T2H_MSG_TYPE_RX_FRAG_IND,
0507     HTT_T2H_MSG_TYPE_SEC_IND,
0508     HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
0509     HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
0510     HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
0511     HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
0512     HTT_T2H_MSG_TYPE_RX_PN_IND,
0513     HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
0514     HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
0515     HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
0516     HTT_T2H_MSG_TYPE_CHAN_CHANGE,
0517     HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
0518     HTT_T2H_MSG_TYPE_AGGR_CONF,
0519     HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
0520     HTT_T2H_MSG_TYPE_TEST,
0521     HTT_T2H_MSG_TYPE_EN_STATS,
0522     HTT_T2H_MSG_TYPE_TX_FETCH_IND,
0523     HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
0524     HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
0525     HTT_T2H_MSG_TYPE_PEER_STATS,
0526     /* keep this last */
0527     HTT_T2H_NUM_MSGS
0528 };
0529 
0530 /*
0531  * htt_resp_hdr - header for target-to-host messages
0532  *
0533  * msg_type: see htt_t2h_msg_type
0534  */
0535 struct htt_resp_hdr {
0536     u8 msg_type;
0537 } __packed;
0538 
0539 #define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
0540 #define HTT_RESP_HDR_MSG_TYPE_MASK   0xff
0541 #define HTT_RESP_HDR_MSG_TYPE_LSB    0
0542 
0543 /* htt_ver_resp - response sent for htt_ver_req */
0544 struct htt_ver_resp {
0545     u8 minor;
0546     u8 major;
0547     u8 rsvd0;
0548 } __packed;
0549 
0550 #define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)
0551 
0552 #define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0)
0553 
0554 struct htt_mgmt_tx_completion {
0555     u8 rsvd0;
0556     u8 rsvd1;
0557     u8 flags;
0558     __le32 desc_id;
0559     __le32 status;
0560     __le32 ppdu_id;
0561     __le32 info;
0562 } __packed;
0563 
0564 #define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x1F)
0565 #define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
0566 #define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 5)
0567 #define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
0568 #define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7)
0569 
0570 #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
0571 #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
0572 #define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
0573 #define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
0574 #define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
0575 #define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
0576 #define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
0577 #define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
0578 #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
0579 #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24
0580 
0581 #define HTT_TX_CMPL_FLAG_DATA_RSSI      BIT(0)
0582 #define HTT_TX_CMPL_FLAG_PPID_PRESENT       BIT(1)
0583 #define HTT_TX_CMPL_FLAG_PA_PRESENT     BIT(2)
0584 #define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT  BIT(3)
0585 
0586 #define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3)
0587 #define HTT_TX_DATA_APPEND_RETRIES BIT(0)
0588 #define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1)
0589 
0590 struct htt_rx_indication_hdr {
0591     u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
0592     __le16 peer_id;
0593     __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
0594 } __packed;
0595 
0596 #define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
0597 #define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
0598 #define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
0599 #define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
0600 #define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
0601 #define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)
0602 
0603 #define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
0604 #define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
0605 #define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
0606 #define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24
0607 
0608 #define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
0609 #define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB  0
0610 #define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
0611 #define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
0612 
0613 enum htt_rx_legacy_rate {
0614     HTT_RX_OFDM_48 = 0,
0615     HTT_RX_OFDM_24 = 1,
0616     HTT_RX_OFDM_12,
0617     HTT_RX_OFDM_6,
0618     HTT_RX_OFDM_54,
0619     HTT_RX_OFDM_36,
0620     HTT_RX_OFDM_18,
0621     HTT_RX_OFDM_9,
0622 
0623     /* long preamble */
0624     HTT_RX_CCK_11_LP = 0,
0625     HTT_RX_CCK_5_5_LP = 1,
0626     HTT_RX_CCK_2_LP,
0627     HTT_RX_CCK_1_LP,
0628     /* short preamble */
0629     HTT_RX_CCK_11_SP,
0630     HTT_RX_CCK_5_5_SP,
0631     HTT_RX_CCK_2_SP
0632 };
0633 
0634 enum htt_rx_legacy_rate_type {
0635     HTT_RX_LEGACY_RATE_OFDM = 0,
0636     HTT_RX_LEGACY_RATE_CCK
0637 };
0638 
0639 enum htt_rx_preamble_type {
0640     HTT_RX_LEGACY        = 0x4,
0641     HTT_RX_HT            = 0x8,
0642     HTT_RX_HT_WITH_TXBF  = 0x9,
0643     HTT_RX_VHT           = 0xC,
0644     HTT_RX_VHT_WITH_TXBF = 0xD,
0645 };
0646 
0647 /*
0648  * Fields: phy_err_valid, phy_err_code, tsf,
0649  * usec_timestamp, sub_usec_timestamp
0650  * ..are valid only if end_valid == 1.
0651  *
0652  * Fields: rssi_chains, legacy_rate_type,
0653  * legacy_rate_cck, preamble_type, service,
0654  * vht_sig_*
0655  * ..are valid only if start_valid == 1;
0656  */
0657 struct htt_rx_indication_ppdu {
0658     u8 combined_rssi;
0659     u8 sub_usec_timestamp;
0660     u8 phy_err_code;
0661     u8 info0; /* HTT_RX_INDICATION_INFO0_ */
0662     struct {
0663         u8 pri20_db;
0664         u8 ext20_db;
0665         u8 ext40_db;
0666         u8 ext80_db;
0667     } __packed rssi_chains[4];
0668     __le32 tsf;
0669     __le32 usec_timestamp;
0670     __le32 info1; /* HTT_RX_INDICATION_INFO1_ */
0671     __le32 info2; /* HTT_RX_INDICATION_INFO2_ */
0672 } __packed;
0673 
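/*
 * Illustrative sketch (assumption): honouring the start_valid/end_valid
 * rules from the comment above before trusting the PPDU fields.  The helper
 * names are hypothetical.
 */
static inline bool htt_rx_ppdu_start_fields_valid_example(const struct htt_rx_indication_ppdu *ppdu)
{
    /* rssi_chains, legacy rate, preamble, service, vht_sig_* need start_valid */
    return !!(ppdu->info0 & HTT_RX_INDICATION_INFO0_START_VALID);
}

static inline bool htt_rx_ppdu_end_fields_valid_example(const struct htt_rx_indication_ppdu *ppdu)
{
    /* phy_err_*, tsf and the timestamps need end_valid */
    return !!(ppdu->info0 & HTT_RX_INDICATION_INFO0_END_VALID);
}
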
0674 enum htt_rx_mpdu_status {
0675     HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
0676     HTT_RX_IND_MPDU_STATUS_OK,
0677     HTT_RX_IND_MPDU_STATUS_ERR_FCS,
0678     HTT_RX_IND_MPDU_STATUS_ERR_DUP,
0679     HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
0680     HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
0681     /* only accept EAPOL frames */
0682     HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
0683     HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
0684     /* Non-data in promiscuous mode */
0685     HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
0686     HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
0687     HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
0688     HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
0689     HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
0690     HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
0691 
0692     /*
0693      * MISC: discard for unspecified reasons.
0694      * Leave this enum value last.
0695      */
0696     HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
0697 };
0698 
0699 struct htt_rx_indication_mpdu_range {
0700     u8 mpdu_count;
0701     u8 mpdu_range_status; /* %htt_rx_mpdu_status */
0702     u8 pad0;
0703     u8 pad1;
0704 } __packed;
0705 
0706 struct htt_rx_indication_prefix {
0707     __le16 fw_rx_desc_bytes;
0708     u8 pad0;
0709     u8 pad1;
0710 };
0711 
0712 struct htt_rx_indication {
0713     struct htt_rx_indication_hdr hdr;
0714     struct htt_rx_indication_ppdu ppdu;
0715     struct htt_rx_indication_prefix prefix;
0716 
0717     /*
0718      * the following fields are both dynamically sized, so
0719      * take care addressing them
0720      */
0721      * take care when addressing them
0722     /* the size of this is %fw_rx_desc_bytes */
0723     struct fw_rx_desc_base fw_desc;
0724 
0725     /*
0726      * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
0727      * and has %num_mpdu_ranges elements.
0728      */
0729     struct htt_rx_indication_mpdu_range mpdu_ranges[];
0730 } __packed;
0731 
0732 /* High latency version of the RX indication */
0733 struct htt_rx_indication_hl {
0734     struct htt_rx_indication_hdr hdr;
0735     struct htt_rx_indication_ppdu ppdu;
0736     struct htt_rx_indication_prefix prefix;
0737     struct fw_rx_desc_hl fw_desc;
0738     struct htt_rx_indication_mpdu_range mpdu_ranges[];
0739 } __packed;
0740 
0741 struct htt_hl_rx_desc {
0742     __le32 info;
0743     __le32 pn_31_0;
0744     union {
0745         struct {
0746             __le16 pn_47_32;
0747             __le16 pn_63_48;
0748         } pn16;
0749         __le32 pn_63_32;
0750     } u0;
0751     __le32 pn_95_64;
0752     __le32 pn_127_96;
0753 } __packed;
0754 
0755 static inline struct htt_rx_indication_mpdu_range *
0756         htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
0757 {
0758     void *ptr = rx_ind;
0759 
0760     ptr += sizeof(rx_ind->hdr)
0761          + sizeof(rx_ind->ppdu)
0762          + sizeof(rx_ind->prefix)
0763          + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
0764     return ptr;
0765 }
0766 
0767 static inline struct htt_rx_indication_mpdu_range *
0768     htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
0769 {
0770     void *ptr = rx_ind;
0771 
0772     ptr += sizeof(rx_ind->hdr)
0773          + sizeof(rx_ind->ppdu)
0774          + sizeof(rx_ind->prefix)
0775          + sizeof(rx_ind->fw_desc);
0776     return ptr;
0777 }
0778 
0779 enum htt_rx_flush_mpdu_status {
0780     HTT_RX_FLUSH_MPDU_DISCARD = 0,
0781     HTT_RX_FLUSH_MPDU_REORDER = 1,
0782 };
0783 
0784 /*
0785  * htt_rx_flush - discard or reorder given range of mpdus
0786  *
0787  * Note: host must check if all sequence numbers between
0788  *  [seq_num_start, seq_num_end-1] are valid.
0789  */
0790 struct htt_rx_flush {
0791     __le16 peer_id;
0792     u8 tid;
0793     u8 rsvd0;
0794     u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
0795     u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
0796     u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
0797 };
0798 
0799 struct htt_rx_peer_map {
0800     u8 vdev_id;
0801     __le16 peer_id;
0802     u8 addr[6];
0803     u8 rsvd0;
0804     u8 rsvd1;
0805 } __packed;
0806 
0807 struct htt_rx_peer_unmap {
0808     u8 rsvd0;
0809     __le16 peer_id;
0810 } __packed;
0811 
0812 enum htt_txrx_sec_cast_type {
0813     HTT_TXRX_SEC_MCAST = 0,
0814     HTT_TXRX_SEC_UCAST
0815 };
0816 
0817 enum htt_rx_pn_check_type {
0818     HTT_RX_NON_PN_CHECK = 0,
0819     HTT_RX_PN_CHECK
0820 };
0821 
0822 enum htt_rx_tkip_demic_type {
0823     HTT_RX_NON_TKIP_MIC = 0,
0824     HTT_RX_TKIP_MIC
0825 };
0826 
0827 enum htt_security_types {
0828     HTT_SECURITY_NONE,
0829     HTT_SECURITY_WEP128,
0830     HTT_SECURITY_WEP104,
0831     HTT_SECURITY_WEP40,
0832     HTT_SECURITY_TKIP,
0833     HTT_SECURITY_TKIP_NOMIC,
0834     HTT_SECURITY_AES_CCMP,
0835     HTT_SECURITY_WAPI,
0836 
0837     HTT_NUM_SECURITY_TYPES /* keep this last! */
0838 };
0839 
0840 #define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
0841 #define ATH10K_TXRX_NUM_EXT_TIDS 19
0842 #define ATH10K_TXRX_NON_QOS_TID 16
0843 
0844 enum htt_security_flags {
0845 #define HTT_SECURITY_TYPE_MASK 0x7F
0846 #define HTT_SECURITY_TYPE_LSB  0
0847     HTT_SECURITY_IS_UNICAST = 1 << 7
0848 };
0849 
0850 struct htt_security_indication {
0851     union {
0852         /* don't use bitfields; undefined behaviour */
0853         u8 flags; /* %htt_security_flags */
0854         struct {
0855             u8 security_type:7, /* %htt_security_types */
0856                is_unicast:1;
0857         } __packed;
0858     } __packed;
0859     __le16 peer_id;
0860     u8 michael_key[8];
0861     u8 wapi_rsc[16];
0862 } __packed;
0863 
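/*
 * Illustrative sketch (assumption): decoding the flags byte with the
 * mask/bit defines instead of the bitfield view, as the union comment above
 * advises.  The helper names are hypothetical.
 */
static inline u8 htt_security_ind_type_example(const struct htt_security_indication *ind)
{
    return (ind->flags & HTT_SECURITY_TYPE_MASK) >> HTT_SECURITY_TYPE_LSB;
}

static inline bool htt_security_ind_is_unicast_example(const struct htt_security_indication *ind)
{
    return !!(ind->flags & HTT_SECURITY_IS_UNICAST);
}
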
0864 #define HTT_RX_BA_INFO0_TID_MASK     0x000F
0865 #define HTT_RX_BA_INFO0_TID_LSB      0
0866 #define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
0867 #define HTT_RX_BA_INFO0_PEER_ID_LSB  4
0868 
0869 struct htt_rx_addba {
0870     u8 window_size;
0871     __le16 info0; /* %HTT_RX_BA_INFO0_ */
0872 } __packed;
0873 
0874 struct htt_rx_delba {
0875     u8 rsvd0;
0876     __le16 info0; /* %HTT_RX_BA_INFO0_ */
0877 } __packed;
0878 
0879 enum htt_data_tx_status {
0880     HTT_DATA_TX_STATUS_OK            = 0,
0881     HTT_DATA_TX_STATUS_DISCARD       = 1,
0882     HTT_DATA_TX_STATUS_NO_ACK        = 2,
0883     HTT_DATA_TX_STATUS_POSTPONE      = 3, /* HL only */
0884     HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
0885 };
0886 
0887 enum htt_data_tx_flags {
0888 #define HTT_DATA_TX_STATUS_MASK 0x07
0889 #define HTT_DATA_TX_STATUS_LSB  0
0890 #define HTT_DATA_TX_TID_MASK    0x78
0891 #define HTT_DATA_TX_TID_LSB     3
0892     HTT_DATA_TX_TID_INVALID = 1 << 7
0893 };
0894 
0895 #define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
0896 
0897 struct htt_append_retries {
0898     __le16 msdu_id;
0899     u8 tx_retries;
0900     u8 flag;
0901 } __packed;
0902 
0903 struct htt_data_tx_completion_ext {
0904     struct htt_append_retries a_retries;
0905     __le32 t_stamp;
0906     __le16 msdus_rssi[];
0907 } __packed;
0908 
0909 /**
0910  * @brief target -> host TX completion indication message definition
0911  *
0912  * @details
0913  * The following diagram shows the format of the TX completion indication sent
0914  * from the target to the host
0915  *
0916  *          |31 28|27|26|25|24|23        16| 15 |14 11|10   8|7          0|
0917  *          |-------------------------------------------------------------|
0918  * header:  |rsvd |A2|TP|A1|A0|     num    | t_i| tid |status|  msg_type  |
0919  *          |-------------------------------------------------------------|
0920  * payload: |            MSDU1 ID          |         MSDU0 ID             |
0921  *          |-------------------------------------------------------------|
0922  *          :            MSDU3 ID          :         MSDU2 ID             :
0923  *          |-------------------------------------------------------------|
0924  *          |          struct htt_tx_compl_ind_append_retries             |
0925  *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
0926  *          |          struct htt_tx_compl_ind_append_tx_tstamp           |
0927  *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
0928  *          |           MSDU1 ACK RSSI     |        MSDU0 ACK RSSI        |
0929  *          |-------------------------------------------------------------|
0930  *          :           MSDU3 ACK RSSI     :        MSDU2 ACK RSSI        :
0931  *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
0932  *    -msg_type
0933  *     Bits 7:0
0934  *     Purpose: identifies this as HTT TX completion indication
0935  *    -status
0936  *     Bits 10:8
0937  *     Purpose: the TX completion status of the payload fragmentation descriptors
0938  *     Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
0939  *    -tid
0940  *     Bits 14:11
0941  *     Purpose: the tid associated with those fragmentation descriptors. It is
0942  *     valid or not, depending on the tid_invalid bit.
0943  *     Value: 0 to 15
0944  *    -tid_invalid
0945  *     Bits 15:15
0946  *     Purpose: this bit indicates whether the tid field is valid or not
0947  *     Value: 0 indicates valid, 1 indicates invalid
0948  *    -num
0949  *     Bits 23:16
0950  *     Purpose: the number of payloads in this indication
0951  *     Value: 1 to 255
0952  *    -A0 = append
0953  *     Bits 24:24
0954  *     Purpose: append the struct htt_tx_compl_ind_append_retries which contains
0955  *            the number of tx retries for one MSDU at the end of this message
0956  *     Value: 0 indicates no appending, 1 indicates appending
0957  *    -A1 = append1
0958  *     Bits 25:25
0959  *     Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
0960  *            contains the timestamp info for each TX msdu id in payload.
0961  *     Value: 0 indicates no appending, 1 indicates appending
0962  *    -TP = MSDU tx power presence
0963  *     Bits 26:26
0964  *     Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
0965  *            for each MSDU referenced by the TX_COMPL_IND message.
0966  *            The order of the per-MSDU tx power reports matches the order
0967  *            of the MSDU IDs.
0968  *     Value: 0 indicates not appending, 1 indicates appending
0969  *    -A2 = append2
0970  *     Bits 27:27
0971  *     Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
0972  *            TX_COMP_IND message.  The order of the per-MSDU ACK RSSI report
0973  *            matches the order of the MSDU IDs.
0974  *            The ACK RSSI values are valid when status is COMPLETE_OK (and
0975  *            this append2 bit is set).
0976  *     Value: 0 indicates not appending, 1 indicates appending
0977  */
0978 
0979 struct htt_data_tx_completion {
0980     union {
0981         u8 flags;
0982         struct {
0983             u8 status:3,
0984                tid:4,
0985                tid_invalid:1;
0986         } __packed;
0987     } __packed;
0988     u8 num_msdus;
0989     u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
0990     __le16 msdus[]; /* variable length based on %num_msdus */
0991 } __packed;
0992 
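/*
 * Illustrative sketch (assumption): pulling the completion status and tid
 * out of the flags byte with the HTT_DATA_TX_* mask/shift defines, matching
 * the header layout documented above.  The helper names are hypothetical.
 */
static inline u8 htt_data_tx_compl_status_example(const struct htt_data_tx_completion *resp)
{
    return (resp->flags & HTT_DATA_TX_STATUS_MASK) >> HTT_DATA_TX_STATUS_LSB;
}

static inline u8 htt_data_tx_compl_tid_example(const struct htt_data_tx_completion *resp)
{
    /* only meaningful when the HTT_DATA_TX_TID_INVALID bit is clear */
    return (resp->flags & HTT_DATA_TX_TID_MASK) >> HTT_DATA_TX_TID_LSB;
}
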
0993 #define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK  GENMASK(15, 0)
0994 #define HTT_TX_PPDU_DUR_INFO0_TID_MASK      GENMASK(20, 16)
0995 
0996 struct htt_data_tx_ppdu_dur {
0997     __le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */
0998     __le32 tx_duration; /* in usecs */
0999 } __packed;
1000 
1001 #define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK    GENMASK(7, 0)
1002 
1003 struct htt_data_tx_compl_ppdu_dur {
1004     __le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */
1005     struct htt_data_tx_ppdu_dur ppdu_dur[];
1006 } __packed;
1007 
1008 struct htt_tx_compl_ind_base {
1009     u32 hdr;
1010     u16 payload[1/*or more*/];
1011 } __packed;
1012 
1013 struct htt_rc_tx_done_params {
1014     u32 rate_code;
1015     u32 rate_code_flags;
1016     u32 flags;
1017     u32 num_enqued; /* 1 for non-AMPDU */
1018     u32 num_retries;
1019     u32 num_failed; /* for AMPDU */
1020     u32 ack_rssi;
1021     u32 time_stamp;
1022     u32 is_probe;
1023 };
1024 
1025 struct htt_rc_update {
1026     u8 vdev_id;
1027     __le16 peer_id;
1028     u8 addr[6];
1029     u8 num_elems;
1030     u8 rsvd0;
1031     struct htt_rc_tx_done_params params[]; /* variable length %num_elems */
1032 } __packed;
1033 
1034 /* see htt_rx_indication for similar fields and descriptions */
1035 struct htt_rx_fragment_indication {
1036     union {
1037         u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
1038         struct {
1039             u8 ext_tid:5,
1040                flush_valid:1;
1041         } __packed;
1042     } __packed;
1043     __le16 peer_id;
1044     __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
1045     __le16 fw_rx_desc_bytes;
1046     __le16 rsvd0;
1047 
1048     u8 fw_msdu_rx_desc[];
1049 } __packed;
1050 
1051 #define ATH10K_IEEE80211_EXTIV               BIT(5)
1052 #define ATH10K_IEEE80211_TKIP_MICLEN         8   /* trailing MIC */
1053 
1054 #define HTT_RX_FRAG_IND_INFO0_HEADER_LEN     16
1055 
1056 #define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK     0x1F
1057 #define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB      0
1058 #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
1059 #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB  5
1060 
1061 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
1062 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB  0
1063 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK   0x00000FC0
1064 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB    6
1065 
1066 struct htt_rx_pn_ind {
1067     __le16 peer_id;
1068     u8 tid;
1069     u8 seqno_start;
1070     u8 seqno_end;
1071     u8 pn_ie_count;
1072     u8 reserved;
1073     u8 pn_ies[];
1074 } __packed;
1075 
1076 struct htt_rx_offload_msdu {
1077     __le16 msdu_len;
1078     __le16 peer_id;
1079     u8 vdev_id;
1080     u8 tid;
1081     u8 fw_desc;
1082     u8 payload[];
1083 } __packed;
1084 
1085 struct htt_rx_offload_ind {
1086     u8 reserved;
1087     __le16 msdu_count;
1088 } __packed;
1089 
1090 struct htt_rx_in_ord_msdu_desc {
1091     __le32 msdu_paddr;
1092     __le16 msdu_len;
1093     u8 fw_desc;
1094     u8 reserved;
1095 } __packed;
1096 
1097 struct htt_rx_in_ord_msdu_desc_ext {
1098     __le64 msdu_paddr;
1099     __le16 msdu_len;
1100     u8 fw_desc;
1101     u8 reserved;
1102 } __packed;
1103 
1104 struct htt_rx_in_ord_ind {
1105     u8 info;
1106     __le16 peer_id;
1107     u8 vdev_id;
1108     u8 reserved;
1109     __le16 msdu_count;
1110     union {
1111         struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
1112         struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
1113     } __packed;
1114 } __packed;
1115 
1116 #define HTT_RX_IN_ORD_IND_INFO_TID_MASK     0x0000001f
1117 #define HTT_RX_IN_ORD_IND_INFO_TID_LSB      0
1118 #define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
1119 #define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB  5
1120 #define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK    0x00000040
1121 #define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB     6
1122 
1123 /*
1124  * target -> host test message definition
1125  *
1126  * The following field definitions describe the format of the test
1127  * message sent from the target to the host.
1128  * The message consists of a 4-octet header, followed by a variable
1129  * number of 32-bit integer values, followed by a variable number
1130  * of 8-bit character values.
1131  *
1132  * |31                         16|15           8|7            0|
1133  * |-----------------------------------------------------------|
1134  * |          num chars          |   num ints   |   msg type   |
1135  * |-----------------------------------------------------------|
1136  * |                           int 0                           |
1137  * |-----------------------------------------------------------|
1138  * |                           int 1                           |
1139  * |-----------------------------------------------------------|
1140  * |                            ...                            |
1141  * |-----------------------------------------------------------|
1142  * |    char 3    |    char 2    |    char 1    |    char 0    |
1143  * |-----------------------------------------------------------|
1144  * |              |              |      ...     |    char 4    |
1145  * |-----------------------------------------------------------|
1146  *   - MSG_TYPE
1147  *     Bits 7:0
1148  *     Purpose: identifies this as a test message
1149  *     Value: HTT_MSG_TYPE_TEST
1150  *   - NUM_INTS
1151  *     Bits 15:8
1152  *     Purpose: indicate how many 32-bit integers follow the message header
1153  *   - NUM_CHARS
1154  *     Bits 31:16
1155  *     Purpose: indicate how many 8-bit characters follow the series of integers
1156  */
1157 struct htt_rx_test {
1158     u8 num_ints;
1159     __le16 num_chars;
1160 
1161     /* payload consists of 2 lists:
1162      *  a) num_ints * sizeof(__le32)
1163      *  b) num_chars * sizeof(u8) aligned to 4 bytes
1164      */
1165     u8 payload[];
1166 } __packed;
1167 
1168 static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
1169 {
1170     return (__le32 *)rx_test->payload;
1171 }
1172 
1173 static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
1174 {
1175     return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
1176 }
1177 
1178 /*
1179  * target -> host packet log message
1180  *
1181  * The following field definitions describe the format of the packet log
1182  * message sent from the target to the host.
1183  * The message consists of a 4-octet header, followed by a variable number
1184  * of 32-bit character values.
1185  *
1186  * |31          24|23          16|15           8|7            0|
1187  * |-----------------------------------------------------------|
1188  * |              |              |              |   msg type   |
1189  * |-----------------------------------------------------------|
1190  * |                        payload                            |
1191  * |-----------------------------------------------------------|
1192  *   - MSG_TYPE
1193  *     Bits 7:0
1194  *     Purpose: identifies this as a packet log message
1195  *     Value: HTT_MSG_TYPE_PACKETLOG
1196  */
1197 struct htt_pktlog_msg {
1198     u8 pad[3];
1199     u8 payload[];
1200 } __packed;
1201 
1202 struct htt_dbg_stats_rx_reorder_stats {
1203     /* Non QoS MPDUs received */
1204     __le32 deliver_non_qos;
1205 
1206     /* MPDUs received in-order */
1207     __le32 deliver_in_order;
1208 
1209     /* Flush due to reorder timer expired */
1210     __le32 deliver_flush_timeout;
1211 
1212     /* Flush due to move out of window */
1213     __le32 deliver_flush_oow;
1214 
1215     /* Flush due to DELBA */
1216     __le32 deliver_flush_delba;
1217 
1218     /* MPDUs dropped due to FCS error */
1219     __le32 fcs_error;
1220 
1221     /* MPDUs dropped due to monitor mode non-data packet */
1222     __le32 mgmt_ctrl;
1223 
1224     /* MPDUs dropped due to invalid peer */
1225     __le32 invalid_peer;
1226 
1227     /* MPDUs dropped due to duplication (non aggregation) */
1228     __le32 dup_non_aggr;
1229 
1230     /* MPDUs dropped due to processed before */
1231     __le32 dup_past;
1232 
1233     /* MPDUs dropped due to duplicate in reorder queue */
1234     __le32 dup_in_reorder;
1235 
1236     /* Reorder timeout happened */
1237     __le32 reorder_timeout;
1238 
1239     /* invalid bar ssn */
1240     __le32 invalid_bar_ssn;
1241 
1242     /* reorder reset due to bar ssn */
1243     __le32 ssn_reset;
1244 };
1245 
1246 struct htt_dbg_stats_wal_tx_stats {
1247     /* Num HTT cookies queued to dispatch list */
1248     __le32 comp_queued;
1249 
1250     /* Num HTT cookies dispatched */
1251     __le32 comp_delivered;
1252 
1253     /* Num MSDU queued to WAL */
1254     __le32 msdu_enqued;
1255 
1256     /* Num MPDU queue to WAL */
1257     __le32 mpdu_enqued;
1258 
1259     /* Num MSDUs dropped by WMM limit */
1260     __le32 wmm_drop;
1261 
1262     /* Num Local frames queued */
1263     __le32 local_enqued;
1264 
1265     /* Num Local frames done */
1266     __le32 local_freed;
1267 
1268     /* Num queued to HW */
1269     __le32 hw_queued;
1270 
1271     /* Num PPDU reaped from HW */
1272     __le32 hw_reaped;
1273 
1274     /* Num underruns */
1275     __le32 underrun;
1276 
1277     /* Num PPDUs cleaned up in TX abort */
1278     __le32 tx_abort;
1279 
1280     /* Num MPDUs requeued by SW */
1281     __le32 mpdus_requeued;
1282 
1283     /* excessive retries */
1284     __le32 tx_ko;
1285 
1286     /* data hw rate code */
1287     __le32 data_rc;
1288 
1289     /* Scheduler self triggers */
1290     __le32 self_triggers;
1291 
1292     /* frames dropped due to excessive sw retries */
1293     __le32 sw_retry_failure;
1294 
1295     /* illegal rate phy errors  */
1296     __le32 illgl_rate_phy_err;
1297 
1298     /* wal pdev continuous xretry */
1299     __le32 pdev_cont_xretry;
1300 
1301     /* wal pdev continuous xretry */
1302     __le32 pdev_tx_timeout;
1303 
1304     /* wal pdev resets  */
1305     __le32 pdev_resets;
1306 
1307     __le32 phy_underrun;
1308 
1309     /* MPDU is more than txop limit */
1310     __le32 txop_ovf;
1311 } __packed;
1312 
1313 struct htt_dbg_stats_wal_rx_stats {
1314     /* Cnts any change in ring routing mid-ppdu */
1315     __le32 mid_ppdu_route_change;
1316 
1317     /* Total number of statuses processed */
1318     __le32 status_rcvd;
1319 
1320     /* Extra frags on rings 0-3 */
1321     __le32 r0_frags;
1322     __le32 r1_frags;
1323     __le32 r2_frags;
1324     __le32 r3_frags;
1325 
1326     /* MSDUs / MPDUs delivered to HTT */
1327     __le32 htt_msdus;
1328     __le32 htt_mpdus;
1329 
1330     /* MSDUs / MPDUs delivered to local stack */
1331     __le32 loc_msdus;
1332     __le32 loc_mpdus;
1333 
1334     /* AMSDUs that have more MSDUs than the status ring size */
1335     __le32 oversize_amsdu;
1336 
1337     /* Number of PHY errors */
1338     __le32 phy_errs;
1339 
1340     /* Number of PHY errors drops */
1341     __le32 phy_err_drop;
1342 
1343     /* Number of mpdu errors - FCS, MIC, ENC etc. */
1344     __le32 mpdu_errs;
1345 } __packed;
1346 
1347 struct htt_dbg_stats_wal_peer_stats {
1348     __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
1349 } __packed;
1350 
1351 struct htt_dbg_stats_wal_pdev_txrx {
1352     struct htt_dbg_stats_wal_tx_stats tx_stats;
1353     struct htt_dbg_stats_wal_rx_stats rx_stats;
1354     struct htt_dbg_stats_wal_peer_stats peer_stats;
1355 } __packed;
1356 
1357 struct htt_dbg_stats_rx_rate_info {
1358     __le32 mcs[10];
1359     __le32 sgi[10];
1360     __le32 nss[4];
1361     __le32 stbc[10];
1362     __le32 bw[3];
1363     __le32 pream[6];
1364     __le32 ldpc;
1365     __le32 txbf;
1366 };
1367 
1368 /*
1369  * htt_dbg_stats_status -
1370  * present -     The requested stats have been delivered in full.
1371  *               This indicates that either the stats information was contained
1372  *               in its entirety within this message, or else this message
1373  *               completes the delivery of the requested stats info that was
1374  *               partially delivered through earlier STATS_CONF messages.
1375  * partial -     The requested stats have been delivered in part.
1376  *               One or more subsequent STATS_CONF messages with the same
1377  *               cookie value will be sent to deliver the remainder of the
1378  *               information.
1379  * error -       The requested stats could not be delivered, for example due
1380  *               to a shortage of memory to construct a message holding the
1381  *               requested stats.
1382  * invalid -     The requested stat type is either not recognized, or the
1383  *               target is configured to not gather the stats type in question.
1384  * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1385  * series_done - This special value indicates that no further stats info
1386  *               elements are present within a series of stats info elems
1387  *               (within a stats upload confirmation message).
1388  */
1389 enum htt_dbg_stats_status {
1390     HTT_DBG_STATS_STATUS_PRESENT     = 0,
1391     HTT_DBG_STATS_STATUS_PARTIAL     = 1,
1392     HTT_DBG_STATS_STATUS_ERROR       = 2,
1393     HTT_DBG_STATS_STATUS_INVALID     = 3,
1394     HTT_DBG_STATS_STATUS_SERIES_DONE = 7
1395 };
1396 
1397 /*
1398  * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
1399  *
1400  * The following field definitions describe the format of the HTT host
1401  * to target frag_desc/msdu_ext bank configuration message.
1402  * The message contains the base address and the min and max id of the
1403  * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
1404  * MSDU_EXT/FRAG_DESC.
1405  * HTT will use the id in the HTT descriptor instead of sending the frag_desc_ptr.
1406  * For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0
1407  * the hardware does the mapping/translation.
1408  *
1409  * The total number of banks that can be configured is 16.
1410  *
1411  * This should be called before any TX has been initiated by the HTT.
1412  *
1413  * |31                         16|15           8|7   5|4       0|
1414  * |------------------------------------------------------------|
1415  * | DESC_SIZE    |  NUM_BANKS   | RES |SWP|pdev|    msg type   |
1416  * |------------------------------------------------------------|
1417  * |                     BANK0_BASE_ADDRESS                     |
1418  * |------------------------------------------------------------|
1419  * |                            ...                             |
1420  * |------------------------------------------------------------|
1421  * |                    BANK15_BASE_ADDRESS                     |
1422  * |------------------------------------------------------------|
1423  * |       BANK0_MAX_ID          |       BANK0_MIN_ID           |
1424  * |------------------------------------------------------------|
1425  * |                            ...                             |
1426  * |------------------------------------------------------------|
1427  * |       BANK15_MAX_ID         |       BANK15_MIN_ID          |
1428  * |------------------------------------------------------------|
1429  * Header fields:
1430  *  - MSG_TYPE
1431  *    Bits 7:0
1432  *    Value: 0x6
1433  *  - BANKx_BASE_ADDRESS
1434  *    Bits 31:0
1435  *    Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
1436  *         bank physical/bus address.
1437  *  - BANKx_MIN_ID
1438  *    Bits 15:0
1439  *    Purpose: Provide a mechanism to specify the min index that needs to
1440  *          be mapped.
1441  *  - BANKx_MAX_ID
1442  *    Bits 31:16
1443  *    Purpose: Provide a mechanism to specify the max index that needs to
1444  *          be mapped.
1445  */
1446 struct htt_frag_desc_bank_id {
1447     __le16 bank_min_id;
1448     __le16 bank_max_id;
1449 } __packed;
1450 
1451 /* real is 16 but it wouldn't fit in the max htt message size
1452  * so we use a conservatively safe value for now
1453  */
1454 #define HTT_FRAG_DESC_BANK_MAX 4
1455 
1456 #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK        0x03
1457 #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB         0
1458 #define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP            BIT(2)
1459 #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID       BIT(3)
1460 #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4)
1461 #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB  4
1462 
1463 enum htt_q_depth_type {
1464     HTT_Q_DEPTH_TYPE_BYTES = 0,
1465     HTT_Q_DEPTH_TYPE_MSDUS = 1,
1466 };
1467 
1468 #define HTT_TX_Q_STATE_NUM_PEERS        (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
1469                          TARGET_10_4_NUM_VDEVS)
1470 #define HTT_TX_Q_STATE_NUM_TIDS         8
1471 #define HTT_TX_Q_STATE_ENTRY_SIZE       1
1472 #define HTT_TX_Q_STATE_ENTRY_MULTIPLIER     0
1473 
1474 /**
1475  * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
1476  *
1477  * Defines host q state format and behavior. See htt_q_state.
1478  *
1479  * @record_size: Defines the size of each host q entry in bytes. In practice
1480  *  however firmware (at least 10.4.3-00191) ignores this host
1481  *  configuration value and uses a hardcoded value of 1.
1482  * @record_multiplier: This is valid only when q depth type is MSDUs. It
1483  *  defines the exponent for the power of 2 multiplication.
1484  */
1485 struct htt_q_state_conf {
1486     __le32 paddr;
1487     __le16 num_peers;
1488     __le16 num_tids;
1489     u8 record_size;
1490     u8 record_multiplier;
1491     u8 pad[2];
1492 } __packed;
1493 
1494 struct htt_frag_desc_bank_cfg32 {
1495     u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
1496     u8 num_banks;
1497     u8 desc_size;
1498     __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
1499     struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
1500     struct htt_q_state_conf q_state;
1501 } __packed;
1502 
1503 struct htt_frag_desc_bank_cfg64 {
1504     u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
1505     u8 num_banks;
1506     u8 desc_size;
1507     __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
1508     struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
1509     struct htt_q_state_conf q_state;
1510 } __packed;
1511 
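/*
 * Illustrative sketch (assumption): composing the one-byte info field of a
 * frag desc bank configuration message from the
 * HTT_FRAG_DESC_BANK_CFG_INFO_* defines.  The helper name is hypothetical.
 */
static inline u8 htt_frag_desc_bank_cfg_info_example(u8 pdev_id, bool swap,
                             bool q_state_valid)
{
    u8 info = 0;

    info |= (pdev_id << HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB) &
        HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK;
    if (swap)
        info |= HTT_FRAG_DESC_BANK_CFG_INFO_SWAP;
    if (q_state_valid)
        info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

    return info;
}
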
1512 #define HTT_TX_Q_STATE_ENTRY_COEFFICIENT    128
1513 #define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK    0x3f
1514 #define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB     0
1515 #define HTT_TX_Q_STATE_ENTRY_EXP_MASK       0xc0
1516 #define HTT_TX_Q_STATE_ENTRY_EXP_LSB        6
1517 
1518 /**
1519  * htt_q_state - shared between host and firmware via DMA
1520  *
1521  * This structure is used for the host to expose its software queue state to
1522  * firmware so that its rate control can schedule fetch requests for optimized
1523  * performance. This is most notably used for MU-MIMO aggregation when multiple
1524  * MU clients are connected.
1525  *
1526  * @count: Each element defines the host queue depth. When the q depth type is
1527  *  configured as HTT_Q_DEPTH_TYPE_BYTES each entry is defined as:
1528  *  FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
1529  *  HTT_TX_Q_STATE_ENTRY_EXP_MASK). When the q depth type is configured as
1530  *  HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
1531  *  record_multiplier (see htt_q_state_conf).
1532  * @map: Used by firmware to quickly check which host queues are not empty. It
1533  *  is a bitmap with one bit per (TID, peer) queue.
1534  * @seq: Used by firmware to quickly check if the host queues were updated
1535  *  since it last checked.
1536  *
1537  * FIXME: Is the q_state map[] size calculation really correct?
1538  */
1539 struct htt_q_state {
1540     u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
1541     u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
1542     __le32 seq;
1543 } __packed;
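
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): decoding
 * one htt_q_state count entry back into an approximate byte count when the
 * queue depth type is HTT_Q_DEPTH_TYPE_BYTES, following the
 * FACTOR * 128 * 8^EXP encoding documented above.
 */
static inline u32 ath10k_htt_example_q_state_bytes(u8 entry)
{
    u32 factor = (entry & HTT_TX_Q_STATE_ENTRY_FACTOR_MASK) >>
                 HTT_TX_Q_STATE_ENTRY_FACTOR_LSB;
    u32 exp = (entry & HTT_TX_Q_STATE_ENTRY_EXP_MASK) >>
              HTT_TX_Q_STATE_ENTRY_EXP_LSB;

    /* 8^EXP expressed as a left shift by 3 * EXP */
    return (factor * HTT_TX_Q_STATE_ENTRY_COEFFICIENT) << (3 * exp);
}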
1544 
1545 #define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK   0x0fff
1546 #define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB    0
1547 #define HTT_TX_FETCH_RECORD_INFO_TID_MASK   0xf000
1548 #define HTT_TX_FETCH_RECORD_INFO_TID_LSB    12
1549 
1550 struct htt_tx_fetch_record {
1551     __le16 info; /* HTT_TX_FETCH_RECORD_INFO_ */
1552     __le16 num_msdus;
1553     __le32 num_bytes;
1554 } __packed;
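
/*
 * Illustrative sketch (hypothetical helpers): pulling the peer id and TID out
 * of a tx fetch record's info field with the HTT_TX_FETCH_RECORD_INFO_ fields
 * above.
 */
static inline u16 ath10k_htt_example_fetch_record_peer_id(const struct htt_tx_fetch_record *rec)
{
    return (le16_to_cpu(rec->info) & HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK) >>
           HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB;
}

static inline u8 ath10k_htt_example_fetch_record_tid(const struct htt_tx_fetch_record *rec)
{
    return (le16_to_cpu(rec->info) & HTT_TX_FETCH_RECORD_INFO_TID_MASK) >>
           HTT_TX_FETCH_RECORD_INFO_TID_LSB;
}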
1555 
1556 struct htt_tx_fetch_ind {
1557     u8 pad0;
1558     __le16 fetch_seq_num;
1559     __le32 token;
1560     __le16 num_resp_ids;
1561     __le16 num_records;
1562     union {
1563         /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
1564         DECLARE_FLEX_ARRAY(__le32, resp_ids);
1565         DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records);
1566     };
1567 } __packed;
1568 
1569 static inline void *
1570 ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
1571 {
1572     return (void *)&ind->records[le16_to_cpu(ind->num_records)];
1573 }
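
/*
 * Illustrative sketch (hypothetical helper): num_records fetch records are
 * laid out first and are immediately followed in the same buffer by
 * num_resp_ids 32-bit response ids, which is the pointer that
 * ath10k_htt_get_tx_fetch_ind_resp_ids() computes.
 */
static inline u32 ath10k_htt_example_fetch_ind_total_msdus(struct htt_tx_fetch_ind *ind)
{
    u32 total = 0;
    u16 i;

    for (i = 0; i < le16_to_cpu(ind->num_records); i++)
        total += le16_to_cpu(ind->records[i].num_msdus);

    return total;
}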
1574 
1575 struct htt_tx_fetch_resp {
1576     u8 pad0;
1577     __le16 resp_id;
1578     __le16 fetch_seq_num;
1579     __le16 num_records;
1580     __le32 token;
1581     struct htt_tx_fetch_record records[];
1582 } __packed;
1583 
1584 struct htt_tx_fetch_confirm {
1585     u8 pad0;
1586     __le16 num_resp_ids;
1587     __le32 resp_ids[];
1588 } __packed;
1589 
1590 enum htt_tx_mode_switch_mode {
1591     HTT_TX_MODE_SWITCH_PUSH = 0,
1592     HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
1593 };
1594 
1595 #define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE     BIT(0)
1596 #define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK   0xfffe
1597 #define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB    1
1598 
1599 #define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK      0x0003
1600 #define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB       0
1601 #define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc
1602 #define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB  2
1603 
1604 #define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK    0x0fff
1605 #define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0
1606 #define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK    0xf000
1607 #define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB     12
1608 
1609 struct htt_tx_mode_switch_record {
1610     __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
1611     __le16 num_max_msdus;
1612 } __packed;
1613 
1614 struct htt_tx_mode_switch_ind {
1615     u8 pad0;
1616     __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
1617     __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
1618     u8 pad1[2];
1619     struct htt_tx_mode_switch_record records[];
1620 } __packed;
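
/*
 * Illustrative sketch (hypothetical helpers): decoding the tx mode switch
 * indication header with the HTT_TX_MODE_SWITCH_IND_ fields above.
 */
static inline bool ath10k_htt_example_mode_switch_enabled(const struct htt_tx_mode_switch_ind *ind)
{
    return le16_to_cpu(ind->info0) & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE;
}

static inline u16 ath10k_htt_example_mode_switch_threshold(const struct htt_tx_mode_switch_ind *ind)
{
    return (le16_to_cpu(ind->info1) & HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK) >>
           HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB;
}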
1621 
1622 struct htt_channel_change {
1623     u8 pad[3];
1624     __le32 freq;
1625     __le32 center_freq1;
1626     __le32 center_freq2;
1627     __le32 phymode;
1628 } __packed;
1629 
1630 struct htt_per_peer_tx_stats_ind {
1631     __le32  succ_bytes;
1632     __le32  retry_bytes;
1633     __le32  failed_bytes;
1634     u8  ratecode;
1635     u8  flags;
1636     __le16  peer_id;
1637     __le16  succ_pkts;
1638     __le16  retry_pkts;
1639     __le16  failed_pkts;
1640     __le16  tx_duration;
1641     __le32  reserved1;
1642     __le32  reserved2;
1643 } __packed;
1644 
1645 struct htt_peer_tx_stats {
1646     u8 num_ppdu;
1647     u8 ppdu_len;
1648     u8 version;
1649     u8 payload[];
1650 } __packed;
1651 
1652 #define ATH10K_10_2_TX_STATS_OFFSET 136
1653 #define PEER_STATS_FOR_NO_OF_PPDUS  4
1654 
1655 struct ath10k_10_2_peer_tx_stats {
1656     u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
1657     u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
1658     __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
1659     u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
1660     __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
1661     u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
1662     __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
1663     u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
1664     __le32 tx_duration;
1665     u8 tx_ppdu_cnt;
1666     u8 peer_id;
1667 } __packed;
1668 
1669 union htt_rx_pn_t {
1670     /* WEP: 24-bit PN */
1671     u32 pn24;
1672 
1673     /* TKIP or CCMP: 48-bit PN */
1674     u64 pn48;
1675 
1676     /* WAPI: 128-bit PN */
1677     u64 pn128[2];
1678 };
1679 
1680 struct htt_cmd {
1681     struct htt_cmd_hdr hdr;
1682     union {
1683         struct htt_ver_req ver_req;
1684         struct htt_mgmt_tx_desc mgmt_tx;
1685         struct htt_data_tx_desc data_tx;
1686         struct htt_rx_ring_setup_32 rx_setup_32;
1687         struct htt_rx_ring_setup_64 rx_setup_64;
1688         struct htt_stats_req stats_req;
1689         struct htt_oob_sync_req oob_sync_req;
1690         struct htt_aggr_conf aggr_conf;
1691         struct htt_aggr_conf_v2 aggr_conf_v2;
1692         struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
1693         struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
1694         struct htt_tx_fetch_resp tx_fetch_resp;
1695     };
1696 } __packed;
1697 
1698 struct htt_resp {
1699     struct htt_resp_hdr hdr;
1700     union {
1701         struct htt_ver_resp ver_resp;
1702         struct htt_mgmt_tx_completion mgmt_tx_completion;
1703         struct htt_data_tx_completion data_tx_completion;
1704         struct htt_rx_indication rx_ind;
1705         struct htt_rx_indication_hl rx_ind_hl;
1706         struct htt_rx_fragment_indication rx_frag_ind;
1707         struct htt_rx_peer_map peer_map;
1708         struct htt_rx_peer_unmap peer_unmap;
1709         struct htt_rx_flush rx_flush;
1710         struct htt_rx_addba rx_addba;
1711         struct htt_rx_delba rx_delba;
1712         struct htt_security_indication security_indication;
1713         struct htt_rc_update rc_update;
1714         struct htt_rx_test rx_test;
1715         struct htt_pktlog_msg pktlog_msg;
1716         struct htt_rx_pn_ind rx_pn_ind;
1717         struct htt_rx_offload_ind rx_offload_ind;
1718         struct htt_rx_in_ord_ind rx_in_ord_ind;
1719         struct htt_tx_fetch_ind tx_fetch_ind;
1720         struct htt_tx_fetch_confirm tx_fetch_confirm;
1721         struct htt_tx_mode_switch_ind tx_mode_switch_ind;
1722         struct htt_channel_change chan_change;
1723         struct htt_peer_tx_stats peer_tx_stats;
1724     };
1725 } __packed;
1726 
1727 /*** host side structures follow ***/
1728 
1729 struct htt_tx_done {
1730     u16 msdu_id;
1731     u16 status;
1732     u8 ack_rssi;
1733 };
1734 
1735 enum htt_tx_compl_state {
1736     HTT_TX_COMPL_STATE_NONE,
1737     HTT_TX_COMPL_STATE_ACK,
1738     HTT_TX_COMPL_STATE_NOACK,
1739     HTT_TX_COMPL_STATE_DISCARD,
1740 };
1741 
1742 struct htt_peer_map_event {
1743     u8 vdev_id;
1744     u16 peer_id;
1745     u8 addr[ETH_ALEN];
1746 };
1747 
1748 struct htt_peer_unmap_event {
1749     u16 peer_id;
1750 };
1751 
1752 struct ath10k_htt_txbuf_32 {
1753     struct htt_data_tx_desc_frag frags[2];
1754     struct ath10k_htc_hdr htc_hdr;
1755     struct htt_cmd_hdr cmd_hdr;
1756     struct htt_data_tx_desc cmd_tx;
1757 } __packed __aligned(4);
1758 
1759 struct ath10k_htt_txbuf_64 {
1760     struct htt_data_tx_desc_frag frags[2];
1761     struct ath10k_htc_hdr htc_hdr;
1762     struct htt_cmd_hdr cmd_hdr;
1763     struct htt_data_tx_desc_64 cmd_tx;
1764 } __packed __aligned(4);
1765 
1766 struct ath10k_htt {
1767     struct ath10k *ar;
1768     enum ath10k_htc_ep_id eid;
1769 
1770     struct sk_buff_head rx_indication_head;
1771 
1772     u8 target_version_major;
1773     u8 target_version_minor;
1774     struct completion target_version_received;
1775     u8 max_num_amsdu;
1776     u8 max_num_ampdu;
1777 
1778     const enum htt_t2h_msg_type *t2h_msg_types;
1779     u32 t2h_msg_types_max;
1780 
1781     struct {
1782         /*
1783          * Ring of network buffer objects - This ring is
1784          * used exclusively by the host SW. This ring
1785          * mirrors the dev_addrs_ring that is shared
1786          * between the host SW and the MAC HW. The host SW
1787          * uses this netbufs ring to locate the network
1788          * buffer objects whose data buffers the HW has
1789          * filled.
1790          */
1791         struct sk_buff **netbufs_ring;
1792 
1793         /* This is used only with firmware supporting IN_ORD_IND.
1794          *
1795          * With Full Rx Reorder the HTT Rx Ring is more of a temporary
1796          * buffer ring from which buffer addresses are copied by the
1797          * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
1798          * pointing to specific (re-ordered) buffers.
1799          *
1800          * FIXME: With kernel generic hashing functions there's a lot
1801          * of hash collisions for sk_buffs.
1802          */
1803         bool in_ord_rx;
1804         DECLARE_HASHTABLE(skb_table, 4);
1805 
1806         /*
1807          * Ring of buffer addresses -
1808          * This ring holds the "physical" device address of the
1809          * rx buffers the host SW provides for the MAC HW to
1810          * fill.
1811          */
1812         union {
1813             __le64 *paddrs_ring_64;
1814             __le32 *paddrs_ring_32;
1815         };
1816 
1817         /*
1818          * Base address of ring, as a "physical" device address
1819          * rather than a CPU address.
1820          */
1821         dma_addr_t base_paddr;
1822 
1823         /* how many elems in the ring (power of 2) */
1824         int size;
1825 
1826         /* size - 1 */
1827         unsigned int size_mask;
1828 
1829         /* how many rx buffers to keep in the ring */
1830         int fill_level;
1831 
1832         /* how many rx buffers (full+empty) are in the ring */
1833         int fill_cnt;
1834 
1835         /*
1836          * alloc_idx - where HTT SW has deposited empty buffers
1837          * This is allocated in consistent mem, so that the FW can
1838          * read this variable, and program the HW's FW_IDX reg with
1839          * the value of this shadow register.
1840          */
1841         struct {
1842             __le32 *vaddr;
1843             dma_addr_t paddr;
1844         } alloc_idx;
1845 
1846         /* where HTT SW has processed bufs filled by rx MAC DMA */
1847         struct {
1848             unsigned int msdu_payld;
1849         } sw_rd_idx;
1850 
1851         /*
1852          * refill_retry_timer - timer triggered when the ring is
1853          * not refilled to the level expected
1854          */
1855         struct timer_list refill_retry_timer;
1856 
1857         /* Protects access to all rx ring buffer state variables */
1858         spinlock_t lock;
1859     } rx_ring;
1860 
1861     unsigned int prefetch_len;
1862 
1863     /* Protects access to pending_tx, num_pending_tx */
1864     spinlock_t tx_lock;
1865     int max_num_pending_tx;
1866     int num_pending_tx;
1867     int num_pending_mgmt_tx;
1868     struct idr pending_tx;
1869     wait_queue_head_t empty_tx_wq;
1870 
1871     /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
1872     DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
1873 
1874     /* set if host-fw communication goes haywire
1875      * used to avoid further failures
1876      */
1877     bool rx_confused;
1878     atomic_t num_mpdus_ready;
1879 
1880     /* This is used to group tx/rx completions separately and process them
1881      * in batches to reduce cache stalls
1882      */
1883     struct sk_buff_head rx_msdus_q;
1884     struct sk_buff_head rx_in_ord_compl_q;
1885     struct sk_buff_head tx_fetch_ind_q;
1886 
1887     /* rx_status template */
1888     struct ieee80211_rx_status rx_status;
1889 
1890     struct {
1891         dma_addr_t paddr;
1892         union {
1893             struct htt_msdu_ext_desc *vaddr_desc_32;
1894             struct htt_msdu_ext_desc_64 *vaddr_desc_64;
1895         };
1896         size_t size;
1897     } frag_desc;
1898 
1899     struct {
1900         dma_addr_t paddr;
1901         union {
1902             struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
1903             struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
1904         };
1905         size_t size;
1906     } txbuf;
1907 
1908     struct {
1909         bool enabled;
1910         struct htt_q_state *vaddr;
1911         dma_addr_t paddr;
1912         u16 num_push_allowed;
1913         u16 num_peers;
1914         u16 num_tids;
1915         enum htt_tx_mode_switch_mode mode;
1916         enum htt_q_depth_type type;
1917     } tx_q_state;
1918 
1919     bool tx_mem_allocated;
1920     const struct ath10k_htt_tx_ops *tx_ops;
1921     const struct ath10k_htt_rx_ops *rx_ops;
1922     bool disable_tx_comp;
1923     bool bundle_tx;
1924     struct sk_buff_head tx_req_head;
1925     struct sk_buff_head tx_complete_head;
1926 };
1927 
1928 struct ath10k_htt_tx_ops {
1929     int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
1930     int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
1931     int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
1932     void (*htt_free_frag_desc)(struct ath10k_htt *htt);
1933     int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
1934               struct sk_buff *msdu);
1935     int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
1936     void (*htt_free_txbuff)(struct ath10k_htt *htt);
1937     int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
1938                     u8 max_subfrms_ampdu,
1939                     u8 max_subfrms_amsdu);
1940     void (*htt_flush_tx)(struct ath10k_htt *htt);
1941 };
1942 
1943 static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
1944 {
1945     if (!htt->tx_ops->htt_send_rx_ring_cfg)
1946         return -EOPNOTSUPP;
1947 
1948     return htt->tx_ops->htt_send_rx_ring_cfg(htt);
1949 }
1950 
1951 static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
1952 {
1953     if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
1954         return -EOPNOTSUPP;
1955 
1956     return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
1957 }
1958 
1959 static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
1960 {
1961     if (!htt->tx_ops->htt_alloc_frag_desc)
1962         return -EOPNOTSUPP;
1963 
1964     return htt->tx_ops->htt_alloc_frag_desc(htt);
1965 }
1966 
1967 static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
1968 {
1969     if (htt->tx_ops->htt_free_frag_desc)
1970         htt->tx_ops->htt_free_frag_desc(htt);
1971 }
1972 
1973 static inline int ath10k_htt_tx(struct ath10k_htt *htt,
1974                 enum ath10k_hw_txrx_mode txmode,
1975                 struct sk_buff *msdu)
1976 {
1977     return htt->tx_ops->htt_tx(htt, txmode, msdu);
1978 }
1979 
1980 static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
1981 {
1982     if (htt->tx_ops->htt_flush_tx)
1983         htt->tx_ops->htt_flush_tx(htt);
1984 }
1985 
1986 static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
1987 {
1988     if (!htt->tx_ops->htt_alloc_txbuff)
1989         return -EOPNOTSUPP;
1990 
1991     return htt->tx_ops->htt_alloc_txbuff(htt);
1992 }
1993 
1994 static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
1995 {
1996     if (htt->tx_ops->htt_free_txbuff)
1997         htt->tx_ops->htt_free_txbuff(htt);
1998 }
1999 
2000 static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
2001                           u8 max_subfrms_ampdu,
2002                           u8 max_subfrms_amsdu)
2003 
2004 {
2005     if (!htt->tx_ops->htt_h2t_aggr_cfg_msg)
2006         return -EOPNOTSUPP;
2007 
2008     return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt,
2009                          max_subfrms_ampdu,
2010                          max_subfrms_amsdu);
2011 }
2012 
2013 struct ath10k_htt_rx_ops {
2014     size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
2015     void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
2016     void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
2017                     int idx);
2018     void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
2019     void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
2020     bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt,
2021                     struct htt_rx_fragment_indication *rx,
2022                     struct sk_buff *skb);
2023 };
2024 
2025 static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
2026 {
2027     if (!htt->rx_ops->htt_get_rx_ring_size)
2028         return 0;
2029 
2030     return htt->rx_ops->htt_get_rx_ring_size(htt);
2031 }
2032 
2033 static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
2034                          void *vaddr)
2035 {
2036     if (htt->rx_ops->htt_config_paddrs_ring)
2037         htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
2038 }
2039 
2040 static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
2041                           dma_addr_t paddr,
2042                           int idx)
2043 {
2044     if (htt->rx_ops->htt_set_paddrs_ring)
2045         htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
2046 }
2047 
2048 static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
2049 {
2050     if (!htt->rx_ops->htt_get_vaddr_ring)
2051         return NULL;
2052 
2053     return htt->rx_ops->htt_get_vaddr_ring(htt);
2054 }
2055 
2056 static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
2057 {
2058     if (htt->rx_ops->htt_reset_paddrs_ring)
2059         htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
2060 }
2061 
2062 static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
2063                           struct htt_rx_fragment_indication *rx,
2064                           struct sk_buff *skb)
2065 {
2066     if (!htt->rx_ops->htt_rx_proc_rx_frag_ind)
2067         return true;
2068 
2069     return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
2070 }
2071 
2072 /* the driver strongly assumes that the rx header status is 64 bytes long,
2073  * so all possible rx_desc structures must respect this assumption.
2074  */
2075 #define RX_HTT_HDR_STATUS_LEN 64
2076 
2077 /* The rx descriptor structure layout is programmed via rx ring setup
2078  * so that FW knows how to transfer the rx descriptor to the host.
2079  * Unfortunately, QCA6174's firmware doesn't currently cope with changes to
2080  * the rx descriptor structure layout beyond what it expects
2081  * (even if the new layout is correctly programmed during rx ring setup).
2082  * Therefore we must keep two different memory layouts, abstract the rx
2083  * descriptor representation and use ath10k_htt_rx_desc_ops
2084  * to access rx descriptor data correctly.
2085  */
2086 
2087 /* base struct used for abstracting the rx descriptor representation */
2088 struct htt_rx_desc {
2089     union {
2090         /* This field is filled on the host using the msdu buffer
2091          * from htt_rx_indication
2092          */
2093         struct fw_rx_desc_base fw_desc;
2094         u32 pad;
2095     } __packed;
2096 } __packed;
2097 
2098 /* rx descriptor for wcn3990 and possibly extensible for newer cards
2099  * Buffers like this are placed on the rx ring.
2100  */
2101 struct htt_rx_desc_v2 {
2102     struct htt_rx_desc base;
2103     struct {
2104         struct rx_attention attention;
2105         struct rx_frag_info frag_info;
2106         struct rx_mpdu_start mpdu_start;
2107         struct rx_msdu_start msdu_start;
2108         struct rx_msdu_end msdu_end;
2109         struct rx_mpdu_end mpdu_end;
2110         struct rx_ppdu_start ppdu_start;
2111         struct rx_ppdu_end ppdu_end;
2112     } __packed;
2113     u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
2114     u8 msdu_payload[];
2115 };
2116 
2117 /* Dedicated rx descriptor for QCA6174, QCA988x and QCA99x0 to make sure
2118  * their firmware works correctly. We keep a single rx descriptor for all
2119  * three families of cards because, based on testing, it seems to be the most
2120  * stable solution; e.g. having an rx descriptor dedicated only to QCA6174
2121  * occasionally caused firmware crashes during some tests.
2122  * Buffers like this are placed on the rx ring.
2123  */
2124 struct htt_rx_desc_v1 {
2125     struct htt_rx_desc base;
2126     struct {
2127         struct rx_attention attention;
2128         struct rx_frag_info_v1 frag_info;
2129         struct rx_mpdu_start mpdu_start;
2130         struct rx_msdu_start_v1 msdu_start;
2131         struct rx_msdu_end_v1 msdu_end;
2132         struct rx_mpdu_end mpdu_end;
2133         struct rx_ppdu_start ppdu_start;
2134         struct rx_ppdu_end_v1 ppdu_end;
2135     } __packed;
2136     u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
2137     u8 msdu_payload[];
2138 };
2139 
2140 /* rx_desc abstraction */
2141 struct ath10k_htt_rx_desc_ops {
2142     /* These fields are mandatory; they must be specified in any instance */
2143 
2144     /* sizeof() of the rx_desc structure used by this hw */
2145     size_t rx_desc_size;
2146 
2147     /* offset of msdu_payload inside the rx_desc structure used by this hw */
2148     size_t rx_desc_msdu_payload_offset;
2149 
2150     /* These fields are optional.
2151      * When a field is not provided the default implementation gets used
2152      * (see the ath10k_htt_rx_desc_* helpers below for more info about the defaults)
2153      */
2154     bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
2155     int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
2156 
2157     /* Safely cast from a void* buffer containing an rx descriptor
2158      * to the proper rx_desc structure
2159      */
2160     struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);
2161 
2162     void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);
2163     struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
2164     struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
2165     struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
2166     struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
2167     struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
2168     struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
2169     struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
2170     struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
2171     u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
2172     u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
2173 };
2174 
2175 extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
2176 extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
2177 extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;
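
/*
 * Illustrative sketch (hypothetical instance, not one of the real ops declared
 * above): only rx_desc_size and rx_desc_msdu_payload_offset are mandatory;
 * every getter left NULL falls back to the htt_rx_desc_v1 based defaults
 * implemented by the inline helpers below.
 */
static const struct ath10k_htt_rx_desc_ops example_rx_desc_ops __maybe_unused = {
    .rx_desc_size = sizeof(struct htt_rx_desc_v1),
    .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload),
};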
2178 
2179 static inline int
2180 ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2181 {
2182     if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
2183         return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
2184     return 0;
2185 }
2186 
2187 static inline bool
2188 ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2189 {
2190     if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
2191         return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
2192     return false;
2193 }
2194 
2195 /* The default implementation of all these getters uses the old rx_desc layout
2196  * (htt_rx_desc_v1), so that it is easier to define ath10k_htt_rx_desc_ops
2197  * instances. But if new wireless cards must be supported, it would probably be
2198  * better to switch the default implementation to the new rx_desc layout, since
2199  * this would make extending it easier.
2200  */
2201 static inline struct htt_rx_desc *
2202 ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff)
2203 {
2204     if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
2205         return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
2206     return &((struct htt_rx_desc_v1 *)buff)->base;
2207 }
2208 
2209 static inline void
2210 ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
2211                    struct htt_rx_ring_rx_desc_offsets *off)
2212 {
2213     if (hw->rx_desc_ops->rx_desc_get_offsets) {
2214         hw->rx_desc_ops->rx_desc_get_offsets(off);
2215     } else {
2216 #define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x)  / 4)
2217         off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
2218         off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
2219         off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
2220         off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
2221         off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
2222         off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
2223         off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
2224         off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
2225         off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
2226         off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
2227 #undef desc_offset
2228     }
2229 }
2230 
2231 static inline struct rx_attention *
2232 ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2233 {
2234     struct htt_rx_desc_v1 *rx_desc;
2235 
2236     if (hw->rx_desc_ops->rx_desc_get_attention)
2237         return hw->rx_desc_ops->rx_desc_get_attention(rxd);
2238 
2239     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2240     return &rx_desc->attention;
2241 }
2242 
2243 static inline struct rx_frag_info_common *
2244 ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2245 {
2246     struct htt_rx_desc_v1 *rx_desc;
2247 
2248     if (hw->rx_desc_ops->rx_desc_get_frag_info)
2249         return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
2250 
2251     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2252     return &rx_desc->frag_info.common;
2253 }
2254 
2255 static inline struct rx_mpdu_start *
2256 ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2257 {
2258     struct htt_rx_desc_v1 *rx_desc;
2259 
2260     if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
2261         return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
2262 
2263     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2264     return &rx_desc->mpdu_start;
2265 }
2266 
2267 static inline struct rx_mpdu_end *
2268 ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2269 {
2270     struct htt_rx_desc_v1 *rx_desc;
2271 
2272     if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
2273         return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
2274 
2275     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2276     return &rx_desc->mpdu_end;
2277 }
2278 
2279 static inline struct rx_msdu_start_common *
2280 ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2281 {
2282     struct htt_rx_desc_v1 *rx_desc;
2283 
2284     if (hw->rx_desc_ops->rx_desc_get_msdu_start)
2285         return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
2286 
2287     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2288     return &rx_desc->msdu_start.common;
2289 }
2290 
2291 static inline struct rx_msdu_end_common *
2292 ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2293 {
2294     struct htt_rx_desc_v1 *rx_desc;
2295 
2296     if (hw->rx_desc_ops->rx_desc_get_msdu_end)
2297         return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
2298 
2299     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2300     return &rx_desc->msdu_end.common;
2301 }
2302 
2303 static inline struct rx_ppdu_start *
2304 ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2305 {
2306     struct htt_rx_desc_v1 *rx_desc;
2307 
2308     if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
2309         return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
2310 
2311     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2312     return &rx_desc->ppdu_start;
2313 }
2314 
2315 static inline struct rx_ppdu_end_common *
2316 ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2317 {
2318     struct htt_rx_desc_v1 *rx_desc;
2319 
2320     if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
2321         return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
2322 
2323     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2324     return &rx_desc->ppdu_end.common;
2325 }
2326 
2327 static inline u8 *
2328 ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2329 {
2330     struct htt_rx_desc_v1 *rx_desc;
2331 
2332     if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
2333         return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
2334 
2335     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2336     return rx_desc->rx_hdr_status;
2337 }
2338 
2339 static inline u8 *
2340 ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
2341 {
2342     struct htt_rx_desc_v1 *rx_desc;
2343 
2344     if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
2345         return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
2346 
2347     rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
2348     return rx_desc->msdu_payload;
2349 }
2350 
2351 #define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK           0x00000fff
2352 #define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB            0
2353 #define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK         0x00001000
2354 #define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB          12
2355 #define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
2356 #define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB  13
2357 #define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK       0x00010000
2358 #define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB        16
2359 #define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK        0x01fe0000
2360 #define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB         17
2361 
2362 struct htt_rx_desc_base_hl {
2363     __le32 info; /* HTT_RX_DESC_HL_INFO_ */
2364 };
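
/*
 * Illustrative sketch (hypothetical helper): extracting the sequence number
 * from the HL rx descriptor info word using the HTT_RX_DESC_HL_INFO_ fields
 * above.
 */
static inline u16 ath10k_htt_example_rx_hl_seq_num(const struct htt_rx_desc_base_hl *rxd)
{
    return (le32_to_cpu(rxd->info) & HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK) >>
           HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB;
}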
2365 
2366 struct htt_rx_chan_info {
2367     __le16 primary_chan_center_freq_mhz;
2368     __le16 contig_chan1_center_freq_mhz;
2369     __le16 contig_chan2_center_freq_mhz;
2370     u8 phy_mode;
2371     u8 reserved;
2372 } __packed;
2373 
2374 #define HTT_RX_DESC_ALIGN 8
2375 
2376 #define HTT_MAC_ADDR_LEN 6
2377 
2378 /*
2379  * FIX THIS
2380  * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
2381  * rounded up to a cache line size.
2382  */
2383 #define HTT_RX_BUF_SIZE 2048
2384 
2385 /* The HTT rx MSDU size can no longer be computed statically,
2386  * because it depends on the underlying device's rx_desc representation
2387  */
2388 static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
2389 {
2390     return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
2391 }
2392 
2393 /* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
2394  * aggregated traffic more nicely.
2395  */
2396 #define ATH10K_HTT_MAX_NUM_REFILL 100
2397 
2398 /*
2399  * DMA_MAP expects the buffer to be an integral number of cache lines.
2400  * Rather than checking the actual cache line size, this code makes a
2401  * conservative estimate of what the cache line size could be.
2402  */
2403 #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7  /* 2^7 = 128 */
2404 #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
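
/*
 * Illustrative sketch (assumption, not taken from the driver): rounding a
 * buffer length up to the conservative cache line size assumed above before
 * DMA-mapping it.
 */
static inline unsigned int ath10k_htt_example_cacheline_align(unsigned int len)
{
    return (len + HTT_MAX_CACHE_LINE_SIZE_MASK) & ~HTT_MAX_CACHE_LINE_SIZE_MASK;
}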
2405 
2406 /* These values are the defaults in most firmware revisions and appear to be
2407  * a performance sweet spot.
2408  */
2409 #define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
2410 #define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
2411 
2412 int ath10k_htt_connect(struct ath10k_htt *htt);
2413 int ath10k_htt_init(struct ath10k *ar);
2414 int ath10k_htt_setup(struct ath10k_htt *htt);
2415 
2416 int ath10k_htt_tx_start(struct ath10k_htt *htt);
2417 void ath10k_htt_tx_stop(struct ath10k_htt *htt);
2418 void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
2419 void ath10k_htt_tx_free(struct ath10k_htt *htt);
2420 
2421 int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
2422 int ath10k_htt_rx_ring_refill(struct ath10k *ar);
2423 void ath10k_htt_rx_free(struct ath10k_htt *htt);
2424 
2425 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
2426 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
2427 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
2428 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
2429 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
2430                  u64 cookie);
2431 void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
2432 int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
2433                  __le32 token,
2434                  __le16 fetch_seq_num,
2435                  struct htt_tx_fetch_record *records,
2436                  size_t num_records);
2437 void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);
2438 
2439 void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
2440                   struct ieee80211_txq *txq);
2441 void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
2442                   struct ieee80211_txq *txq);
2443 void ath10k_htt_tx_txq_sync(struct ath10k *ar);
2444 void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
2445 int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
2446 void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
2447 int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
2448                    bool is_presp);
2449 
2450 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
2451 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
2452 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
2453 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2454                          struct sk_buff *skb);
2455 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
2456 int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
2457 void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
2458 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
2459 #endif