#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
#include "iwl-fh.h"
#include "fw/api/tx.h"

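/**
 * struct iwl_tso_hdr_page - page kept for copied TSO headers
 * @page: the backing page
 * @pos: next free byte within @page
 */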
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

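/*
 * iwl_txq_get_first_tb_dma - DMA address of the idx-th first-TB buffer
 *
 * All first-TB buffers of a queue live in a single contiguous DMA
 * allocation starting at txq->first_tb_dma, so this is a plain offset
 * computation.
 */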
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

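/*
 * iwl_txq_get_cmd_index - wrap a queue index into the command window
 *
 * The masking relies on q->n_window being a power of two.
 */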
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);

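/*
 * iwl_wake_queue - let the op mode know the queue has space again
 *
 * test_and_clear_bit() ensures the op mode is only notified on the
 * actual stopped -> woken transition; waking an already-running queue
 * is a no-op.
 */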
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

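/*
 * iwl_txq_get_tfd - return a pointer to the idx-th TFD of the queue
 *
 * On devices using the TFH format the index is first wrapped into the
 * command window; the byte offset then depends on the device-specific
 * TFD size.
 */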
static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->use_tfh)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);

/*
 * We need this inline in case dma_addr_t is only 32 bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);

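/*
 * iwl_txq_stop - mark a queue full and tell the op mode to stop it
 *
 * The counterpart of iwl_wake_queue(): test_and_set_bit() guarantees
 * the op mode only hears about the first stop; repeated calls are
 * merely logged.
 */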
static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

/*
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/*
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

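/*
 * iwl_txq_used - check whether slot i holds an in-flight entry
 *
 * An entry is in use if it lies between the read and the write pointer
 * within the command window; the second branch covers the case where
 * the window has wrapped around.
 */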
static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
	int w = iwl_txq_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
		      u32 sta_mask, u8 tid,
		      int size, unsigned int timeout);

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif
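
/*
 * iwl_txq_gen1_tfd_get_num_tbs - number of TBs used in a TFD
 *
 * The count lives in different fields depending on whether the device
 * uses the legacy TFD or the newer TFH format, but both encode it in
 * the low 5 bits.
 */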
static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
					      void *_tfd)
{
	struct iwl_tfd *tfd;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;

		return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
	}

	tfd = (struct iwl_tfd *)_tfd;
	return tfd->num_tbs & 0x1f;
}

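/*
 * iwl_txq_gen1_tfd_tb_get_len - length of TB idx inside a TFD
 *
 * In the legacy format the length shares the hi_n_len field with the
 * high address bits (hence the shift by 4); the TFH format stores it
 * as a plain 16-bit field.
 */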
static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
					      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;
		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

		return le16_to_cpu(tfh_tb->tb_len);
	}

	tfd = (struct iwl_tfd *)_tfd;
	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index);
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq);
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs);
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs);
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze);
void iwl_txq_progress(struct iwl_txq *txq);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_queue_tx_h__ */