#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/commands.h"
#include "fw/api/tx.h"
#include "fw/api/datapath.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
#include <linux/dmapool.h>

static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

		/* Starting from AX210, the HW expects bytes */
		WARN_ON(trans->txqs.bc_table_dword);
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
	} else {
		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

		/* Before AX210, the HW expects DW */
		WARN_ON(!trans->txqs.bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}
}

/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
				   struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd)
{
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* TB0 (the first_tb_bufs copy of the command) is never mapped here,
	 * so start unmapping from TB1; page mappings are flagged in meta->tbs.
	 */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->lock);

	if (!txq->entries)
		return;

	iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
			       iwl_txq_get_tfd(trans, txq, idx));

	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
			dma_addr_t addr, u16 len)
{
	int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;

	/* Only WARN here so we know about the issue, but we mess up our
	 * unmap path because not every place currently checks for errors
	 * returned from this function - it can only return an error if
	 * there's no more space, and so when we know there is enough we
	 * don't always check ...
	 */
	WARN(iwl_txq_crosses_4g_boundary(addr, len),
	     "possible DMA problem with iova:0x%llx, len:%d\n",
	     (unsigned long long)addr, len);

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

static struct page *get_workaround_page(struct iwl_trans *trans,
					struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *ret;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	ret = alloc_page(GFP_ATOMIC);
	if (!ret)
		return NULL;

	/* set the chaining pointer to the previous page if there */
	*(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
	*page_ptr = ret;

	return ret;
}

/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */
0206 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
0207 struct sk_buff *skb,
0208 struct iwl_tfh_tfd *tfd,
0209 dma_addr_t phys, void *virt,
0210 u16 len, struct iwl_cmd_meta *meta)
0211 {
0212 dma_addr_t oldphys = phys;
0213 struct page *page;
0214 int ret;
0215
0216 if (unlikely(dma_mapping_error(trans->dev, phys)))
0217 return -ENOMEM;
0218
0219 if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
0220 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
0221
0222 if (ret < 0)
0223 goto unmap;
0224
0225 if (meta)
0226 meta->tbs |= BIT(ret);
0227
0228 ret = 0;
0229 goto trace;
0230 }
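	/*
	 * Work around a hardware bug. If (as expressed in the
	 * condition above) the TB ends on a 32-bit boundary,
	 * then the next TB may be accessed with the wrong
	 * address.
	 * To work around it, copy the data elsewhere and make
	 * a new mapping for it so the device will not fail.
	 */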
0241 if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
0242 ret = -ENOBUFS;
0243 goto unmap;
0244 }
0245
0246 page = get_workaround_page(trans, skb);
0247 if (!page) {
0248 ret = -ENOMEM;
0249 goto unmap;
0250 }
0251
0252 memcpy(page_address(page), virt, len);
0253
0254 phys = dma_map_single(trans->dev, page_address(page), len,
0255 DMA_TO_DEVICE);
0256 if (unlikely(dma_mapping_error(trans->dev, phys)))
0257 return -ENOMEM;
0258 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
0259 if (ret < 0) {
0260
0261 oldphys = phys;
0262 meta = NULL;
0263 goto unmap;
0264 }
0265 IWL_WARN(trans,
0266 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
0267 len, (unsigned long long)oldphys, (unsigned long long)phys);
0268
0269 ret = 0;
0270 unmap:
0271 if (meta)
0272 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
0273 else
0274 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
0275 trace:
0276 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
0277
0278 return ret;
0279 }
0280
0281 #ifdef CONFIG_INET
0282 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
0283 struct sk_buff *skb)
0284 {
0285 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
0286 struct page **page_ptr;
0287
0288 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
0289
0290 if (WARN_ON(*page_ptr))
0291 return NULL;
0292
0293 if (!p->page)
0294 goto alloc;
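	/*
	 * Check if there's enough room on this page
	 *
	 * Note that we put a page chaining pointer *last* in the
	 * page - we need it somewhere, and if it's there then we
	 * avoid DMA mapping the last bits of the page which may
	 * trigger the 32-bit boundary hardware bug.
	 *
	 * (see also get_workaround_page() and comment there)
	 */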
0306 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
0307 sizeof(void *))
0308 goto out;
0309
0310
0311 __free_page(p->page);
0312
0313 alloc:
0314 p->page = alloc_page(GFP_ATOMIC);
0315 if (!p->page)
0316 return NULL;
0317 p->pos = page_address(p->page);
0318
0319 *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
0320 out:
0321 *page_ptr = p->page;
0322 get_page(p->page);
0323 return p;
0324 }
0325 #endif
0326
0327 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
0328 struct sk_buff *skb,
0329 struct iwl_tfh_tfd *tfd, int start_len,
0330 u8 hdr_len,
0331 struct iwl_device_tx_cmd *dev_cmd)
0332 {
0333 #ifdef CONFIG_INET
0334 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
0335 struct ieee80211_hdr *hdr = (void *)skb->data;
0336 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
0337 unsigned int mss = skb_shinfo(skb)->gso_size;
0338 u16 length, amsdu_pad;
0339 u8 *start_hdr;
0340 struct iwl_tso_hdr_page *hdr_page;
0341 struct tso_t tso;
0342
0343 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
0344 &dev_cmd->hdr, start_len, 0);
0345
0346 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
0347 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
0348 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
0349 amsdu_pad = 0;
0350
0351
0352 hdr_room = DIV_ROUND_UP(total_len, mss) *
0353 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
0354
0355
0356 hdr_page = get_page_hdr(trans, hdr_room, skb);
0357 if (!hdr_page)
0358 return -ENOMEM;
0359
0360 start_hdr = hdr_page->pos;
0361
0362
0363
0364
0365
0366 skb_pull(skb, hdr_len);
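	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */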
0373 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
0374
0375 tso_start(skb, &tso);
0376
0377 while (total_len) {
0378
0379 unsigned int data_left = min_t(unsigned int, mss, total_len);
0380 unsigned int tb_len;
0381 dma_addr_t tb_phys;
0382 u8 *subf_hdrs_start = hdr_page->pos;
0383
0384 total_len -= data_left;
0385
0386 memset(hdr_page->pos, 0, amsdu_pad);
0387 hdr_page->pos += amsdu_pad;
0388 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
0389 data_left)) & 0x3;
0390 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
0391 hdr_page->pos += ETH_ALEN;
0392 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
0393 hdr_page->pos += ETH_ALEN;
0394
0395 length = snap_ip_tcp_hdrlen + data_left;
0396 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
0397 hdr_page->pos += sizeof(length);
0398
0399
0400
0401
0402
0403 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
0404
0405 hdr_page->pos += snap_ip_tcp_hdrlen;
0406
0407 tb_len = hdr_page->pos - start_hdr;
0408 tb_phys = dma_map_single(trans->dev, start_hdr,
0409 tb_len, DMA_TO_DEVICE);
0410 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
0411 goto out_err;
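		/*
		 * No need for _with_wa, this is from the TSO page and
		 * we leave some space at the end of it so can't hit
		 * the buggy scenario.
		 */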
0417 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
0418 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
0419 tb_phys, tb_len);
0420
0421 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
0422
0423
0424 start_hdr = hdr_page->pos;
0425
0426
0427 while (data_left) {
0428 int ret;
0429
0430 tb_len = min_t(unsigned int, tso.size, data_left);
0431 tb_phys = dma_map_single(trans->dev, tso.data,
0432 tb_len, DMA_TO_DEVICE);
0433 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
0434 tb_phys, tso.data,
0435 tb_len, NULL);
0436 if (ret)
0437 goto out_err;
0438
0439 data_left -= tb_len;
0440 tso_build_data(skb, &tso, tb_len);
0441 }
0442 }
0443
0444
0445 skb_push(skb, hdr_len);
0446
0447 return 0;
0448
0449 out_err:
0450 #endif
0451 return -EINVAL;
0452 }
0453
0454 static struct
0455 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
0456 struct iwl_txq *txq,
0457 struct iwl_device_tx_cmd *dev_cmd,
0458 struct sk_buff *skb,
0459 struct iwl_cmd_meta *out_meta,
0460 int hdr_len,
0461 int tx_cmd_len)
0462 {
0463 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
0464 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
0465 dma_addr_t tb_phys;
0466 int len;
0467 void *tb1_addr;
0468
0469 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
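	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */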
0476 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
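	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */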
0484 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
0485 IWL_FIRST_TB_SIZE;
0486
0487
0488
0489
0490 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
0491 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
0492 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
0493 goto out_err;
0494
0495
0496
0497
0498 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
0499
0500 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
0501 hdr_len, dev_cmd))
0502 goto out_err;
0503
0504
0505 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
0506 return tfd;
0507
0508 out_err:
0509 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
0510 return NULL;
0511 }
0512
0513 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
0514 struct sk_buff *skb,
0515 struct iwl_tfh_tfd *tfd,
0516 struct iwl_cmd_meta *out_meta)
0517 {
0518 int i;
0519
0520 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
0521 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0522 dma_addr_t tb_phys;
0523 unsigned int fragsz = skb_frag_size(frag);
0524 int ret;
0525
0526 if (!fragsz)
0527 continue;
0528
0529 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
0530 fragsz, DMA_TO_DEVICE);
0531 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
0532 skb_frag_address(frag),
0533 fragsz, out_meta);
0534 if (ret)
0535 return ret;
0536 }
0537
0538 return 0;
0539 }
0540
0541 static struct
0542 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
0543 struct iwl_txq *txq,
0544 struct iwl_device_tx_cmd *dev_cmd,
0545 struct sk_buff *skb,
0546 struct iwl_cmd_meta *out_meta,
0547 int hdr_len,
0548 int tx_cmd_len,
0549 bool pad)
0550 {
0551 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
0552 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
0553 dma_addr_t tb_phys;
0554 int len, tb1_len, tb2_len;
0555 void *tb1_addr;
0556 struct sk_buff *frag;
0557
0558 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
0559
0560
0561 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
0562
0563
0564
0565
0566
0567
0568 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
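	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */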
0576 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
0577 IWL_FIRST_TB_SIZE;
0578
0579 if (pad)
0580 tb1_len = ALIGN(len, 4);
0581 else
0582 tb1_len = len;
0583
0584
0585 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
0586 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
0587 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
0588 goto out_err;
0589
0590
0591
0592
0593 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
0594 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
0595 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
0596
0597
0598 tb2_len = skb_headlen(skb) - hdr_len;
0599
0600 if (tb2_len > 0) {
0601 int ret;
0602
0603 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
0604 tb2_len, DMA_TO_DEVICE);
0605 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
0606 skb->data + hdr_len, tb2_len,
0607 NULL);
0608 if (ret)
0609 goto out_err;
0610 }
0611
0612 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
0613 goto out_err;
0614
0615 skb_walk_frags(skb, frag) {
0616 int ret;
0617
0618 tb_phys = dma_map_single(trans->dev, frag->data,
0619 skb_headlen(frag), DMA_TO_DEVICE);
0620 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
0621 frag->data,
0622 skb_headlen(frag), NULL);
0623 if (ret)
0624 goto out_err;
0625 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
0626 goto out_err;
0627 }
0628
0629 return tfd;
0630
0631 out_err:
0632 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
0633 return NULL;
0634 }
0635
0636 static
0637 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
0638 struct iwl_txq *txq,
0639 struct iwl_device_tx_cmd *dev_cmd,
0640 struct sk_buff *skb,
0641 struct iwl_cmd_meta *out_meta)
0642 {
0643 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
0644 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
0645 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
0646 int len, hdr_len;
0647 bool amsdu;
0648
0649
0650 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
0651
0652 memset(tfd, 0, sizeof(*tfd));
0653
0654 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
0655 len = sizeof(struct iwl_tx_cmd_gen2);
0656 else
0657 len = sizeof(struct iwl_tx_cmd_gen3);
0658
0659 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
0660 (*ieee80211_get_qos_ctl(hdr) &
0661 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
0662
0663 hdr_len = ieee80211_hdrlen(hdr->frame_control);
0664
0665
0666
0667
0668
0669
0670 if (amsdu && skb_shinfo(skb)->gso_size)
0671 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
0672 out_meta, hdr_len, len);
0673 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
0674 hdr_len, len, !amsdu);
0675 }
0676
0677 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
0678 {
0679 unsigned int max;
0680 unsigned int used;
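	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */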
0688 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
0689 max = q->n_window;
0690 else
0691 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
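	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */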
0697 used = (q->write_ptr - q->read_ptr) &
0698 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
0699
0700 if (WARN_ON(used > max))
0701 return 0;
0702
0703 return max - used;
0704 }
0705
0706 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
0707 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
0708 {
0709 struct iwl_cmd_meta *out_meta;
0710 struct iwl_txq *txq = trans->txqs.txq[txq_id];
0711 u16 cmd_len;
0712 int idx;
0713 void *tfd;
0714
0715 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
0716 "queue %d out of range", txq_id))
0717 return -EINVAL;
0718
0719 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
0720 "TX on unused queue %d\n", txq_id))
0721 return -EINVAL;
0722
0723 if (skb_is_nonlinear(skb) &&
0724 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
0725 __skb_linearize(skb))
0726 return -ENOMEM;
0727
0728 spin_lock(&txq->lock);
0729
0730 if (iwl_txq_space(trans, txq) < txq->high_mark) {
0731 iwl_txq_stop(trans, txq);
0732
0733
0734 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
0735 struct iwl_device_tx_cmd **dev_cmd_ptr;
0736
0737 dev_cmd_ptr = (void *)((u8 *)skb->cb +
0738 trans->txqs.dev_cmd_offs);
0739
0740 *dev_cmd_ptr = dev_cmd;
0741 __skb_queue_tail(&txq->overflow_q, skb);
0742 spin_unlock(&txq->lock);
0743 return 0;
0744 }
0745 }
0746
0747 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
0748
0749
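	/* Set up driver data for this TFD */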
0750 txq->entries[idx].skb = skb;
0751 txq->entries[idx].cmd = dev_cmd;
0752
0753 dev_cmd->hdr.sequence =
0754 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
0755 INDEX_TO_SEQ(idx)));
0756
0757
0758 out_meta = &txq->entries[idx].meta;
0759 out_meta->flags = 0;
0760
0761 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
0762 if (!tfd) {
0763 spin_unlock(&txq->lock);
0764 return -1;
0765 }
0766
0767 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
0768 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
0769 (void *)dev_cmd->payload;
0770
0771 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
0772 } else {
0773 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
0774 (void *)dev_cmd->payload;
0775
0776 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
0777 }
0778
0779
0780 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
0781 iwl_txq_gen2_get_num_tbs(trans, tfd));
0782
0783
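	/* start timer if queue currently empty */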
0784 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
0785 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
0786
0787
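	/* Tell device the write index *just past* this latest filled TFD */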
0788 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
0789 iwl_txq_inc_wr_ptr(trans, txq);
0790
0791
0792
0793
0794 spin_unlock(&txq->lock);
0795 return 0;
0796 }
0797
0798
0799
0800
0801
0802
0803 void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
0804 {
0805 struct iwl_txq *txq = trans->txqs.txq[txq_id];
0806
0807 spin_lock_bh(&txq->lock);
0808 while (txq->write_ptr != txq->read_ptr) {
0809 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
0810 txq_id, txq->read_ptr);
0811
0812 if (txq_id != trans->txqs.cmd.q_id) {
0813 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
0814 struct sk_buff *skb = txq->entries[idx].skb;
0815
0816 if (!WARN_ON_ONCE(!skb))
0817 iwl_txq_free_tso_page(trans, skb);
0818 }
0819 iwl_txq_gen2_free_tfd(trans, txq);
0820 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
0821 }
0822
0823 while (!skb_queue_empty(&txq->overflow_q)) {
0824 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
0825
0826 iwl_op_mode_free_skb(trans->op_mode, skb);
0827 }
0828
0829 spin_unlock_bh(&txq->lock);
0830
0831
0832 iwl_wake_queue(trans, txq);
0833 }
0834
0835 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
0836 struct iwl_txq *txq)
0837 {
0838 struct device *dev = trans->dev;
0839
0840
0841 if (txq->tfds) {
0842 dma_free_coherent(dev,
0843 trans->txqs.tfd.size * txq->n_window,
0844 txq->tfds, txq->dma_addr);
0845 dma_free_coherent(dev,
0846 sizeof(*txq->first_tb_bufs) * txq->n_window,
0847 txq->first_tb_bufs, txq->first_tb_dma);
0848 }
0849
0850 kfree(txq->entries);
0851 if (txq->bc_tbl.addr)
0852 dma_pool_free(trans->txqs.bc_pool,
0853 txq->bc_tbl.addr, txq->bc_tbl.dma);
0854 kfree(txq);
0855 }
0856
0857
0858
0859
0860
0861
0862
0863
0864
0865 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
0866 {
0867 struct iwl_txq *txq;
0868 int i;
0869
0870 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
0871 "queue %d out of range", txq_id))
0872 return;
0873
0874 txq = trans->txqs.txq[txq_id];
0875
0876 if (WARN_ON(!txq))
0877 return;
0878
0879 iwl_txq_gen2_unmap(trans, txq_id);
0880
0881
0882 if (txq_id == trans->txqs.cmd.q_id)
0883 for (i = 0; i < txq->n_window; i++) {
0884 kfree_sensitive(txq->entries[i].cmd);
0885 kfree_sensitive(txq->entries[i].free_buf);
0886 }
0887 del_timer_sync(&txq->stuck_timer);
0888
0889 iwl_txq_gen2_free_memory(trans, txq);
0890
0891 trans->txqs.txq[txq_id] = NULL;
0892
0893 clear_bit(txq_id, trans->txqs.queue_used);
0894 }
0895
0896
0897
0898
0899 static int iwl_queue_init(struct iwl_txq *q, int slots_num)
0900 {
0901 q->n_window = slots_num;
0902
0903
0904
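	/*
	 * slots_num must be power-of-two size, otherwise
	 * iwl_txq_get_cmd_index is broken.
	 */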
0905 if (WARN_ON(!is_power_of_2(slots_num)))
0906 return -EINVAL;
0907
0908 q->low_mark = q->n_window / 4;
0909 if (q->low_mark < 4)
0910 q->low_mark = 4;
0911
0912 q->high_mark = q->n_window / 8;
0913 if (q->high_mark < 2)
0914 q->high_mark = 2;
0915
0916 q->write_ptr = 0;
0917 q->read_ptr = 0;
0918
0919 return 0;
0920 }
0921
0922 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
0923 bool cmd_queue)
0924 {
0925 int ret;
0926 u32 tfd_queue_max_size =
0927 trans->trans_cfg->base_params->max_tfd_queue_size;
0928
0929 txq->need_update = false;
0930
0931
0932
0933 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
0934 "Max tfd queue size must be a power of two, but is %d",
0935 tfd_queue_max_size))
0936 return -EINVAL;
0937
0938
0939 ret = iwl_queue_init(txq, slots_num);
0940 if (ret)
0941 return ret;
0942
0943 spin_lock_init(&txq->lock);
0944
0945 if (cmd_queue) {
0946 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
0947
0948 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
0949 }
0950
0951 __skb_queue_head_init(&txq->overflow_q);
0952
0953 return 0;
0954 }
0955
0956 void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
0957 {
0958 struct page **page_ptr;
0959 struct page *next;
0960
0961 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
0962 next = *page_ptr;
0963 *page_ptr = NULL;
0964
0965 while (next) {
0966 struct page *tmp = next;
0967
0968 next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
0969 sizeof(void *));
0970 __free_page(tmp);
0971 }
0972 }
0973
0974 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
0975 {
0976 u32 txq_id = txq->id;
0977 u32 status;
0978 bool active;
0979 u8 fifo;
0980
0981 if (trans->trans_cfg->use_tfh) {
0982 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
0983 txq->read_ptr, txq->write_ptr);
0984
0985 return;
0986 }
0987
0988 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
0989 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
0990 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
0991
0992 IWL_ERR(trans,
0993 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
0994 txq_id, active ? "" : "in", fifo,
0995 jiffies_to_msecs(txq->wd_timeout),
0996 txq->read_ptr, txq->write_ptr,
0997 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
0998 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
0999 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
1000 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1001 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
1002 }
1003
1004 static void iwl_txq_stuck_timer(struct timer_list *t)
1005 {
1006 struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
1007 struct iwl_trans *trans = txq->trans;
1008
1009 spin_lock(&txq->lock);
1010
1011 if (txq->read_ptr == txq->write_ptr) {
1012 spin_unlock(&txq->lock);
1013 return;
1014 }
1015 spin_unlock(&txq->lock);
1016
1017 iwl_txq_log_scd_error(trans, txq);
1018
1019 iwl_force_nmi(trans);
1020 }
1021
1022 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
1023 bool cmd_queue)
1024 {
1025 size_t tfd_sz = trans->txqs.tfd.size *
1026 trans->trans_cfg->base_params->max_tfd_queue_size;
1027 size_t tb0_buf_sz;
1028 int i;
1029
1030 if (WARN_ON(txq->entries || txq->tfds))
1031 return -EINVAL;
1032
1033 if (trans->trans_cfg->use_tfh)
1034 tfd_sz = trans->txqs.tfd.size * slots_num;
1035
1036 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
1037 txq->trans = trans;
1038
1039 txq->n_window = slots_num;
1040
1041 txq->entries = kcalloc(slots_num,
1042 sizeof(struct iwl_pcie_txq_entry),
1043 GFP_KERNEL);
1044
1045 if (!txq->entries)
1046 goto error;
1047
1048 if (cmd_queue)
1049 for (i = 0; i < slots_num; i++) {
1050 txq->entries[i].cmd =
1051 kmalloc(sizeof(struct iwl_device_cmd),
1052 GFP_KERNEL);
1053 if (!txq->entries[i].cmd)
1054 goto error;
1055 }
1056
1057
1058
1059 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
1060 &txq->dma_addr, GFP_KERNEL);
1061 if (!txq->tfds)
1062 goto error;
1063
1064 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
1065
1066 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
1067
1068 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
1069 &txq->first_tb_dma,
1070 GFP_KERNEL);
1071 if (!txq->first_tb_bufs)
1072 goto err_free_tfds;
1073
1074 return 0;
1075 err_free_tfds:
1076 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
1077 txq->tfds = NULL;
1078 error:
1079 if (txq->entries && cmd_queue)
1080 for (i = 0; i < slots_num; i++)
1081 kfree(txq->entries[i].cmd);
1082 kfree(txq->entries);
1083 txq->entries = NULL;
1084
1085 return -ENOMEM;
1086 }
1087
1088 static struct iwl_txq *
1089 iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
1090 {
1091 size_t bc_tbl_size, bc_tbl_entries;
1092 struct iwl_txq *txq;
1093 int ret;
1094
1095 WARN_ON(!trans->txqs.bc_tbl_size);
1096
1097 bc_tbl_size = trans->txqs.bc_tbl_size;
1098 bc_tbl_entries = bc_tbl_size / sizeof(u16);
1099
1100 if (WARN_ON(size > bc_tbl_entries))
1101 return ERR_PTR(-EINVAL);
1102
1103 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1104 if (!txq)
1105 return ERR_PTR(-ENOMEM);
1106
1107 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
1108 &txq->bc_tbl.dma);
1109 if (!txq->bc_tbl.addr) {
1110 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1111 kfree(txq);
1112 return ERR_PTR(-ENOMEM);
1113 }
1114
1115 ret = iwl_txq_alloc(trans, txq, size, false);
1116 if (ret) {
1117 IWL_ERR(trans, "Tx queue alloc failed\n");
1118 goto error;
1119 }
1120 ret = iwl_txq_init(trans, txq, size, false);
1121 if (ret) {
1122 IWL_ERR(trans, "Tx queue init failed\n");
1123 goto error;
1124 }
1125
1126 txq->wd_timeout = msecs_to_jiffies(timeout);
1127
1128 return txq;
1129
1130 error:
1131 iwl_txq_gen2_free_memory(trans, txq);
1132 return ERR_PTR(ret);
1133 }
1134
1135 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
1136 struct iwl_host_cmd *hcmd)
1137 {
1138 struct iwl_tx_queue_cfg_rsp *rsp;
1139 int ret, qid;
1140 u32 wr_ptr;
1141
1142 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1143 sizeof(*rsp))) {
1144 ret = -EINVAL;
1145 goto error_free_resp;
1146 }
1147
1148 rsp = (void *)hcmd->resp_pkt->data;
1149 qid = le16_to_cpu(rsp->queue_number);
1150 wr_ptr = le16_to_cpu(rsp->write_pointer);
1151
1152 if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
1153 WARN_ONCE(1, "queue index %d unsupported", qid);
1154 ret = -EIO;
1155 goto error_free_resp;
1156 }
1157
1158 if (test_and_set_bit(qid, trans->txqs.queue_used)) {
1159 WARN_ONCE(1, "queue %d already used", qid);
1160 ret = -EIO;
1161 goto error_free_resp;
1162 }
1163
1164 if (WARN_ONCE(trans->txqs.txq[qid],
1165 "queue %d already allocated\n", qid)) {
1166 ret = -EIO;
1167 goto error_free_resp;
1168 }
1169
1170 txq->id = qid;
1171 trans->txqs.txq[qid] = txq;
1172 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1173
1174
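	/* Place first TFD at index corresponding to start sequence number */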
1175 txq->read_ptr = wr_ptr;
1176 txq->write_ptr = wr_ptr;
1177
1178 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1179
1180 iwl_free_resp(hcmd);
1181 return qid;
1182
1183 error_free_resp:
1184 iwl_free_resp(hcmd);
1185 iwl_txq_gen2_free_memory(trans, txq);
1186 return ret;
1187 }
1188
1189 int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1190 u8 tid, int size, unsigned int timeout)
1191 {
1192 struct iwl_txq *txq;
1193 union {
1194 struct iwl_tx_queue_cfg_cmd old;
1195 struct iwl_scd_queue_cfg_cmd new;
1196 } cmd;
1197 struct iwl_host_cmd hcmd = {
1198 .flags = CMD_WANT_SKB,
1199 };
1200 int ret;
1201
1202 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
1203 trans->hw_rev_step == SILICON_A_STEP)
1204 size = 4096;
1205
1206 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1207 if (IS_ERR(txq))
1208 return PTR_ERR(txq);
1209
1210 if (trans->txqs.queue_alloc_cmd_ver == 0) {
1211 memset(&cmd.old, 0, sizeof(cmd.old));
1212 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
1213 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1214 cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1215 cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
1216 cmd.old.tid = tid;
1217
1218 if (hweight32(sta_mask) != 1) {
1219 ret = -EINVAL;
1220 goto error;
1221 }
1222 cmd.old.sta_id = ffs(sta_mask) - 1;
1223
1224 hcmd.id = SCD_QUEUE_CFG;
1225 hcmd.len[0] = sizeof(cmd.old);
1226 hcmd.data[0] = &cmd.old;
1227 } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
1228 memset(&cmd.new, 0, sizeof(cmd.new));
1229 cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
1230 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
1231 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
1232 cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1233 cmd.new.u.add.flags = cpu_to_le32(flags);
1234 cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
1235 cmd.new.u.add.tid = tid;
1236
1237 hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
1238 hcmd.len[0] = sizeof(cmd.new);
1239 hcmd.data[0] = &cmd.new;
1240 } else {
1241 ret = -EOPNOTSUPP;
1242 goto error;
1243 }
1244
1245 ret = iwl_trans_send_cmd(trans, &hcmd);
1246 if (ret)
1247 goto error;
1248
1249 return iwl_txq_alloc_response(trans, txq, &hcmd);
1250
1251 error:
1252 iwl_txq_gen2_free_memory(trans, txq);
1253 return ret;
1254 }
1255
1256 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1257 {
1258 if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1259 "queue %d out of range", queue))
1260 return;
1261
1262
1263
1264
1265
1266
1267
1268 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
1269 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1270 "queue %d not used", queue);
1271 return;
1272 }
1273
1274 iwl_txq_gen2_free(trans, queue);
1275
1276 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1277 }
1278
1279 void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1280 {
1281 int i;
1282
1283 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
1284
1285
1286 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
1287 if (!trans->txqs.txq[i])
1288 continue;
1289
1290 iwl_txq_gen2_free(trans, i);
1291 }
1292 }
1293
1294 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1295 {
1296 struct iwl_txq *queue;
1297 int ret;
1298
1299
1300 if (!trans->txqs.txq[txq_id]) {
1301 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1302 if (!queue) {
1303 IWL_ERR(trans, "Not enough memory for tx queue\n");
1304 return -ENOMEM;
1305 }
1306 trans->txqs.txq[txq_id] = queue;
1307 ret = iwl_txq_alloc(trans, queue, queue_size, true);
1308 if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1310 goto error;
1311 }
1312 } else {
1313 queue = trans->txqs.txq[txq_id];
1314 }
1315
1316 ret = iwl_txq_init(trans, queue, queue_size,
1317 (txq_id == trans->txqs.cmd.q_id));
1318 if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1320 goto error;
1321 }
1322 trans->txqs.txq[txq_id]->id = txq_id;
1323 set_bit(txq_id, trans->txqs.queue_used);
1324
1325 return 0;
1326
1327 error:
1328 iwl_txq_gen2_tx_free(trans);
1329 return ret;
1330 }
1331
1332 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
1333 void *_tfd, u8 idx)
1334 {
1335 struct iwl_tfd *tfd;
1336 struct iwl_tfd_tb *tb;
1337 dma_addr_t addr;
1338 dma_addr_t hi_len;
1339
1340 if (trans->trans_cfg->use_tfh) {
1341 struct iwl_tfh_tfd *tfh_tfd = _tfd;
1342 struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
1343
1344 return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
1345 }
1346
1347 tfd = _tfd;
1348 tb = &tfd->tbs[idx];
1349 addr = get_unaligned_le32(&tb->lo);
1350
1351 if (sizeof(dma_addr_t) <= sizeof(u32))
1352 return addr;
1353
1354 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
1355
1356
1357
1358
1359
1360
1361 return addr | ((hi_len << 16) << 16);
1362 }
1363
1364 void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
1365 struct iwl_cmd_meta *meta,
1366 struct iwl_txq *txq, int index)
1367 {
1368 int i, num_tbs;
1369 void *tfd = iwl_txq_get_tfd(trans, txq, index);
1370
1371
1372 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1373
1374 if (num_tbs > trans->txqs.tfd.max_tbs) {
1375 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
1376
1377 return;
1378 }
1379
1380
1381
1382 for (i = 1; i < num_tbs; i++) {
1383 if (meta->tbs & BIT(i))
1384 dma_unmap_page(trans->dev,
1385 iwl_txq_gen1_tfd_tb_get_addr(trans,
1386 tfd, i),
1387 iwl_txq_gen1_tfd_tb_get_len(trans,
1388 tfd, i),
1389 DMA_TO_DEVICE);
1390 else
1391 dma_unmap_single(trans->dev,
1392 iwl_txq_gen1_tfd_tb_get_addr(trans,
1393 tfd, i),
1394 iwl_txq_gen1_tfd_tb_get_len(trans,
1395 tfd, i),
1396 DMA_TO_DEVICE);
1397 }
1398
1399 meta->tbs = 0;
1400
1401 if (trans->trans_cfg->use_tfh) {
1402 struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
1403
1404 tfd_fh->num_tbs = 0;
1405 } else {
1406 struct iwl_tfd *tfd_fh = (void *)tfd;
1407
1408 tfd_fh->num_tbs = 0;
1409 }
1410 }
1411
1412 #define IWL_TX_CRC_SIZE 4
1413 #define IWL_TX_DELIMITER_SIZE 4
1414
1415
1416
1417
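/*
 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */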
1418 void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
1419 struct iwl_txq *txq, u16 byte_cnt,
1420 int num_tbs)
1421 {
1422 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
1423 int write_ptr = txq->write_ptr;
1424 int txq_id = txq->id;
1425 u8 sec_ctl = 0;
1426 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1427 __le16 bc_ent;
1428 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
1429 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1430 u8 sta_id = tx_cmd->sta_id;
1431
1432 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1433
1434 sec_ctl = tx_cmd->sec_ctl;
1435
1436 switch (sec_ctl & TX_CMD_SEC_MSK) {
1437 case TX_CMD_SEC_CCM:
1438 len += IEEE80211_CCMP_MIC_LEN;
1439 break;
1440 case TX_CMD_SEC_TKIP:
1441 len += IEEE80211_TKIP_ICV_LEN;
1442 break;
1443 case TX_CMD_SEC_WEP:
1444 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
1445 break;
1446 }
1447 if (trans->txqs.bc_table_dword)
1448 len = DIV_ROUND_UP(len, 4);
1449
1450 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
1451 return;
1452
1453 bc_ent = cpu_to_le16(len | (sta_id << 12));
1454
1455 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1456
1457 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1458 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1459 bc_ent;
1460 }
1461
1462 void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
1463 struct iwl_txq *txq)
1464 {
1465 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1466 int txq_id = txq->id;
1467 int read_ptr = txq->read_ptr;
1468 u8 sta_id = 0;
1469 __le16 bc_ent;
1470 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
1471 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1472
1473 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
1474
1475 if (txq_id != trans->txqs.cmd.q_id)
1476 sta_id = tx_cmd->sta_id;
1477
1478 bc_ent = cpu_to_le16(1 | (sta_id << 12));
1479
1480 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
1481
1482 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
1483 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
1484 bc_ent;
1485 }
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
1497 {
1498
1499
1500
1501 int rd_ptr = txq->read_ptr;
1502 int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
1503 struct sk_buff *skb;
1504
1505 lockdep_assert_held(&txq->lock);
1506
1507 if (!txq->entries)
1508 return;
1509
1510
1511
1512
1513 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
1514
1515
1516 skb = txq->entries[idx].skb;
1517
1518
1519
1520
1521
1522 if (skb) {
1523 iwl_op_mode_free_skb(trans->op_mode, skb);
1524 txq->entries[idx].skb = NULL;
1525 }
1526 }
1527
1528 void iwl_txq_progress(struct iwl_txq *txq)
1529 {
1530 lockdep_assert_held(&txq->lock);
1531
1532 if (!txq->wd_timeout)
1533 return;
1534
1535
1536
1537
1538
1539 if (txq->frozen)
1540 return;
1541
1542
1543
1544
1545
1546 if (txq->read_ptr == txq->write_ptr)
1547 del_timer(&txq->stuck_timer);
1548 else
1549 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1550 }
1551
1552
1553 void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1554 struct sk_buff_head *skbs)
1555 {
1556 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1557 int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
1558 int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1559 int last_to_free;
1560
1561
1562 if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1563 return;
1564
1565 spin_lock_bh(&txq->lock);
1566
1567 if (!test_bit(txq_id, trans->txqs.queue_used)) {
1568 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1569 txq_id, ssn);
1570 goto out;
1571 }
1572
1573 if (read_ptr == tfd_num)
1574 goto out;
1575
1576 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1577 txq_id, txq->read_ptr, tfd_num, ssn);
1578
1579
1580
1581 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1582
1583 if (!iwl_txq_used(txq, last_to_free)) {
1584 IWL_ERR(trans,
1585 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1586 __func__, txq_id, last_to_free,
1587 trans->trans_cfg->base_params->max_tfd_queue_size,
1588 txq->write_ptr, txq->read_ptr);
1589
1590 iwl_op_mode_time_point(trans->op_mode,
1591 IWL_FW_INI_TIME_POINT_FAKE_TX,
1592 NULL);
1593 goto out;
1594 }
1595
1596 if (WARN_ON(!skb_queue_empty(skbs)))
1597 goto out;
1598
1599 for (;
1600 read_ptr != tfd_num;
1601 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
1602 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1603 struct sk_buff *skb = txq->entries[read_ptr].skb;
1604
1605 if (WARN_ON_ONCE(!skb))
1606 continue;
1607
1608 iwl_txq_free_tso_page(trans, skb);
1609
1610 __skb_queue_tail(skbs, skb);
1611
1612 txq->entries[read_ptr].skb = NULL;
1613
1614 if (!trans->trans_cfg->use_tfh)
1615 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1616
1617 iwl_txq_free_tfd(trans, txq);
1618 }
1619
1620 iwl_txq_progress(txq);
1621
1622 if (iwl_txq_space(trans, txq) > txq->low_mark &&
1623 test_bit(txq_id, trans->txqs.queue_stopped)) {
1624 struct sk_buff_head overflow_skbs;
1625
1626 __skb_queue_head_init(&overflow_skbs);
1627 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
1628
1629
1630
1631
1632
1633
1634
1635
1636 txq->overflow_tx = true;
1637
1638
1639
1640
1641
1642
1643
1644
1645 spin_unlock_bh(&txq->lock);
1646
1647 while (!skb_queue_empty(&overflow_skbs)) {
1648 struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
1649 struct iwl_device_tx_cmd *dev_cmd_ptr;
1650
1651 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1652 trans->txqs.dev_cmd_offs);
1653
1654
1655
1656
1657
1658
1659 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1660 }
1661
1662 if (iwl_txq_space(trans, txq) > txq->low_mark)
1663 iwl_wake_queue(trans, txq);
1664
1665 spin_lock_bh(&txq->lock);
1666 txq->overflow_tx = false;
1667 }
1668
1669 out:
1670 spin_unlock_bh(&txq->lock);
1671 }
1672
1673
1674 void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
1675 {
1676 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1677
1678 spin_lock_bh(&txq->lock);
1679
1680 txq->write_ptr = ptr;
1681 txq->read_ptr = txq->write_ptr;
1682
1683 spin_unlock_bh(&txq->lock);
1684 }
1685
1686 void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
1687 bool freeze)
1688 {
1689 int queue;
1690
1691 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1692 struct iwl_txq *txq = trans->txqs.txq[queue];
1693 unsigned long now;
1694
1695 spin_lock_bh(&txq->lock);
1696
1697 now = jiffies;
1698
1699 if (txq->frozen == freeze)
1700 goto next_queue;
1701
1702 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1703 freeze ? "Freezing" : "Waking", queue);
1704
1705 txq->frozen = freeze;
1706
1707 if (txq->read_ptr == txq->write_ptr)
1708 goto next_queue;
1709
1710 if (freeze) {
1711 if (unlikely(time_after(now,
1712 txq->stuck_timer.expires))) {
1713
1714
1715
1716
1717 goto next_queue;
1718 }
1719
1720 txq->frozen_expiry_remainder =
1721 txq->stuck_timer.expires - now;
1722 del_timer(&txq->stuck_timer);
1723 goto next_queue;
1724 }
1725
1726
1727
1728
1729
1730 mod_timer(&txq->stuck_timer,
1731 now + txq->frozen_expiry_remainder);
1732
1733 next_queue:
1734 spin_unlock_bh(&txq->lock);
1735 }
1736 }
1737
1738 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
1739
1740 static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
1741 struct iwl_host_cmd *cmd)
1742 {
1743 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
1744 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1745 int cmd_idx;
1746 int ret;
1747
1748 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
1749
1750 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1751 &trans->status),
1752 "Command %s: a command is already active!\n", cmd_str))
1753 return -EIO;
1754
1755 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
1756
1757 cmd_idx = trans->ops->send_cmd(trans, cmd);
1758 if (cmd_idx < 0) {
1759 ret = cmd_idx;
1760 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1761 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
1762 cmd_str, ret);
1763 return ret;
1764 }
1765
1766 ret = wait_event_timeout(trans->wait_command_queue,
1767 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1768 &trans->status),
1769 HOST_COMPLETE_TIMEOUT);
1770 if (!ret) {
1771 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1772 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1773
1774 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1775 txq->read_ptr, txq->write_ptr);
1776
1777 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1778 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1779 cmd_str);
1780 ret = -ETIMEDOUT;
1781
1782 iwl_trans_sync_nmi(trans);
1783 goto cancel;
1784 }
1785
1786 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1787 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
1788 &trans->status)) {
1789 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
1790 dump_stack();
1791 }
1792 ret = -EIO;
1793 goto cancel;
1794 }
1795
1796 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1797 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1798 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1799 ret = -ERFKILL;
1800 goto cancel;
1801 }
1802
1803 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1804 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
1805 ret = -EIO;
1806 goto cancel;
1807 }
1808
1809 return 0;
1810
1811 cancel:
1812 if (cmd->flags & CMD_WANT_SKB) {
1813
1814
1815
1816
1817
1818
1819 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1820 }
1821
1822 if (cmd->resp_pkt) {
1823 iwl_free_resp(cmd);
1824 cmd->resp_pkt = NULL;
1825 }
1826
1827 return ret;
1828 }
1829
1830 int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
1831 struct iwl_host_cmd *cmd)
1832 {
1833
1834 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1835 return -ENODEV;
1836
1837 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1838 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1839 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1840 cmd->id);
1841 return -ERFKILL;
1842 }
1843
1844 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
1845 !(cmd->flags & CMD_SEND_IN_D3))) {
1846 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
1847 return -EHOSTDOWN;
1848 }
1849
1850 if (cmd->flags & CMD_ASYNC) {
1851 int ret;
1852
1853
1854 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1855 return -EINVAL;
1856
1857 ret = trans->ops->send_cmd(trans, cmd);
1858 if (ret < 0) {
1859 IWL_ERR(trans,
1860 "Error sending %s: enqueue_hcmd failed: %d\n",
1861 iwl_get_cmd_string(trans, cmd->id), ret);
1862 return ret;
1863 }
1864 return 0;
1865 }
1866
1867 return iwl_trans_txq_send_hcmd_sync(trans, cmd);
1868 }
1869