// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
 */

#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include <linux/bitfield.h>
#include "core.h"
#include "bmi.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
#include "coredump.h"

void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);

#define ATH10K_SDIO_VSG_BUF_SIZE	(64 * 1024)

/* inlined helper functions */

static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
						   size_t len)
{
	return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
}

static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
{
	return (enum ath10k_htc_ep_id)pipe_id;
}

static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
{
	dev_kfree_skb(pkt->skb);
	pkt->skb = NULL;
	pkt->alloc_len = 0;
	pkt->act_len = 0;
	pkt->trailer_only = false;
}

static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
						size_t act_len, size_t full_len,
						bool part_of_bundle,
						bool last_in_bundle)
{
	pkt->skb = dev_alloc_skb(full_len);
	if (!pkt->skb)
		return -ENOMEM;

	pkt->act_len = act_len;
	pkt->alloc_len = full_len;
	pkt->part_of_bundle = part_of_bundle;
	pkt->last_in_bundle = last_in_bundle;
	pkt->trailer_only = false;

	return 0;
}

static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
{
	bool trailer_only = false;
	struct ath10k_htc_hdr *htc_hdr =
		(struct ath10k_htc_hdr *)pkt->skb->data;
	u16 len = __le16_to_cpu(htc_hdr->len);

	if (len == htc_hdr->trailer_len)
		trailer_only = true;

	return trailer_only;
}
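
/* Build the 32-bit argument of an SDIO CMD52 (IO_RW_DIRECT) transfer:
 * bit 31 selects write vs read, bit 27 the read-after-write (RAW) flag,
 * bits 25:9 carry the register address and bits 7:0 the data byte. The
 * function number bits (30:28) are left at zero, so the command always
 * targets function 0.
 */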
static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	*arg = FIELD_PREP(BIT(31), write) |
	       FIELD_PREP(BIT(27), raw) |
	       FIELD_PREP(BIT(26), 1) |
	       FIELD_PREP(GENMASK(25, 9), address) |
	       FIELD_PREP(BIT(8), 1) |
	       FIELD_PREP(GENMASK(7, 0), val);
}

static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char *byte)
{
	struct mmc_command io_cmd;
	int ret;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
	if (!ret)
		*byte = io_cmd.resp[0];

	return ret;
}

static int ath10k_sdio_config(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	unsigned char byte, asyncintdelay = 2;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");

	sdio_claim_host(func);

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      &byte);

	byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
	byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
			   ATH10K_SDIO_DRIVE_DTSX_TYPE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      byte);

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(
		func->card,
		CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
		&byte);

	byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
		goto out;
	}

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      &byte);

	byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
			    ret);
		goto out;
	}

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      &byte);

	byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
	byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      byte);

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
	if (ret) {
		ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
			    ar_sdio->mbox_info.block_size, ret);
		goto out;
	}

out:
	sdio_release_host(func);
	return ret;
}

static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	sdio_writel(func, val, addr, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
			    val, addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
		   addr, val);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	__le32 *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = cpu_to_le32(val);

	sdio_claim_host(func);

	ret = sdio_writesb(func, addr, buf, sizeof(*buf));
	if (ret) {
		ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
			    val, addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
		   addr, val);

out:
	sdio_release_host(func);

	kfree(buf);

	return ret;
}

static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);
	*val = sdio_readl(func, addr, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
		   addr, *val);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	ret = sdio_memcpy_fromio(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	/* For some reason toio() doesn't have const for the buffer, need
	 * to cast it away
	 */
	ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
	if (ret) {
		ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	len = round_down(len, ar_sdio->mbox_info.block_size);

	ret = sdio_readsb(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}
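
/* HIF mbox functions.
 *
 * Process one HTC packet received from the mailbox: handle an optional
 * lookahead/credit-report trailer at the end of the frame and then strip
 * the HTC header, leaving only the payload in the skb.
 */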
static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
					      struct ath10k_sdio_rx_data *pkt,
					      u32 *lookaheads,
					      int *n_lookaheads)
{
	struct ath10k_htc *htc = &ar->htc;
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	enum ath10k_htc_ep_id eid;
	u8 *trailer;
	int ret;

	if (trailer_present) {
		trailer = skb->data + skb->len - htc_hdr->trailer_len;

		eid = pipe_id_to_eid(htc_hdr->eid);

		ret = ath10k_htc_process_trailer(htc,
						 trailer,
						 htc_hdr->trailer_len,
						 eid,
						 lookaheads,
						 n_lookaheads);
		if (ret)
			return ret;

		if (is_trailer_only_msg(pkt))
			pkt->trailer_only = true;

		skb_trim(skb, skb->len - htc_hdr->trailer_len);
	}

	skb_pull(skb, sizeof(*htc_hdr));

	return 0;
}

static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
					       u32 lookaheads[],
					       int *n_lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_ep *ep;
	struct ath10k_skb_rxcb *cb;
	enum ath10k_htc_ep_id id;
	int ret, i, *n_lookahead_local;
	u32 *lookaheads_local;
	int lookahead_idx = 0;

	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		lookaheads_local = lookaheads;
		n_lookahead_local = n_lookahead;

		id = ((struct ath10k_htc_hdr *)
		      &lookaheads[lookahead_idx++])->eid;

		if (id >= ATH10K_HTC_EP_COUNT) {
			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
				    id);
			ret = -ENOMEM;
			goto out;
		}

		ep = &htc->endpoint[id];

		if (ep->service_id == 0) {
			ath10k_warn(ar, "ep %d is not connected\n", id);
			ret = -ENOMEM;
			goto out;
		}

		pkt = &ar_sdio->rx_pkts[i];

		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
			/* Only read lookahead's from RX trailers
			 * for the last packet in a bundle.
			 */
			lookahead_idx--;
			lookaheads_local = NULL;
			n_lookahead_local = NULL;
		}

		ret = ath10k_sdio_mbox_rx_process_packet(ar,
							 pkt,
							 lookaheads_local,
							 n_lookahead_local);
		if (ret)
			goto out;

		if (!pkt->trailer_only) {
			cb = ATH10K_SKB_RXCB(pkt->skb);
			cb->eid = id;

			skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
			queue_work(ar->workqueue_aux,
				   &ar_sdio->async_work_rx);
		} else {
			kfree_skb(pkt->skb);
		}

		/* The RX complete handler now owns the skb...*/
		pkt->skb = NULL;
		pkt->alloc_len = 0;
	}

	ret = 0;

out:
	/* Free all packets that were not passed on to the RX completion
	 * handler.
	 */
	for (; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	return ret;
}

static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
					 struct ath10k_sdio_rx_data *rx_pkts,
					 struct ath10k_htc_hdr *htc_hdr,
					 size_t full_len, size_t act_len,
					 size_t *bndl_cnt)
{
	int ret, i;
	u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;

	*bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);

	if (*bndl_cnt > max_msgs) {
		ath10k_warn(ar,
			    "HTC bundle length %u exceeds maximum %u\n",
			    le16_to_cpu(htc_hdr->len),
			    max_msgs);
		return -ENOMEM;
	}

	/* Allocate bndl_cnt extra skb's for the bundle.
	 * The package containing the
	 * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included
	 * in bndl_cnt. The skb for that packet will be
	 * allocated separately.
	 */
	for (i = 0; i < *bndl_cnt; i++) {
		ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
						    act_len,
						    full_len,
						    true,
						    false);
		if (ret)
			return ret;
	}

	return 0;
}
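
/* Allocate rx buffers for the frames announced in the lookahead words.
 * Each lookahead is a copy of the first four bytes of an HTC header,
 * which is enough to derive the buffer size for the full frame (or for
 * a whole bundle of equally sized frames).
 */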
static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
				     u32 lookaheads[], int n_lookaheads)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc_hdr *htc_hdr;
	size_t full_len, act_len;
	bool last_in_bundle;
	int ret, i;
	int pkt_cnt = 0;

	if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
		ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
			    n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < n_lookaheads; i++) {
		htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
		last_in_bundle = false;

		if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
			ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
				    le16_to_cpu(htc_hdr->len),
				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
			ret = -ENOMEM;

			ath10k_core_start_recovery(ar);
			ath10k_warn(ar, "exceeds length, start recovery\n");

			goto err;
		}

		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
		full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);

		if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
			ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
				    htc_hdr->eid, htc_hdr->flags,
				    le16_to_cpu(htc_hdr->len));
			ret = -EINVAL;
			goto err;
		}

		if (ath10k_htc_get_bundle_count(
			ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
			/* HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			size_t bndl_cnt;

			ret = ath10k_sdio_mbox_alloc_bundle(ar,
							    &ar_sdio->rx_pkts[pkt_cnt],
							    htc_hdr,
							    full_len,
							    act_len,
							    &bndl_cnt);

			if (ret) {
				ath10k_warn(ar, "failed to allocate a bundle: %d\n",
					    ret);
				goto err;
			}

			pkt_cnt += bndl_cnt;

			/* next buffer will be the last in the bundle */
			last_in_bundle = true;
		}

		/* Allocate skb for packet. If the packet had the
		 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
		 * packet skb's have been allocated in the previous step.
		 */
		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
			full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;

		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
						    act_len,
						    full_len,
						    last_in_bundle,
						    last_in_bundle);
		if (ret) {
			ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
			goto err;
		}

		pkt_cnt++;
	}

	ar_sdio->n_rx_pkts = pkt_cnt;

	return 0;

err:
	for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
		if (!ar_sdio->rx_pkts[i].alloc_len)
			break;
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
	}

	return ret;
}
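
/* Fetch a single HTC packet from the mailbox into its preallocated skb
 * and trim it to the actual length reported by the HTC header.
 */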
static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr;
	int ret;

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 skb->data, pkt->alloc_len);
	if (ret)
		goto err;

	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

	if (pkt->act_len > pkt->alloc_len) {
		ret = -EINVAL;
		goto err;
	}

	skb_put(skb, pkt->act_len);
	return 0;

err:
	ar_sdio->n_rx_pkts = 0;
	ath10k_sdio_mbox_free_rx_pkt(pkt);

	return ret;
}
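
/* Fetch a whole bundle of HTC packets with a single scatter read into
 * the vsg bounce buffer and then copy each packet into its own skb.
 */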
static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_hdr *htc_hdr;
	int ret, i;
	u32 pkt_offset, virt_pkt_len;

	virt_pkt_len = 0;
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;

	if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
		ath10k_warn(ar, "sdio vsg buffer size limit exceeded: %d\n",
			    virt_pkt_len);
		ret = -E2BIG;
		goto err;
	}

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 ar_sdio->vsg_buffer, virt_pkt_len);
	if (ret) {
		ath10k_warn(ar, "failed to read bundle packets: %d\n", ret);
		goto err;
	}

	pkt_offset = 0;
	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		pkt = &ar_sdio->rx_pkts[i];
		htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
		pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

		if (pkt->act_len > pkt->alloc_len) {
			ret = -EINVAL;
			goto err;
		}

		skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
		pkt_offset += pkt->alloc_len;
	}

	return 0;

err:
	/* Free all packets that were not passed on to the RX completion
	 * handler.
	 */
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	ar_sdio->n_rx_pkts = 0;

	return ret;
}
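
/* Timeout for mailbox processing done from the sdio irq handler if no
 * immediate underlying hardware activity is detected.
 */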
#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)

static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
						  u32 msg_lookahead, bool *done)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
	int n_lookaheads = 1;
	unsigned long timeout;
	int ret;

	*done = true;

	/* Copy the lookahead obtained from the HTC register table into our
	 * temp array as a start value.
	 */
	lookaheads[0] = msg_lookahead;

	timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
	do {
		/* Try to allocate as many HTC RX packets indicated by
		 * n_lookaheads.
		 */
		ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
						n_lookaheads);
		if (ret)
			break;

		if (ar_sdio->n_rx_pkts >= 2)
			/* A recv bundle was detected, force IRQ status
			 * re-check again.
			 */
			*done = false;

		if (ar_sdio->n_rx_pkts > 1)
			ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
		else
			ret = ath10k_sdio_mbox_rx_fetch(ar);

		/* Process fetched packets. This will potentially update
		 * n_lookaheads depending on if the packets contain lookahead
		 * reports.
		 */
		n_lookaheads = 0;
		ret = ath10k_sdio_mbox_rx_process_packets(ar,
							  lookaheads,
							  &n_lookaheads);

		if (!n_lookaheads || ret)
			break;

		/* For SYNCH processing, if we get here, we are running
		 * through the loop again due to updated lookaheads. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		*done = false;
	} while (time_before(jiffies, timeout));

	if (ret && (ret != -ECANCELED))
		ath10k_warn(ar, "failed to get pending recv messages: %d\n",
			    ret);

	return ret;
}

static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
{
	u32 val;
	int ret;

	/* TODO: Add firmware crash handling */
	ath10k_warn(ar, "firmware crashed\n");

	/* read counter to clear the interrupt, the debug error interrupt is
	 * counter 0.
	 */
	ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
	if (ret)
		ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);

	return ret;
}

static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 counter_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
			     irq_data->irq_en_reg->cntr_int_status_en;

	/* NOTE: other modules like GMBOX may use the counter interrupt for
	 * credit flow control on other counters, we only need to check for
	 * the debug assertion counter interrupt.
	 */
	if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
		ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
	else
		ret = 0;

	mutex_unlock(&irq_data->mtx);

	return ret;
}

static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 error_int_status;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");

	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
	if (!error_int_status) {
		ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
			    error_int_status);
		return -EIO;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio error_int_status 0x%x\n", error_int_status);

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
		      error_int_status))
		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "rx underflow interrupt error\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "tx overflow interrupt error\n");

	/* Clear the interrupt */
	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;

	/* set W1C value to clear the interrupt, this hits the register first */
	ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
				    error_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to error int status address: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 cpu_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
			 irq_data->irq_en_reg->cpu_int_status_en;
	if (!cpu_int_status) {
		ath10k_warn(ar, "CPU interrupt status is zero\n");
		ret = -EIO;
		goto out;
	}

	/* Clear the interrupt */
	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;

	/* Set up the register transfer buffer to hit the register 4 times,
	 * this is done to make the access 4-byte aligned to mitigate issues
	 * with host bus interconnects that restrict bus transfer lengths to
	 * be a multiple of 4-bytes.
	 *
	 * Set W1C value to clear the interrupt, this hits the register first.
	 */
	ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
				    cpu_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
			    ret);
		goto out;
	}

out:
	mutex_unlock(&irq_data->mtx);
	if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
		ath10k_sdio_fw_crashed_dump(ar);

	return ret;
}

static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
					    u8 *host_int_status,
					    u32 *lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
	u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
	int ret;

	mutex_lock(&irq_data->mtx);

	*lookahead = 0;
	*host_int_status = 0;

	/* int_status_en is supposed to be non zero, otherwise interrupts
	 * shouldn't be enabled. There is however a short time frame during
	 * initialization between the irq register and int_status_en init
	 * where this can happen.
	 * We silently ignore this condition.
	 */
	if (!irq_en_reg->int_status_en) {
		ret = 0;
		goto out;
	}

	/* Read the first sizeof(struct ath10k_irq_proc_registers)
	 * bytes of the HTC register table. This
	 * will yield us the value of different int status
	 * registers and the lookahead registers.
	 */
	ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
			       irq_proc_reg, sizeof(*irq_proc_reg));
	if (ret) {
		ath10k_core_start_recovery(ar);
		ath10k_warn(ar, "read int status fail, start recovery\n");
		goto out;
	}

	/* Update only those registers that are enabled */
	*host_int_status = irq_proc_reg->host_int_status &
			   irq_en_reg->int_status_en;

	/* Look at mbox status */
	if (!(*host_int_status & htc_mbox)) {
		*lookahead = 0;
		ret = 0;
		goto out;
	}

	/* Mask out pending mbox value, we use look ahead as
	 * the real flag for mbox processing.
	 */
	*host_int_status &= ~htc_mbox;
	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
		*lookahead = le32_to_cpu(
			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
		if (!*lookahead)
			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
	}

out:
	mutex_unlock(&irq_data->mtx);
	return ret;
}

static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
					      bool *done)
{
	u8 host_int_status;
	u32 lookahead;
	int ret;

	/* NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */
	ret = ath10k_sdio_mbox_read_int_status(ar,
					       &host_int_status,
					       &lookahead);
	if (ret) {
		*done = true;
		goto out;
	}

	if (!host_int_status && !lookahead) {
		ret = 0;
		*done = true;
		goto out;
	}

	if (lookahead) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio pending mailbox msg lookahead 0x%08x\n",
			   lookahead);

		ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
							     lookahead,
							     done);
		if (ret)
			goto out;
	}

	/* now, handle the rest of the interrupts */
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio host_int_status 0x%x\n", host_int_status);

	if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
		/* CPU Interrupt */
		ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
		/* Error Interrupt */
		ret = ath10k_sdio_mbox_proc_err_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
		/* Counter Interrupt */
		ret = ath10k_sdio_mbox_proc_counter_intr(ar);

	ret = 0;

out:
	/* An optimization to bypass reading the IRQ status registers
	 * unnecessarily which can re-wake the target: if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers which can re-wake the target.
	 *
	 * NOTE: host interfaces that use polling of pending mbox messages
	 * at the hif level can not use this optimization due to possible
	 * side effects; SPI requires the host to drain all messages from
	 * the mailbox before exiting the ISR routine.
	 */
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio pending irqs done %d status %d\n",
		   *done, ret);

	return ret;
}

static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;

	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;

	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;

	dev_id_base = (device & 0x0F00);
	dev_id_chiprev = (device & 0x00FF);
	switch (dev_id_base) {
	case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
		if (dev_id_chiprev < 4)
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH;
		else
			/* from QCA6174 2.0(0x504), the width has been extended
			 * to 56K
			 */
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	default:
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH;
	}

	mbox_info->ext_info[1].htc_ext_addr =
		mbox_info->ext_info[0].htc_ext_addr +
		mbox_info->ext_info[0].htc_ext_sz +
		ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
}
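
/* BMI functions */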
static int ath10k_sdio_bmi_credits(struct ath10k *ar)
{
	u32 addr, cmd_credits;
	unsigned long timeout;
	int ret;

	/* Read the counter register to get the command credits */
	addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	cmd_credits = 0;

	while (time_before(jiffies, timeout) && !cmd_credits) {
		/* Hit the credit counter with a 4-byte access, the first byte
		 * read will hit the counter and cause a decrement, while the
		 * remaining 3 bytes has no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
		if (ret) {
			ath10k_warn(ar,
				    "unable to decrement the command credit count register: %d\n",
				    ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes
		 */
		cmd_credits &= 0xFF;
	}

	if (!cmd_credits) {
		ath10k_warn(ar, "bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
{
	unsigned long timeout;
	u32 rx_word;
	int ret;

	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	rx_word = 0;

	while ((time_before(jiffies, timeout)) && !rx_word) {
		ret = ath10k_sdio_read32(ar,
					 MBOX_HOST_INT_STATUS_ADDRESS,
					 &rx_word);
		if (ret) {
			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= 1;
	}

	if (!rx_word) {
		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}

static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
					void *req, u32 req_len,
					void *resp, u32 *resp_len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr;
	int ret;

	if (req) {
		ret = ath10k_sdio_bmi_credits(ar);
		if (ret)
			return ret;

		addr = ar_sdio->mbox_info.htc_addr;

		memcpy(ar_sdio->bmi_buf, req, req_len);
		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
		if (ret) {
			ath10k_warn(ar,
				    "unable to send the bmi data to the device: %d\n",
				    ret);
			return ret;
		}
	}

	if (!resp || !resp_len)
		/* No response expected */
		return 0;
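
	/* During bootup the target adds the BMI response to its mbox FIFO
	 * byte by byte. Wait here until the lookahead register signals that
	 * response data is actually available instead of issuing the read
	 * immediately; reading too early risks SDIO timeouts or garbage
	 * data from the FIFO.
	 */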
	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
	if (ret)
		return ret;

	/* Read the response */
	addr = ar_sdio->mbox_info.htc_addr;
	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
	if (ret) {
		ath10k_warn(ar,
			    "unable to read the bmi data from the device: %d\n",
			    ret);
		return ret;
	}

	memcpy(resp, ar_sdio->bmi_buf, *resp_len);

	return 0;
}
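
/* sdio async handling functions */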
static struct ath10k_sdio_bus_request
*ath10k_sdio_alloc_busreq(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req;

	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		bus_req = NULL;
		goto out;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct ath10k_sdio_bus_request, list);
	list_del(&bus_req->list);

out:
	spin_unlock_bh(&ar_sdio->lock);
	return bus_req;
}

static void ath10k_sdio_free_bus_req(struct ath10k *ar,
				     struct ath10k_sdio_bus_request *bus_req)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);

	memset(bus_req, 0, sizeof(*bus_req));

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}

static void __ath10k_sdio_write_async(struct ath10k *ar,
				      struct ath10k_sdio_bus_request *req)
{
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	int ret;

	skb = req->skb;
	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
	if (ret)
		ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d\n",
			    req->address, ret);

	if (req->htc_msg) {
		ep = &ar->htc.endpoint[req->eid];
		ath10k_htc_notify_tx_completion(ep, skb);
	} else if (req->comp) {
		complete(req->comp);
	}

	ath10k_sdio_free_bus_req(ar, req);
}

/* To improve throughput use workqueue to deliver packets to HTC layer,
 * this way SDIO bus is utilised much better.
 */
static void ath10k_rx_indication_async_work(struct work_struct *work)
{
	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
						   async_work_rx);
	struct ath10k *ar = ar_sdio->ar;
	struct ath10k_htc_ep *ep;
	struct ath10k_skb_rxcb *cb;
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&ar_sdio->rx_head);
		if (!skb)
			break;
		cb = ATH10K_SKB_RXCB(skb);
		ep = &ar->htc.endpoint[cb->eid];
		ep->ep_ops.ep_rx_complete(ar, skb);
	}

	if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
		local_bh_disable();
		napi_schedule(&ar->napi);
		local_bh_enable();
	}
}

static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
{
	struct ath10k *ar = ar_sdio->ar;
	unsigned char rtc_state = 0;
	int ret = 0;

	rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
		return ret;
	}

	*state = rtc_state & 0x3;

	return ret;
}

static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 val;
	int retry = ATH10K_CIS_READ_RETRY, ret = 0;
	unsigned char rtc_state = 0;

	sdio_claim_host(ar_sdio->func);

	ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
	if (ret) {
		ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
			    ret);
		goto release;
	}

	if (enable_sleep) {
		val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
		ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
	} else {
		val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
		ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
	}

	ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
	if (ret) {
		ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d\n",
			    ret);
	}

	if (!enable_sleep) {
		do {
			udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
			ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);

			if (ret) {
				ath10k_warn(ar, "failed to disable mbox sleep: %d\n", ret);
				break;
			}

			ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
				   rtc_state);

			if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
				break;

			udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
			retry--;
		} while (retry > 0);
	}

release:
	sdio_release_host(ar_sdio->func);

	return ret;
}

static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
{
	struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);

	ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
}

static void ath10k_sdio_write_async_work(struct work_struct *work)
{
	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
						   wr_async_work);
	struct ath10k *ar = ar_sdio->ar;
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);

		if (req->address >= mbox_info->htc_addr &&
		    ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
			ath10k_sdio_set_mbox_sleep(ar, false);
			mod_timer(&ar_sdio->sleep_timer, jiffies +
				  msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
		}

		__ath10k_sdio_write_async(ar, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
		ath10k_sdio_set_mbox_sleep(ar, true);
}

static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
				      struct sk_buff *skb,
				      struct completion *comp,
				      bool htc_msg, enum ath10k_htc_ep_id eid)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req;

	/* Allocate a bus request for the message and queue it on the
	 * SDIO workqueue.
	 */
	bus_req = ath10k_sdio_alloc_busreq(ar);
	if (!bus_req) {
		ath10k_warn(ar,
			    "unable to allocate bus request for async request\n");
		return -ENOMEM;
	}

	bus_req->skb = skb;
	bus_req->eid = eid;
	bus_req->address = addr;
	bus_req->htc_msg = htc_msg;
	bus_req->comp = comp;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);

	return 0;
}
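
/* IRQ handler */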
static void ath10k_sdio_irq_handler(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;
	unsigned long timeout;
	bool done = false;
	int ret;

	/* Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
	do {
		ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
		if (ret)
			break;
	} while (time_before(jiffies, timeout) && !done);

	ath10k_mac_tx_push_pending(ar);

	sdio_claim_host(ar_sdio->func);

	if (ret && ret != -ECANCELED)
		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
			    ret);
}
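
/* sdio HIF functions */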
static int ath10k_sdio_disable_intrs(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	int ret;

	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs));
	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);

	mutex_unlock(&irq_data->mtx);

	return ret;
}

static int ath10k_sdio_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	if (!ar_sdio->is_disabled)
		return 0;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");

	ret = ath10k_sdio_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to config sdio: %d\n", ret);
		return ret;
	}

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/* Give the chip some time to settle after the function has been
	 * enabled.
	 */
	msleep(20);

	ar_sdio->is_disabled = false;

	ret = ath10k_sdio_disable_intrs(ar);
	if (ret)
		return ret;

	return 0;
}

static void ath10k_sdio_hif_power_down(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	if (ar_sdio->is_disabled)
		return;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");

	del_timer_sync(&ar_sdio->sleep_timer);
	ath10k_sdio_set_mbox_sleep(ar, true);

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);

	ret = sdio_disable_func(ar_sdio->func);
	if (ret) {
		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		return;
	}

	ret = mmc_hw_reset(ar_sdio->func->card);
	if (ret)
		ath10k_warn(ar, "unable to reset sdio: %d\n", ret);

	sdio_release_host(ar_sdio->func);

	ar_sdio->is_disabled = true;
}

static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	enum ath10k_htc_ep_id eid;
	struct sk_buff *skb;
	int ret, i;

	eid = pipe_id_to_eid(pipe_id);

	for (i = 0; i < n_items; i++) {
		size_t padded_len;
		u32 address;

		skb = items[i].transfer_context;
		padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
							      skb->len);
		skb_trim(skb, padded_len);

		/* Write TX data to the end of the mbox address space */
		address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
			  skb->len;
		ret = ath10k_sdio_prep_async_req(ar, address, skb,
						 NULL, true, eid);
		if (ret)
			return ret;
	}

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	return 0;
}

static int ath10k_sdio_enable_intrs(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	int ret;

	mutex_lock(&irq_data->mtx);

	/* Enable error, CPU and counter interrupts */
	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);

	/* Enable the mbox data interrupt as well so that rx is signalled
	 * through the int status registers.
	 */
	regs->int_status_en |=
		FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);

	/* Set up the CPU interrupt status register, enabling the target
	 * assertion interrupt used to report firmware crashes.
	 */
	regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);

	/* Set up the error interrupt status register */
	regs->err_int_status_en =
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);

	/* Enable counter interrupt status register to get fatal errors for
	 * debugging.
	 */
	regs->cntr_int_status_en =
		FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
			   ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);

	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar,
			    "failed to update mbox interrupt status register: %d\n",
			    ret);

	mutex_unlock(&irq_data->mtx);
	return ret;
}
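
/* HIF diagnostics */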
static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				     size_t buf_len)
{
	int ret;
	void *mem;

	mem = kzalloc(buf_len, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/* set window register to start read cycle */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
	if (ret) {
		ath10k_warn(ar, "failed to set mbox window read address: %d\n", ret);
		goto out;
	}

	/* read the data */
	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
	if (ret) {
		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
			    ret);
		goto out;
	}

	memcpy(buf, mem, buf_len);

out:
	kfree(mem);

	return ret;
}

static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
				   u32 *value)
{
	__le32 *val;
	int ret;

	val = kzalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
	if (ret)
		goto out;

	*value = __le32_to_cpu(*val);

out:
	kfree(val);

	return ret;
}

static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
					  const void *data, int nbytes)
{
	int ret;

	/* set write data */
	ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
	if (ret) {
		ath10k_warn(ar,
			    "failed to write 0x%p to mbox window data address: %d\n",
			    data, ret);
		return ret;
	}

	/* set window register, which starts the write cycle */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
	if (ret) {
		ath10k_warn(ar, "failed to set mbox window write address: %d\n", ret);
		return ret;
	}

	return 0;
}

static int ath10k_sdio_hif_start_post(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr, val;
	int ret = 0;

	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));

	ret = ath10k_sdio_diag_read32(ar, addr, &val);
	if (ret) {
		ath10k_warn(ar, "unable to read hi_acs_flags: %d\n", ret);
		return ret;
	}

	if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio mailbox swap service enabled\n");
		ar_sdio->swap_mbox = true;
	} else {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio mailbox swap service disabled\n");
		ar_sdio->swap_mbox = false;
	}

	ath10k_sdio_set_mbox_sleep(ar, true);

	return 0;
}

static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
{
	u32 addr, val;
	int ret;

	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));

	ret = ath10k_sdio_diag_read32(ar, addr, &val);
	if (ret) {
		ath10k_warn(ar,
			    "unable to read hi_acs_flags for htt tx complete: %d\n", ret);
		return ret;
	}

	ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
		   ret ? " " : " not ");

	return ret;
}
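
/* Bring-up path: enable NAPI, claim the SDIO IRQ and enable mbox
 * interrupts, then cycle mailbox sleep so the device ends up in a known
 * awake state.
 */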
static int ath10k_sdio_hif_start(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	ath10k_core_napi_enable(ar);

	/* Give the chip a moment to settle before interrupts are disabled
	 * and re-enabled below.
	 */
	msleep(20);
	ret = ath10k_sdio_disable_intrs(ar);
	if (ret)
		return ret;

	/* eid 0 always uses the lower part of the extended mailbox address
	 * space (ext_info[0].htc_ext_addr).
	 */
	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
	if (ret) {
		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		return ret;
	}

	sdio_release_host(ar_sdio->func);

	ret = ath10k_sdio_enable_intrs(ar);
	if (ret)
		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);

	/* Enable sleep and then disable it again */
	ret = ath10k_sdio_set_mbox_sleep(ar, true);
	if (ret)
		return ret;

	/* Wait for 20ms for the written value to take effect */
	msleep(20);

	ret = ath10k_sdio_set_mbox_sleep(ar, false);
	if (ret)
		return ret;

	return 0;
}

#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)

static void ath10k_sdio_irq_disable(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	struct sk_buff *skb;
	struct completion irqs_disabled_comp;
	int ret;

	skb = dev_alloc_skb(sizeof(*regs));
	if (!skb)
		return;

	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs));
	memcpy(skb->data, regs, sizeof(*regs));
	skb_put(skb, sizeof(*regs));

	mutex_unlock(&irq_data->mtx);

	init_completion(&irqs_disabled_comp);
	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
					 skb, &irqs_disabled_comp, false, 0);
	if (ret)
		goto out;

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	/* Wait for the completion of the IRQ disable request.
	 * If there is a timeout we will try to disable irq's anyway.
	 */
	ret = wait_for_completion_timeout(&irqs_disabled_comp,
					  SDIO_IRQ_DISABLE_TIMEOUT_HZ);
	if (!ret)
		ath10k_warn(ar, "sdio irq disable request timed out\n");

	sdio_claim_host(ar_sdio->func);

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);

	sdio_release_host(ar_sdio->func);

out:
	kfree_skb(skb);
}

static void ath10k_sdio_hif_stop(struct ath10k *ar)
{
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sk_buff *skb;

	ath10k_sdio_irq_disable(ar);

	cancel_work_sync(&ar_sdio->async_work_rx);

	while ((skb = skb_dequeue(&ar_sdio->rx_head)))
		dev_kfree_skb_any(skb);

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	/* Free all bus requests that have not been handled */
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		struct ath10k_htc_ep *ep;

		list_del(&req->list);

		if (req->htc_msg) {
			ep = &ar->htc.endpoint[req->eid];
			ath10k_htc_notify_tx_completion(ep, req->skb);
		} else if (req->skb) {
			kfree_skb(req->skb);
		}
		ath10k_sdio_free_bus_req(ar, req);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	ath10k_core_napi_sync_disable(ar);
}

#ifdef CONFIG_PM

static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
	return 0;
}

static int ath10k_sdio_hif_resume(struct ath10k *ar)
{
	switch (ar->state) {
	case ATH10K_STATE_OFF:
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath10k_sdio_config(ar);
		break;

	case ATH10K_STATE_ON:
	default:
		break;
	}

	return 0;
}
#endif

static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
	enum ath10k_htc_ep_id eid;
	bool ep_found = false;
	int i;

	/* For sdio, we are interested in the mapping between eid
	 * and pipeid rather than service_id to pipe_id.
	 * First we find out which eid has been allocated to the
	 * service.
	 */
	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->endpoint[i].service_id == service_id) {
			eid = htc->endpoint[i].eid;
			ep_found = true;
			break;
		}
	}

	if (!ep_found)
		return -EINVAL;

	/* Then we create the simplest mapping possible between pipeid
	 * and eid
	 */
	*ul_pipe = *dl_pipe = (u8)eid;

	/* Normally, HTT will use the upper part of the extended
	 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
	 * the lower part (ext_info[0].htc_ext_addr).
	 * If fw wants swapping of mailbox addresses, the opposite is true.
	 */
	if (ar_sdio->swap_mbox) {
		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
	} else {
		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
	}

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		/* HTC ctrl ep mbox address has already been setup in
		 * ath10k_sdio_hif_start
		 */
		break;
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		ar_sdio->mbox_addr[eid] = wmi_addr;
		ar_sdio->mbox_size[eid] = wmi_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		ar_sdio->mbox_addr[eid] = htt_addr;
		ar_sdio->mbox_size[eid] = htt_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	default:
		ath10k_warn(ar, "unsupported HTC service id: %d\n",
			    service_id);
		return -EINVAL;
	}

	return 0;
}

static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");

	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
	 * case) == 0
	 */
	*ul_pipe = 0;
	*dl_pipe = 0;
}

static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
	.tx_sg			= ath10k_sdio_hif_tx_sg,
	.diag_read		= ath10k_sdio_hif_diag_read,
	.diag_write		= ath10k_sdio_hif_diag_write_mem,
	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
	.start			= ath10k_sdio_hif_start,
	.stop			= ath10k_sdio_hif_stop,
	.start_post		= ath10k_sdio_hif_start_post,
	.get_htt_tx_complete	= ath10k_sdio_get_htt_tx_complete,
	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
	.power_up		= ath10k_sdio_hif_power_up,
	.power_down		= ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_sdio_hif_suspend,
	.resume			= ath10k_sdio_hif_resume,
#endif
};

#ifdef CONFIG_PM_SLEEP
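
/* Empty handlers so that mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow cfg80211 suspend/resume handlers.
 */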
2142 static int ath10k_sdio_pm_suspend(struct device *device)
2143 {
2144 struct sdio_func *func = dev_to_sdio_func(device);
2145 struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
2146 struct ath10k *ar = ar_sdio->ar;
2147 mmc_pm_flag_t pm_flag, pm_caps;
2148 int ret;
2149
2150 if (!device_may_wakeup(ar->dev))
2151 return 0;
2152
2153 ath10k_sdio_set_mbox_sleep(ar, true);
2154
2155 pm_flag = MMC_PM_KEEP_POWER;
2156
2157 ret = sdio_set_host_pm_flags(func, pm_flag);
2158 if (ret) {
2159 pm_caps = sdio_get_host_pm_caps(func);
2160 ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
2161 pm_flag, pm_caps, ret);
2162 return ret;
2163 }
2164
2165 return ret;
2166 }
2167
2168 static int ath10k_sdio_pm_resume(struct device *device)
2169 {
2170 return 0;
2171 }
2172
2173 static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
2174 ath10k_sdio_pm_resume);
2175
2176 #define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
2177
2178 #else
2179
2180 #define ATH10K_SDIO_PM_OPS NULL
2181
2182 #endif
2183
2184 static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
2185 {
2186 struct ath10k *ar = container_of(ctx, struct ath10k, napi);
2187 int done;
2188
2189 done = ath10k_htt_rx_hl_indication(ar, budget);
2190 ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);
2191
2192 if (done < budget)
2193 napi_complete_done(ctx, done);
2194
2195 return done;
2196 }
2197
2198 static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
2199 u32 item_offset,
2200 u32 *val)
2201 {
2202 u32 addr;
2203 int ret;
2204
2205 addr = host_interest_item_address(item_offset);
2206
2207 ret = ath10k_sdio_diag_read32(ar, addr, val);
2208
2209 if (ret)
2210 ath10k_warn(ar, "unable to read host interest offset %d value\n",
2211 item_offset);
2212
2213 return ret;
2214 }
2215
2216 static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
2217 u32 buf_len)
2218 {
2219 u32 val;
2220 int i, ret;
2221
2222 for (i = 0; i < buf_len; i += 4) {
2223 ret = ath10k_sdio_diag_read32(ar, address + i, &val);
2224 if (ret) {
2225 ath10k_warn(ar, "unable to read mem %d value\n", address + i);
2226 break;
2227 }
2228 memcpy(buf + i, &val, 4);
2229 }
2230
2231 return ret;
2232 }
2233
2234 static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
2235 {
2236 u32 param;
2237
2238 ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), ¶m);
2239
2240 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);
2241
2242 return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW);
2243 }
2244
2245 static void ath10k_sdio_dump_registers(struct ath10k *ar,
2246 struct ath10k_fw_crash_data *crash_data,
2247 bool fast_dump)
2248 {
2249 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
2250 int i, ret;
2251 u32 reg_dump_area;
2252
2253 ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
2254 ®_dump_area);
2255 if (ret) {
2256 ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
2257 return;
2258 }
2259
2260 if (fast_dump)
2261 ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
2262 sizeof(reg_dump_values));
2263 else
2264 ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
2265 sizeof(reg_dump_values));
2266
2267 if (ret) {
2268 ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
2269 return;
2270 }
2271
2272 ath10k_err(ar, "firmware register dump:\n");
2273 for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
2274 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
2275 i,
2276 reg_dump_values[i],
2277 reg_dump_values[i + 1],
2278 reg_dump_values[i + 2],
2279 reg_dump_values[i + 3]);
2280
2281 if (!crash_data)
2282 return;
2283
2284 for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
2285 crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
2286 }
2287
2288 static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
2289 const struct ath10k_mem_region *mem_region,
2290 u8 *buf, size_t buf_len)
2291 {
2292 const struct ath10k_mem_section *cur_section, *next_section;
2293 unsigned int count, section_size, skip_size;
2294 int ret, i, j;
2295
2296 if (!mem_region || !buf)
2297 return 0;
2298
2299 cur_section = &mem_region->section_table.sections[0];
2300
2301 if (mem_region->start > cur_section->start) {
2302 ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
2303 mem_region->start, cur_section->start);
2304 return 0;
2305 }
2306
2307 skip_size = cur_section->start - mem_region->start;
2308
2309
2310
2311
	for (i = 0; i < skip_size; i++) {
		*buf = ATH10K_MAGIC_NOT_COPIED;
		buf++;
	}

	count = 0;
	i = 0;
	for (; cur_section; cur_section = next_section) {
		section_size = cur_section->end - cur_section->start;

		if (cur_section->end <= cur_section->start) {
			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
				    cur_section->start,
				    cur_section->end);
			break;
		}
		if (++i == mem_region->section_table.size) {
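			/* This is the last section. */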
			next_section = NULL;
			skip_size = 0;
		} else {
			next_section = cur_section + 1;

			if (cur_section->end > next_section->start) {
				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
					    next_section->start,
					    cur_section->end);
				break;
			}

			skip_size = next_section->start - cur_section->end;
		}

		if (buf_len < (skip_size + section_size)) {
			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
			break;
		}

		buf_len -= skip_size + section_size;

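		/* Read the section into the destination buffer. */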
		ret = ath10k_sdio_read_mem(ar, cur_section->start,
					   buf, section_size);
		if (ret) {
			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
				    cur_section->start, ret);
			break;
		}

		buf += section_size;
		count += section_size;

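		/* Fill the gap between this section and the next. */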
		for (j = 0; j < skip_size; j++) {
			*buf = ATH10K_MAGIC_NOT_COPIED;
			buf++;
		}

		count += skip_size;
	}

	return count;
}

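/* Dump a single memory region, either section by section or in one read,
 * and return the number of bytes copied (or a negative error).
 */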
static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
					   const struct ath10k_mem_region *current_region,
					   u8 *buf,
					   bool fast_dump)
{
	int ret;

	if (current_region->section_table.size > 0)
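		/* Copy each section within the region individually. */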
		return ath10k_sdio_dump_memory_section(ar,
						       current_region,
						       buf,
						       current_region->len);

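	/* No individual memory sections are defined, so copy the whole
	 * region in one read.
	 */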
	if (fast_dump)
		ret = ath10k_bmi_read_memory(ar,
					     current_region->start,
					     buf,
					     current_region->len);
	else
		ret = ath10k_sdio_read_mem(ar,
					   current_region->start,
					   buf,
					   current_region->len);

	if (ret) {
		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
			    current_region->name, ret);
		return ret;
	}

	return current_region->len;
}

static void ath10k_sdio_dump_memory(struct ath10k *ar,
				    struct ath10k_fw_crash_data *crash_data,
				    bool fast_dump)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u32 count;
	size_t buf_len;
	int ret, i;
	u8 *buf;

	if (!crash_data)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;

	memset(buf, 0, buf_len);

	for (i = 0; i < mem_layout->region_table.size; i++) {
		count = 0;

		if (current_region->len > buf_len) {
			ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
				    current_region->name,
				    current_region->len,
				    buf_len);
			break;
		}

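		/* Reserve space for the region header. */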
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
						      fast_dump);
		if (ret >= 0)
			count = ret;

		hdr->region_type = cpu_to_le32(current_region->type);
		hdr->start = cpu_to_le32(current_region->start);
		hdr->length = cpu_to_le32(count);

		if (count == 0)
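			/* The header remains in place, just with zero length. */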
			break;

		buf += count;
		buf_len -= count;

		current_region++;
	}
}

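/* Collect a firmware coredump after a crash: registers plus all memory
 * regions defined for the device.  When the firmware supports it, the
 * dump is taken over BMI, which is faster than reading target memory
 * through the diagnostic window.
 */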
void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];
	bool fast_dump;

	fast_dump = ath10k_sdio_is_fast_dump_supported(ar);

	if (fast_dump)
		ath10k_bmi_start(ar);

	ar->stats.fw_crash_counter++;

	ath10k_sdio_disable_intrs(ar);

	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
	ath10k_sdio_dump_memory(ar, crash_data, fast_dump);

	ath10k_sdio_enable_intrs(ar);

	ath10k_core_start_recovery(ar);
}

static int ath10k_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	struct ath10k_sdio *ar_sdio;
	struct ath10k *ar;
	enum ath10k_hw_rev hw_rev;
	u32 dev_id_base;
	struct ath10k_bus_params bus_params = {};
	int ret, i;

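	/* Assumption: all SDIO based chipsets supported so far are QCA6174
	 * based, so the hw revision is hard coded here rather than derived
	 * from the device id.
	 */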
	hw_rev = ATH10K_HW_QCA6174;

	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
				hw_rev, &ath10k_sdio_hif_ops);
	if (!ar) {
		dev_err(&func->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
		       NAPI_POLL_WEIGHT);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = ath10k_sdio_priv(ar);

	ar_sdio->irq_data.irq_proc_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_proc_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->vsg_buffer) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->irq_data.irq_en_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_en_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->bmi_buf) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->is_disabled = true;
	ar_sdio->ar = ar;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->irq_data.mtx);

	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
	if (!ar_sdio->workqueue) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);

	skb_queue_head_init(&ar_sdio->rx_head);
	INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);

	dev_id_base = (id->device & 0x0F00);
	if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
	    dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
		ret = -ENODEV;
		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
			   dev_id_base, id->device);
		goto err_free_wq;
	}

	ar->dev_id = QCA9377_1_0_DEVICE_ID;
	ar->id.vendor = id->vendor;
	ar->id.device = id->device;

	ath10k_sdio_set_mbox_info(ar);

	bus_params.dev_type = ATH10K_DEV_TYPE_HL;

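	/* The chip id is not known at this point, so leave it as zero. */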
	bus_params.chip_id = 0;
	bus_params.hl_msdu_ids = true;

	ar->hw->max_mtu = ETH_DATA_LEN;

	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_wq;
	}

	timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);

	return 0;

err_free_wq:
	destroy_workqueue(ar_sdio->workqueue);
err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static void ath10k_sdio_remove(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ath10k_core_unregister(ar);

	netif_napi_del(&ar->napi);

	ath10k_core_destroy(ar);

	destroy_workqueue(ar_sdio->workqueue);
}

static const struct sdio_device_id ath10k_sdio_devices[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);

static struct sdio_driver ath10k_sdio_driver = {
	.name = "ath10k_sdio",
	.id_table = ath10k_sdio_devices,
	.probe = ath10k_sdio_probe,
	.remove = ath10k_sdio_remove,
	.drv = {
		.owner = THIS_MODULE,
		.pm = ATH10K_SDIO_PM_OPS,
	},
};

static int __init ath10k_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath10k_sdio_driver);
	if (ret)
		pr_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath10k_sdio_exit(void)
{
	sdio_unregister_driver(&ath10k_sdio_driver);
}

module_init(ath10k_sdio_init);
module_exit(ath10k_sdio_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");