0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #include <linux/module.h>
0019 #include <linux/mmc/card.h>
0020 #include <linux/mmc/mmc.h>
0021 #include <linux/mmc/host.h>
0022 #include <linux/mmc/sdio_func.h>
0023 #include <linux/mmc/sdio_ids.h>
0024 #include <linux/mmc/sdio.h>
0025 #include <linux/mmc/sd.h>
0026 #include "hif.h"
0027 #include "hif-ops.h"
0028 #include "target.h"
0029 #include "debug.h"
0030 #include "cfg80211.h"
0031 #include "trace.h"
0032
0033 struct ath6kl_sdio {
0034 struct sdio_func *func;
0035
0036
0037 spinlock_t lock;
0038
0039
0040 struct list_head bus_req_freeq;
0041
0042
0043 struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
0044
0045 struct ath6kl *ar;
0046
0047 u8 *dma_buffer;
0048
0049
0050 struct mutex dma_buffer_mutex;
0051
0052
0053 struct list_head scat_req;
0054
0055 atomic_t irq_handling;
0056 wait_queue_head_t irq_wq;
0057
0058
0059 spinlock_t scat_lock;
0060
0061 bool scatter_enabled;
0062
0063 bool is_disabled;
0064 const struct sdio_device_id *id;
0065 struct work_struct wr_async_work;
0066 struct list_head wr_asyncq;
0067
0068
0069 spinlock_t wr_async_lock;
0070 };
0071
0072 #define CMD53_ARG_READ 0
0073 #define CMD53_ARG_WRITE 1
0074 #define CMD53_ARG_BLOCK_BASIS 1
0075 #define CMD53_ARG_FIXED_ADDRESS 0
0076 #define CMD53_ARG_INCR_ADDRESS 1
0077
0078 static int ath6kl_sdio_config(struct ath6kl *ar);
0079
/* Fetch the SDIO-specific HIF state hung off the ath6kl core object. */
static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}
0084
0085
0086
0087
0088
0089
0090
0091 static inline bool buf_needs_bounce(u8 *buf)
0092 {
0093 return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
0094 }
0095
0096 static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
0097 {
0098 struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;
0099
0100
0101 mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
0102 mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
0103 mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
0104 mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
0105 mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
0106 mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
0107 }
0108
/*
 * Compose a CMD53 (IO_RW_EXTENDED) argument word:
 * bit 31 R/W flag, bits 30:28 function number, bit 27 block mode,
 * bit 26 op code (incrementing/fixed address), bits 25:9 register
 * address, bits 8:0 block/byte count.
 *
 * Each masked field is cast to u32 before shifting: `rw & 1` promotes
 * to (signed) int, and left-shifting a 1 into bit 31 of a signed int is
 * undefined behavior in C.
 */
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	*arg = (((u32)(rw & 1) << 31) |
		((u32)(func & 0x7) << 28) |
		((u32)(mode & 1) << 27) |
		((u32)(opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}
0120
/*
 * Compose a CMD52 (IO_RW_DIRECT) argument word for function 0:
 * bit 31 R/W flag, bits 30:28 function number (always 0 here), bit 27
 * RAW (read-after-write), bits 25:9 register address, bit 8 must be
 * set per this driver's usage, bits 7:0 write data.
 *
 * `(u32)` casts before the bit-31 shift avoid shifting into the sign
 * bit of a promoted signed int, which is undefined behavior.
 */
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	*arg = ((u32)(write & 1) << 31) |
	       ((u32)(func & 0x7) << 28) |
	       ((u32)(raw & 1) << 27) |
	       (1 << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1 << 8) |
	       (val & 0xFF);
}
0135
0136 static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
0137 unsigned int address,
0138 unsigned char byte)
0139 {
0140 struct mmc_command io_cmd;
0141
0142 memset(&io_cmd, 0, sizeof(io_cmd));
0143 ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
0144 io_cmd.opcode = SD_IO_RW_DIRECT;
0145 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
0146
0147 return mmc_wait_for_cmd(card->host, &io_cmd, 0);
0148 }
0149
/*
 * Synchronous SDIO transfer to/from the target at @addr.
 *
 * The host is claimed for the duration of the transfer.  For writes
 * into either mailbox range the address is advanced so the last byte of
 * the buffer lands on the mailbox's final address — presumably the
 * target's end-of-message trigger (standard ath6kl mailbox convention;
 * confirm against hif.h).
 *
 * Returns 0 on success or a negative errno from the SDIO core.
 */
static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
			  u8 *buf, u32 len)
{
	int ret = 0;

	sdio_claim_host(func);

	if (request & HIF_WRITE) {
		/* slide the write so it ends at the mailbox boundary */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		/* same adjustment for the extended mailbox */
		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
	}

	sdio_release_host(func);

	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
		   request & HIF_WRITE ? "wr" : "rd", addr,
		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
	ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

	trace_ath6kl_sdio(addr, request, buf, len);

	return ret;
}
0189
0190 static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
0191 {
0192 struct bus_request *bus_req;
0193
0194 spin_lock_bh(&ar_sdio->lock);
0195
0196 if (list_empty(&ar_sdio->bus_req_freeq)) {
0197 spin_unlock_bh(&ar_sdio->lock);
0198 return NULL;
0199 }
0200
0201 bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
0202 struct bus_request, list);
0203 list_del(&bus_req->list);
0204
0205 spin_unlock_bh(&ar_sdio->lock);
0206 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
0207 __func__, bus_req);
0208
0209 return bus_req;
0210 }
0211
/* Return a bus request to the tail of the free pool. */
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}
0222
0223 static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
0224 struct mmc_data *data)
0225 {
0226 struct scatterlist *sg;
0227 int i;
0228
0229 data->blksz = HIF_MBOX_BLOCK_SIZE;
0230 data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;
0231
0232 ath6kl_dbg(ATH6KL_DBG_SCATTER,
0233 "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
0234 (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
0235 data->blksz, data->blocks, scat_req->len,
0236 scat_req->scat_entries);
0237
0238 data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
0239 MMC_DATA_READ;
0240
0241
0242 sg = scat_req->sgentries;
0243 sg_init_table(sg, scat_req->scat_entries);
0244
0245
0246 for (i = 0; i < scat_req->scat_entries; i++, sg++) {
0247 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
0248 i, scat_req->scat_list[i].buf,
0249 scat_req->scat_list[i].len);
0250
0251 sg_set_buf(sg, scat_req->scat_list[i].buf,
0252 scat_req->scat_list[i].len);
0253 }
0254
0255
0256 data->sg = scat_req->sgentries;
0257 data->sg_len = scat_req->scat_entries;
0258 }
0259
/*
 * Execute one scatter request: either as a single linear CMD53 over the
 * pre-linearized bounce buffer (virt_scat), or as a true scatter-gather
 * mmc request built by hand.  Invokes the request's completion callback
 * for asynchronous requests and returns the transfer status.
 */
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status, len;

	scat_req = req->scat_req;

	if (scat_req->virt_scat) {
		/* virtual scatter: data already linear in virt_dma_buf */
		len = scat_req->len;
		if (scat_req->req & HIF_BLOCK_BASIS)
			len = round_down(len, HIF_MBOX_BLOCK_SIZE);

		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
					scat_req->addr, scat_req->virt_dma_buf,
					len);
		goto scat_complete;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/*
	 * For writes, slide the address so the transfer ends at the
	 * mailbox boundary, mirroring the adjustment in ath6kl_sdio_io().
	 */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* anything else lands in the extended mailbox */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* block-basis CMD53 covering all scatter entries */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	sdio_claim_host(ar_sdio->func);

	mmc_set_data_timeout(&data, ar_sdio->func->card);

	trace_ath6kl_sdio_scat(scat_req->addr,
			       scat_req->req,
			       scat_req->len,
			       scat_req->scat_entries,
			       scat_req->scat_list);

	/* synchronous: blocks until the mmc core completes the request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	sdio_release_host(ar_sdio->func);

	status = cmd.error ? cmd.error : data.error;

scat_complete:
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter write request failed:%d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(ar_sdio->ar->htc_target, scat_req);

	return status;
}
0343
0344 static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
0345 int n_scat_entry, int n_scat_req,
0346 bool virt_scat)
0347 {
0348 struct hif_scatter_req *s_req;
0349 struct bus_request *bus_req;
0350 int i, scat_req_sz, scat_list_sz, size;
0351 u8 *virt_buf;
0352
0353 scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
0354 scat_req_sz = sizeof(*s_req) + scat_list_sz;
0355
0356 if (!virt_scat)
0357 size = sizeof(struct scatterlist) * n_scat_entry;
0358 else
0359 size = 2 * L1_CACHE_BYTES +
0360 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
0361
0362 for (i = 0; i < n_scat_req; i++) {
0363
0364 s_req = kzalloc(scat_req_sz, GFP_KERNEL);
0365 if (!s_req)
0366 return -ENOMEM;
0367
0368 if (virt_scat) {
0369 virt_buf = kzalloc(size, GFP_KERNEL);
0370 if (!virt_buf) {
0371 kfree(s_req);
0372 return -ENOMEM;
0373 }
0374
0375 s_req->virt_dma_buf =
0376 (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
0377 } else {
0378
0379 s_req->sgentries = kzalloc(size, GFP_KERNEL);
0380
0381 if (!s_req->sgentries) {
0382 kfree(s_req);
0383 return -ENOMEM;
0384 }
0385 }
0386
0387
0388 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
0389 if (!bus_req) {
0390 kfree(s_req->sgentries);
0391 kfree(s_req->virt_dma_buf);
0392 kfree(s_req);
0393 return -ENOMEM;
0394 }
0395
0396
0397 bus_req->scat_req = s_req;
0398 s_req->busrequest = bus_req;
0399
0400 s_req->virt_scat = virt_scat;
0401
0402
0403 hif_scatter_req_add(ar_sdio->ar, s_req);
0404 }
0405
0406 return 0;
0407 }
0408
0409 static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
0410 u32 len, u32 request)
0411 {
0412 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
0413 u8 *tbuf = NULL;
0414 int ret;
0415 bool bounced = false;
0416
0417 if (request & HIF_BLOCK_BASIS)
0418 len = round_down(len, HIF_MBOX_BLOCK_SIZE);
0419
0420 if (buf_needs_bounce(buf)) {
0421 if (!ar_sdio->dma_buffer)
0422 return -ENOMEM;
0423 mutex_lock(&ar_sdio->dma_buffer_mutex);
0424 tbuf = ar_sdio->dma_buffer;
0425
0426 if (request & HIF_WRITE)
0427 memcpy(tbuf, buf, len);
0428
0429 bounced = true;
0430 } else {
0431 tbuf = buf;
0432 }
0433
0434 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
0435 if ((request & HIF_READ) && bounced)
0436 memcpy(buf, tbuf, len);
0437
0438 if (bounced)
0439 mutex_unlock(&ar_sdio->dma_buffer_mutex);
0440
0441 return ret;
0442 }
0443
0444 static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
0445 struct bus_request *req)
0446 {
0447 if (req->scat_req) {
0448 ath6kl_sdio_scat_rw(ar_sdio, req);
0449 } else {
0450 void *context;
0451 int status;
0452
0453 status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
0454 req->buffer, req->length,
0455 req->request);
0456 context = req->packet;
0457 ath6kl_sdio_free_bus_req(ar_sdio, req);
0458 ath6kl_hif_rw_comp_handler(context, status);
0459 }
0460 }
0461
0462 static void ath6kl_sdio_write_async_work(struct work_struct *work)
0463 {
0464 struct ath6kl_sdio *ar_sdio;
0465 struct bus_request *req, *tmp_req;
0466
0467 ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
0468
0469 spin_lock_bh(&ar_sdio->wr_async_lock);
0470 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
0471 list_del(&req->list);
0472 spin_unlock_bh(&ar_sdio->wr_async_lock);
0473 __ath6kl_sdio_write_async(ar_sdio, req);
0474 spin_lock_bh(&ar_sdio->wr_async_lock);
0475 }
0476 spin_unlock_bh(&ar_sdio->wr_async_lock);
0477 }
0478
/*
 * SDIO interrupt callback, invoked by the SDIO core with the host
 * already claimed.
 */
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);
	/*
	 * Release the host while the bottom half runs: it issues its own
	 * SDIO transfers, each of which claims the host itself (see
	 * ath6kl_sdio_io()).  Re-claim before returning to the SDIO core.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);

	/* let ath6kl_sdio_irq_disable() waiters proceed */
	atomic_set(&ar_sdio->irq_handling, 0);
	wake_up(&ar_sdio->irq_wq);

	WARN_ON(status && status != -ECANCELED);
}
0502
0503 static int ath6kl_sdio_power_on(struct ath6kl *ar)
0504 {
0505 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
0506 struct sdio_func *func = ar_sdio->func;
0507 int ret = 0;
0508
0509 if (!ar_sdio->is_disabled)
0510 return 0;
0511
0512 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");
0513
0514 sdio_claim_host(func);
0515
0516 ret = sdio_enable_func(func);
0517 if (ret) {
0518 ath6kl_err("Unable to enable sdio func: %d)\n", ret);
0519 sdio_release_host(func);
0520 return ret;
0521 }
0522
0523 sdio_release_host(func);
0524
0525
0526
0527
0528
0529 msleep(10);
0530
0531 ret = ath6kl_sdio_config(ar);
0532 if (ret) {
0533 ath6kl_err("Failed to config sdio: %d\n", ret);
0534 goto out;
0535 }
0536
0537 ar_sdio->is_disabled = false;
0538
0539 out:
0540 return ret;
0541 }
0542
0543 static int ath6kl_sdio_power_off(struct ath6kl *ar)
0544 {
0545 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
0546 int ret;
0547
0548 if (ar_sdio->is_disabled)
0549 return 0;
0550
0551 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
0552
0553
0554 sdio_claim_host(ar_sdio->func);
0555 ret = sdio_disable_func(ar_sdio->func);
0556 sdio_release_host(ar_sdio->func);
0557
0558 if (ret)
0559 return ret;
0560
0561 ar_sdio->is_disabled = true;
0562
0563 return ret;
0564 }
0565
0566 static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
0567 u32 length, u32 request,
0568 struct htc_packet *packet)
0569 {
0570 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
0571 struct bus_request *bus_req;
0572
0573 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
0574
0575 if (WARN_ON_ONCE(!bus_req))
0576 return -ENOMEM;
0577
0578 bus_req->address = address;
0579 bus_req->buffer = buffer;
0580 bus_req->length = length;
0581 bus_req->request = request;
0582 bus_req->packet = packet;
0583
0584 spin_lock_bh(&ar_sdio->wr_async_lock);
0585 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
0586 spin_unlock_bh(&ar_sdio->wr_async_lock);
0587 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
0588
0589 return 0;
0590 }
0591
/* Claim the SDIO irq so ath6kl_sdio_irq_handler() starts being called. */
static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* register the isr with the SDIO core */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}
0606
0607 static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
0608 {
0609 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
0610
0611 return !atomic_read(&ar_sdio->irq_handling);
0612 }
0613
/*
 * Release the SDIO irq.  If an interrupt is being serviced right now,
 * the host must be dropped first (the handler needs to re-claim it) and
 * we wait for the handler to finish before actually releasing the irq.
 */
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	if (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);

		ret = wait_event_interruptible(ar_sdio->irq_wq,
					       ath6kl_sdio_is_on_irq(ar));
		if (ret)
			return;		/* interrupted by a signal */

		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}
0638
0639 static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
0640 {
0641 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
0642 struct hif_scatter_req *node = NULL;
0643
0644 spin_lock_bh(&ar_sdio->scat_lock);
0645
0646 if (!list_empty(&ar_sdio->scat_req)) {
0647 node = list_first_entry(&ar_sdio->scat_req,
0648 struct hif_scatter_req, list);
0649 list_del(&node->list);
0650
0651 node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
0652 }
0653
0654 spin_unlock_bh(&ar_sdio->scat_lock);
0655
0656 return node;
0657 }
0658
/* Return a scatter request to the free list. */
static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
					struct hif_scatter_req *s_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	spin_lock_bh(&ar_sdio->scat_lock);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_bh(&ar_sdio->scat_lock);
}
0670
0671
0672 static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
0673 struct hif_scatter_req *scat_req)
0674 {
0675 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
0676 u32 request = scat_req->req;
0677 int status = 0;
0678
0679 if (!scat_req->len)
0680 return -EINVAL;
0681
0682 ath6kl_dbg(ATH6KL_DBG_SCATTER,
0683 "hif-scatter: total len: %d scatter entries: %d\n",
0684 scat_req->len, scat_req->scat_entries);
0685
0686 if (request & HIF_SYNCHRONOUS) {
0687 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
0688 } else {
0689 spin_lock_bh(&ar_sdio->wr_async_lock);
0690 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
0691 spin_unlock_bh(&ar_sdio->wr_async_lock);
0692 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
0693 }
0694
0695 return status;
0696 }
0697
0698
/*
 * Tear down all scatter resources and disable scatter support.  The
 * lock is dropped around each teardown because freeing re-enters
 * ath6kl_sdio_free_bus_req() which takes its own lock.
 */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;

	/* empty the free list */
	spin_lock_bh(&ar_sdio->scat_lock);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_bh(&ar_sdio->scat_lock);

		/* detach and recycle the paired bus request first */
		if (s_req->busrequest) {
			s_req->busrequest->scat_req = NULL;
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		}
		/*
		 * NOTE(review): virt_dma_buf is the L1-cache-aligned alias of
		 * the kzalloc'd buffer (see ath6kl_sdio_alloc_prep_scat_req());
		 * kfree() of the alias is only safe while kmalloc alignment
		 * >= L1_CACHE_BYTES — verify for the target arch.
		 */
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_bh(&ar_sdio->scat_lock);
	}
	spin_unlock_bh(&ar_sdio->scat_lock);

	ar_sdio->scatter_enabled = false;
}
0729
0730
/*
 * Set up scatter-gather support: prefer true host SG, fall back to
 * virtual (bounce-buffer) scatter when the host cannot handle enough
 * segments or when the SG allocation fails.
 */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct htc_target *target = ar->htc_target;
	int ret = 0;
	bool virt_scat = false;

	if (ar_sdio->scatter_enabled)
		return 0;

	ar_sdio->scatter_enabled = true;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		virt_scat = true;
	}

	if (!virt_scat) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				MAX_SCATTER_ENTRIES_PER_REQ,
				MAX_SCATTER_REQUESTS, virt_scat);

		if (!ret) {
			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "hif-scatter enabled requests %d entries %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);

			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
			target->max_xfer_szper_scatreq =
				MAX_SCATTER_REQ_TRANSFER_SIZE;
		} else {
			ath6kl_sdio_cleanup_scatter(ar);
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
		}
	}

	/*
	 * NOTE(review): when the hardware SG path above failed (ret != 0),
	 * virt_scat is still false here, so this "virtual scatter" fallback
	 * calls alloc_prep with virt_scat=false and allocates SG entries
	 * again rather than bounce buffers — looks unintended; confirm
	 * against ath6kl_sdio_alloc_prep_scat_req().
	 */
	if (virt_scat || ret) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				ATH6KL_SCATTER_ENTRIES_PER_REQ,
				ATH6KL_SCATTER_REQS, virt_scat);

		if (ret) {
			ath6kl_err("failed to alloc virtual scatter resources !\n");
			ath6kl_sdio_cleanup_scatter(ar);
			return ret;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT,
			   "virtual scatter enabled requests %d entries %d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
		target->max_xfer_szper_scatreq =
			ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
	}

	return 0;
}
0793
/*
 * Apply runtime SDIO settings: 4-bit async irq mode on AR6003 and
 * later, a shorter function-enable timeout and the HIF block size.
 * Called at probe and again whenever power was cut (see
 * ath6kl_sdio_power_on()/ath6kl_sdio_resume()).
 */
static int ath6kl_sdio_config(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	if (ar_sdio->id->device >= SDIO_DEVICE_ID_ATHEROS_AR6003_00) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						CCCR_SDIO_IRQ_MODE_REG,
						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			goto out;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d)\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		goto out;
	}

out:
	sdio_release_host(func);

	return ret;
}
0831
0832 static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
0833 {
0834 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
0835 struct sdio_func *func = ar_sdio->func;
0836 mmc_pm_flag_t flags;
0837 int ret;
0838
0839 flags = sdio_get_host_pm_caps(func);
0840
0841 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
0842
0843 if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
0844 !(flags & MMC_PM_KEEP_POWER))
0845 return -EINVAL;
0846
0847 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
0848 if (ret) {
0849 ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
0850 return ret;
0851 }
0852
0853
0854 ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
0855 if (ret)
0856 ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
0857
0858 return ret;
0859 }
0860
/*
 * Suspend policy: try WoW when configured (or implied by a wowlan
 * parameter), optionally fall back to deep sleep, and cut power as the
 * last resort.
 */
static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	bool try_deepsleep = false;
	int ret;

	if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
	    (!ar->suspend_mode && wow)) {
		/* WoW needs host power kept and wake-capable SDIO irqs */
		ret = ath6kl_set_sdio_pm_caps(ar);
		if (ret)
			goto cut_pwr;

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
		if (ret && ret != -ENOTCONN)
			ath6kl_err("wow suspend failed: %d\n", ret);

		/* on WoW failure, honor the configured fallback mode */
		if (ret &&
		    (!ar->wow_suspend_mode ||
		     ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
			try_deepsleep = true;
		else if (ret &&
			 ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
			goto cut_pwr;
		if (!ret)
			return 0;
	}

	if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
	    !ar->suspend_mode || try_deepsleep) {
		flags = sdio_get_host_pm_caps(func);
		if (!(flags & MMC_PM_KEEP_POWER))
			goto cut_pwr;

		ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
		if (ret)
			goto cut_pwr;

		/*
		 * NOTE(review): requesting MMC_PM_WAKE_SDIO_IRQ here lets the
		 * target raise a wake irq during deep sleep when the host
		 * supports it — presumably required on some hosts; confirm.
		 */
		if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
			ret = sdio_set_host_pm_flags(func,
						     MMC_PM_WAKE_SDIO_IRQ);
			if (ret)
				goto cut_pwr;
		}

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
					      NULL);
		if (ret)
			goto cut_pwr;

		return 0;
	}

cut_pwr:
	/* power is going away: drop any keep-power request on the host */
	if (func->card && func->card->host)
		func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER;

	return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}
0927
0928 static int ath6kl_sdio_resume(struct ath6kl *ar)
0929 {
0930 switch (ar->state) {
0931 case ATH6KL_STATE_OFF:
0932 case ATH6KL_STATE_CUTPOWER:
0933 ath6kl_dbg(ATH6KL_DBG_SUSPEND,
0934 "sdio resume configuring sdio\n");
0935
0936
0937 ath6kl_sdio_config(ar);
0938 break;
0939
0940 case ATH6KL_STATE_ON:
0941 break;
0942
0943 case ATH6KL_STATE_DEEPSLEEP:
0944 break;
0945
0946 case ATH6KL_STATE_WOW:
0947 break;
0948
0949 case ATH6KL_STATE_SUSPENDING:
0950 break;
0951
0952 case ATH6KL_STATE_RESUMING:
0953 break;
0954
0955 case ATH6KL_STATE_RECOVERY:
0956 break;
0957 }
0958
0959 ath6kl_cfg80211_resume(ar);
0960
0961 return 0;
0962 }
0963
0964
/*
 * Program a target address-window register.  The three upper bytes are
 * written first with byte-fixed accesses; the full 32-bit value is
 * written last because the LSB write is what triggers the window
 * access on the target.
 *
 * NOTE(review): ((u8 *)&addr)[i] indexes CPU byte order; this matches
 * the register layout only on little-endian hosts — the common
 * configuration for this driver, but worth confirming.
 */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
	int status;
	u8 addr_val[4];
	s32 i;

	/*
	 * Write bytes 1..3 of the register to set the upper address
	 * bytes; the LSB is deliberately left for last.
	 */
	for (i = 1; i <= 3; i++) {
		/* replicate the address byte across the 4-byte buffer so
		 * the fixed-address write hits it four times */
		memset(addr_val, ((u8 *)&addr)[i], 4);

		/*
		 * Use a 4-byte fixed-address write for each register byte
		 * to keep all HIF accesses 4-byte sized.
		 */
		status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
						     4, HIF_WR_SYNC_BYTE_FIX);
		if (status)
			break;
	}

	if (status) {
		ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
			   __func__, addr, reg_addr);
		return status;
	}

	/*
	 * Write the whole register; the LSB write initiates the
	 * target-side window access.
	 */
	status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
					     4, HIF_WR_SYNC_BYTE_INC);

	if (status) {
		ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
			   __func__, addr, reg_addr);
		return status;
	}

	return 0;
}
1017
1018 static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
1019 {
1020 int status;
1021
1022
1023 status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
1024 address);
1025
1026 if (status)
1027 return status;
1028
1029
1030 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
1031 (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
1032 if (status) {
1033 ath6kl_err("%s: failed to read from window data addr\n",
1034 __func__);
1035 return status;
1036 }
1037
1038 return status;
1039 }
1040
1041 static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
1042 __le32 data)
1043 {
1044 int status;
1045 u32 val = (__force u32) data;
1046
1047
1048 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
1049 (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
1050 if (status) {
1051 ath6kl_err("%s: failed to write 0x%x to window data addr\n",
1052 __func__, data);
1053 return status;
1054 }
1055
1056
1057 return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
1058 address);
1059 }
1060
/*
 * Wait (bounded by BMI_COMMUNICATION_TIMEOUT) until the target grants a
 * BMI command credit.  Reading the COUNT_DEC address consumes a credit
 * on the target as a side effect of the read.
 */
static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
{
	u32 addr;
	unsigned long timeout;
	int ret;

	ar->bmi.cmd_credits = 0;

	/* credit counter register for the BMI endpoint */
	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
		/*
		 * Hit the credit counter with a 4-byte access: the first
		 * byte read hits the counter and causes the decrement,
		 * while the remaining 3 bytes have no effect — this keeps
		 * all HIF accesses 4-byte sized.
		 */
		ret = ath6kl_sdio_read_write_sync(ar, addr,
					 (u8 *)&ar->bmi.cmd_credits, 4,
					 HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("Unable to decrement the command credit count register: %d\n",
				   ret);
			return ret;
		}

		/* only the least significant byte carries the credit count */
		ar->bmi.cmd_credits &= 0xFF;
	}

	if (!ar->bmi.cmd_credits) {
		ath6kl_err("bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
1102
1103 static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
1104 {
1105 unsigned long timeout;
1106 u32 rx_word = 0;
1107 int ret = 0;
1108
1109 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
1110 while ((time_before(jiffies, timeout)) && !rx_word) {
1111 ret = ath6kl_sdio_read_write_sync(ar,
1112 RX_LOOKAHEAD_VALID_ADDRESS,
1113 (u8 *)&rx_word, sizeof(rx_word),
1114 HIF_RD_SYNC_BYTE_INC);
1115 if (ret) {
1116 ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
1117 return ret;
1118 }
1119
1120
1121 rx_word &= (1 << ENDPOINT1);
1122 }
1123
1124 if (!rx_word) {
1125 ath6kl_err("bmi_recv_buf FIFO empty\n");
1126 return -EINVAL;
1127 }
1128
1129 return ret;
1130 }
1131
1132 static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
1133 {
1134 int ret;
1135 u32 addr;
1136
1137 ret = ath6kl_sdio_bmi_credits(ar);
1138 if (ret)
1139 return ret;
1140
1141 addr = ar->mbox_info.htc_addr;
1142
1143 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
1144 HIF_WR_SYNC_BYTE_INC);
1145 if (ret) {
1146 ath6kl_err("unable to send the bmi data to the device\n");
1147 return ret;
1148 }
1149
1150 return 0;
1151 }
1152
1153 static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
1154 {
1155 int ret;
1156 u32 addr;
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204 if (len >= 4) {
1205 ret = ath6kl_bmi_get_rx_lkahd(ar);
1206 if (ret)
1207 return ret;
1208 }
1209
1210 addr = ar->mbox_info.htc_addr;
1211 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
1212 HIF_RD_SYNC_BYTE_INC);
1213 if (ret) {
1214 ath6kl_err("Unable to read the bmi data from the device: %d\n",
1215 ret);
1216 return ret;
1217 }
1218
1219 return 0;
1220 }
1221
/*
 * Stop the HIF: flush the async worker, then fail every still-queued
 * request with -ECANCELED so upper layers can reclaim their packets.
 */
static void ath6kl_sdio_stop(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *req, *tmp_req;
	void *context;

	/* the worker must be idle before the queue is drained below */
	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);

		if (req->scat_req) {
			/* scatter requests complete via their callback */
			req->scat_req->status = -ECANCELED;
			req->scat_req->complete(ar_sdio->ar->htc_target,
						req->scat_req);
		} else {
			context = req->packet;
			ath6kl_sdio_free_bus_req(ar_sdio, req);
			ath6kl_hif_rw_comp_handler(context, -ECANCELED);
		}
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	/*
	 * NOTE(review): 4 presumably equals the number of scatter requests
	 * allocated (MAX_SCATTER_REQUESTS) — i.e. all of them should be
	 * back on the free list by now; confirm against hif.h.
	 */
	WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}
1253
/* HIF callbacks backing the ath6kl core when the bus is SDIO. */
static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
	.suspend = ath6kl_sdio_suspend,
	.resume = ath6kl_sdio_resume,
	.diag_read32 = ath6kl_sdio_diag_read32,
	.diag_write32 = ath6kl_sdio_diag_write32,
	.bmi_read = ath6kl_sdio_bmi_read,
	.bmi_write = ath6kl_sdio_bmi_write,
	.power_on = ath6kl_sdio_power_on,
	.power_off = ath6kl_sdio_power_off,
	.stop = ath6kl_sdio_stop,
};
1274
1275 #ifdef CONFIG_PM_SLEEP
1276
1277
1278
1279
1280
/*
 * Device-model PM suspend hook.  Intentionally a stub: the real suspend
 * work runs through the HIF ops (ath6kl_sdio_suspend()); presumably this
 * exists so the MMC core sees a PM implementation — confirm against the
 * sdio driver registration.
 */
static int ath6kl_sdio_pm_suspend(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");

	return 0;
}
1287
/* Device-model PM resume hook; stub counterpart of ath6kl_sdio_pm_suspend(). */
static int ath6kl_sdio_pm_resume(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");

	return 0;
}
1294
1295 static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
1296 ath6kl_sdio_pm_resume);
1297
1298 #define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
1299
1300 #else
1301
1302 #define ATH6KL_SDIO_PM_OPS NULL
1303
1304 #endif
1305
/*
 * Bind to a newly enumerated SDIO function: allocate the HIF state, the
 * DMA bounce buffer and the bus-request pool, then create and bring up
 * the ath6kl core.  Unwinds allocations in reverse order on failure.
 */
static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	/* bounce buffer for unaligned / non-DMA-able callers */
	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;	/* enabled later via hif power_on */

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->dma_buffer_mutex);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	init_waitqueue_head(&ar_sdio->irq_wq);

	/* seed the free queue with the statically allocated requests */
	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_create(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;
	ar->bmi.max_data_size = 256;

	ath6kl_sdio_set_mbox_info(ar);

	ret = ath6kl_sdio_config(ar);
	if (ret) {
		ath6kl_err("Failed to config sdio: %d\n", ret);
		goto err_core_alloc;
	}

	ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_core_alloc;
	}

	return ret;

err_core_alloc:
	ath6kl_core_destroy(ar_sdio->ar);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}
1389
/*
 * SDIO remove callback: tears down everything probe set up, in
 * reverse order.  Tx/rx and the async write worker are stopped
 * before the core is cleaned up so no further bus requests are
 * issued while the state is being freed.
 */
static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ar_sdio = sdio_get_drvdata(func);

	/* Quiesce traffic and flush any pending async write work. */
	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_core_cleanup(ar_sdio->ar);
	ath6kl_core_destroy(ar_sdio->ar);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}
1409
/* SDIO vendor/device IDs this driver binds to (AR6003 and AR6004
 * variants); exported to userspace module tooling below.
 */
static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_00)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_01)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_00)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_01)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_02)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_18)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_19)},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
1422
/* SDIO bus driver descriptor; .drv.pm is NULL when CONFIG_PM_SLEEP
 * is not set (see ATH6KL_SDIO_PM_OPS above).
 */
static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
	.drv.pm = ATH6KL_SDIO_PM_OPS,
};
1430
1431 static int __init ath6kl_sdio_init(void)
1432 {
1433 int ret;
1434
1435 ret = sdio_register_driver(&ath6kl_sdio_driver);
1436 if (ret)
1437 ath6kl_err("sdio driver registration failed: %d\n", ret);
1438
1439 return ret;
1440 }
1441
/* Module exit point: unregister from the SDIO bus, which triggers
 * ath6kl_sdio_remove() for any bound functions.
 */
static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}
1446
module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

/* Firmware images referenced per hardware revision, so userspace
 * tooling can pull them into the initramfs alongside the module.
 */
/* AR6003 hw 2.0 */
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
/* AR6003 hw 2.1.1 */
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
/* AR6004 hw 1.0 */
MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
/* AR6004 hw 1.1 */
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
/* AR6004 hw 1.2 */
MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
/* AR6004 hw 1.3 */
MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);