0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/module.h>
0009 #include <linux/kernel.h>
0010 #include <linux/errno.h>
0011 #include <linux/usb.h>
0012 #include <linux/platform_device.h>
0013 #include <linux/dma-mapping.h>
0014 #include <linux/slab.h>
0015 #include <linux/dmaengine.h>
0016
0017 #include "musb_core.h"
0018 #include "tusb6010.h"
0019
0020 #define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
0021
0022 #define MAX_DMAREQ 5
0023
/* One TUSB dmareq line and the dmaengine channel servicing it. */
struct tusb_dma_data {
	s8 dmareq;		/* dmareq line number, or -1 when no channel is requested */
	struct dma_chan *chan;	/* dmaengine channel bound to this dmareq line */
};
0028
/*
 * Per-endpoint DMA state, stored in dma_channel->private_data
 * (see the to_chdat() accessor macro above).
 */
struct tusb_omap_dma_ch {
	struct musb *musb;
	void __iomem *tbase;		/* TUSB control register base */
	unsigned long phys_offset;
	int epnum;
	u8 tx;				/* 1 = TX endpoint, 0 = RX */
	struct musb_hw_ep *hw_ep;

	struct tusb_dma_data *dma_data;	/* dmareq/channel currently assigned */

	struct tusb_omap_dma *tusb_dma;	/* owning controller */

	dma_addr_t dma_addr;		/* bus address of the current buffer */

	u32 len;			/* total requested transfer length */
	u16 packet_sz;			/* USB max packet size for the endpoint */
	u16 transfer_packet_sz;		/* packet size programmed for this transfer */
	u32 transfer_len;		/* 32-byte-aligned portion handled by DMA */
	u32 completed_len;
};
0049
/*
 * TUSB6010 DMA controller: embeds the generic musb dma_controller and
 * owns the pool of dmareq lines / dmaengine channels.
 */
struct tusb_omap_dma {
	struct dma_controller controller;
	void __iomem *tbase;		/* TUSB control register base */

	struct tusb_dma_data dma_pool[MAX_DMAREQ];
	unsigned multichannel:1;	/* 1 = dedicated dmareq per endpoint,
					 * 0 = all endpoints share dmareq0 */
};
0057
0058
0059
0060
0061 static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
0062 {
0063 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
0064
0065 if (reg != 0) {
0066 dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
0067 chdat->epnum, reg & 0xf);
0068 return -EAGAIN;
0069 }
0070
0071 if (chdat->tx)
0072 reg = (1 << 4) | chdat->epnum;
0073 else
0074 reg = chdat->epnum;
0075
0076 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
0077
0078 return 0;
0079 }
0080
0081 static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
0082 {
0083 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
0084
0085 if ((reg & 0xf) != chdat->epnum) {
0086 printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
0087 chdat->epnum, reg & 0xf);
0088 return;
0089 }
0090 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
0091 }
0092
0093
0094
0095
0096
/*
 * DMA completion callback.
 *
 * Reads back the endpoint XFR_SIZE counter to work out how much data was
 * actually moved, finishes the sub-32-byte tail with PIO, releases the
 * shared dmareq in single-channel mode, and signals completion to the
 * MUSB core.  Short TX transfers are terminated manually by setting
 * TXPKTRDY.
 */
static void tusb_omap_dma_cb(void *data)
{
	struct dma_channel *channel = (struct dma_channel *)data;
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);
	struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
	struct musb *musb = chdat->musb;
	struct device *dev = musb->controller;
	struct musb_hw_ep *hw_ep = chdat->hw_ep;
	void __iomem *ep_conf = hw_ep->conf;
	void __iomem *mbase = musb->mregs;
	unsigned long remaining, flags, pio;

	spin_lock_irqsave(&musb->lock, flags);

	dev_dbg(musb->controller, "ep%i %s dma callback\n",
		chdat->epnum, chdat->tx ? "tx" : "rx");

	/* XFR_SIZE counts down; what is left is what DMA did not transfer. */
	if (chdat->tx)
		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);

	/* XFR_SIZE can read back corrupt; treat impossible values as "all done". */
	if (unlikely(remaining > chdat->transfer_len)) {
		dev_dbg(musb->controller, "Corrupt %s XFR_SIZE: 0x%08lx\n",
			chdat->tx ? "tx" : "rx", remaining);
		remaining = 0;
	}

	channel->actual_len = chdat->transfer_len - remaining;
	/* Bytes not covered by the 32-byte-aligned DMA portion. */
	pio = chdat->len - channel->actual_len;

	dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);

	/* Transfer the remaining 1-31 bytes with PIO (DMA is 32-byte granular). */
	if (pio > 0 && pio < 32) {
		u8 *buf;

		dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio);
		/* The tail starts right after the DMAd part of the buffer. */
		buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
		if (chdat->tx) {
			dma_unmap_single(dev, chdat->dma_addr,
					chdat->transfer_len,
					DMA_TO_DEVICE);
			musb_write_fifo(hw_ep, pio, buf);
		} else {
			dma_unmap_single(dev, chdat->dma_addr,
					chdat->transfer_len,
					DMA_FROM_DEVICE);
			musb_read_fifo(hw_ep, pio, buf);
		}
		channel->actual_len += pio;
	}

	/* Single-channel mode: give dmareq0 back for other endpoints. */
	if (!tusb_dma->multichannel)
		tusb_omap_free_shared_dmareq(chdat);

	channel->status = MUSB_DMA_STATUS_FREE;

	musb_dma_completion(musb, chdat->epnum, chdat->tx);

	/*
	 * Short TX packets must be terminated manually by setting TXPKTRDY.
	 * NOTE(review): presumably because AUTOSET only fires for full
	 * packets — confirm against the MUSB programming guide.
	 */
	if ((chdat->transfer_len < chdat->packet_sz)
			|| (chdat->transfer_len % chdat->packet_sz != 0)) {
		u16 csr;

		if (chdat->tx) {
			dev_dbg(musb->controller, "terminating short tx packet\n");
			musb_ep_select(mbase, chdat->epnum);
			csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
				| MUSB_TXCSR_P_WZC_BITS;
			musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
		}
	}

	spin_unlock_irqrestore(&musb->lock, flags);
}
0180
/*
 * Set up and start a DMA transfer for an endpoint.
 *
 * Returns true when the transfer has been programmed and started, false
 * when the caller must fall back to PIO (unaligned buffer, short length,
 * endpoint still busy, no free dmareq, or dmaengine setup failure).
 *
 * Only the 32-byte-aligned part of @len is handed to the DMA engine
 * (transfer_len = len & ~0x1f); the tail is completed with PIO from the
 * completion callback tusb_omap_dma_cb().
 */
static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
				u8 rndis_mode, dma_addr_t dma_addr, u32 len)
{
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);
	struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
	struct musb *musb = chdat->musb;
	struct device *dev = musb->controller;
	struct musb_hw_ep *hw_ep = chdat->hw_ep;
	void __iomem *mbase = musb->mregs;
	void __iomem *ep_conf = hw_ep->conf;
	dma_addr_t fifo_addr = hw_ep->fifo_sync;
	u32 dma_remaining;
	u16 csr;
	u32 psize;
	struct tusb_dma_data *dma_data;
	struct dma_async_tx_descriptor *dma_desc;
	struct dma_slave_config dma_cfg;
	enum dma_transfer_direction dma_dir;
	u32 port_window;
	int ret;

	/*
	 * Odd addresses cannot be DMAd and transfers under 32 bytes are
	 * left to PIO.  NOTE(review): len > packet_sz is also rejected,
	 * which restricts DMA to single-packet transfers — confirm this
	 * is intentional for this hardware.
	 */
	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
		return false;

	/*
	 * 2-byte-only alignment is rejected as well, so every address
	 * that gets past here is 32-bit aligned.  NOTE(review): this
	 * makes the 16-bit/async configuration branch below unreachable;
	 * it is kept, presumably, for completeness — verify.
	 */
	if (dma_addr & 0x2)
		return false;

	/*
	 * Make sure the endpoint's XFR_SIZE counter is idle before
	 * programming a new transfer on it.
	 */
	if (chdat->tx)
		dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
	if (dma_remaining) {
		dev_dbg(musb->controller, "Busy %s dma, not using: %08x\n",
			chdat->tx ? "tx" : "rx", dma_remaining);
		return false;
	}

	/* DMA only the 32-byte-aligned part; the tail is done with PIO. */
	chdat->transfer_len = len & ~0x1f;

	if (len < packet_sz)
		chdat->transfer_packet_sz = chdat->transfer_len;
	else
		chdat->transfer_packet_sz = packet_sz;

	dma_data = chdat->dma_data;
	if (!tusb_dma->multichannel) {
		/* Single-channel mode: claim the shared dmareq0 line. */
		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
			dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
			return false;
		}
		if (dma_data->dmareq < 0) {
			/* Should not happen: dma_pool[0] always carries dmareq0. */
			WARN_ON(1);
			return false;
		}
	}

	chdat->packet_sz = packet_sz;
	chdat->len = len;
	channel->actual_len = 0;
	chdat->dma_addr = dma_addr;
	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Map the buffer for DMA; it is unmapped in the completion callback. */
	if (chdat->tx) {
		dma_dir = DMA_MEM_TO_DEV;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_TO_DEVICE);
	} else {
		dma_dir = DMA_DEV_TO_MEM;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_FROM_DEVICE);
	}

	memset(&dma_cfg, 0, sizeof(dma_cfg));

	/* 32-bit sync FIFO when 4-byte aligned, 16-bit async FIFO otherwise. */
	if ((dma_addr & 0x3) == 0) {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		port_window = 8;
	} else {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		port_window = 16;

		fifo_addr = hw_ep->fifo_async;
	}

	dev_dbg(musb->controller,
		"ep%i %s dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
		chdat->epnum, chdat->tx ? "tx" : "rx", &dma_addr,
		chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz);

	/* Only one of src/dst is used depending on dma_dir; set both. */
	dma_cfg.src_addr = fifo_addr;
	dma_cfg.dst_addr = fifo_addr;
	dma_cfg.src_port_window_size = port_window;
	dma_cfg.src_maxburst = port_window;
	dma_cfg.dst_port_window_size = port_window;
	dma_cfg.dst_maxburst = port_window;

	ret = dmaengine_slave_config(dma_data->chan, &dma_cfg);
	if (ret) {
		dev_err(musb->controller, "DMA slave config failed: %d\n", ret);
		return false;
	}

	dma_desc = dmaengine_prep_slave_single(dma_data->chan, dma_addr,
					chdat->transfer_len, dma_dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		dev_err(musb->controller, "DMA prep_slave_single failed\n");
		return false;
	}

	dma_desc->callback = tusb_omap_dma_cb;
	dma_desc->callback_param = channel;
	dmaengine_submit(dma_desc);

	dev_dbg(musb->controller,
		"ep%i %s using %i-bit %s dma from %pad to %pad\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		dma_cfg.src_addr_width * 8,
		((dma_addr & 0x3) == 0) ? "sync" : "async",
		(dma_dir == DMA_MEM_TO_DEV) ? &dma_addr : &fifo_addr,
		(dma_dir == DMA_MEM_TO_DEV) ? &fifo_addr : &dma_addr);

	/*
	 * Prepare MUSB for DMA transfer.
	 */
	musb_ep_select(mbase, chdat->epnum);
	if (chdat->tx) {
		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
		csr |= MUSB_RXCSR_DMAENAB;
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
		musb_writew(hw_ep->regs, MUSB_RXCSR,
			csr | MUSB_RXCSR_P_WZC_BITS);
	}

	/* Kick off the dmaengine transfer. */
	dma_async_issue_pending(dma_data->chan);

	if (chdat->tx) {
		/* Program TUSB packet size and transfer length for TX. */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~0x7ff;
		psize |= chdat->transfer_packet_sz;
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	} else {
		/* Program TUSB packet size and transfer length for RX. */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~(0x7ff << 16);
		psize |= (chdat->transfer_packet_sz << 16);
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	}

	return true;
}
0366
0367 static int tusb_omap_dma_abort(struct dma_channel *channel)
0368 {
0369 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
0370
0371 if (chdat->dma_data)
0372 dmaengine_terminate_all(chdat->dma_data->chan);
0373
0374 channel->status = MUSB_DMA_STATUS_FREE;
0375
0376 return 0;
0377 }
0378
0379 static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
0380 {
0381 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
0382 int i, dmareq_nr = -1;
0383
0384 for (i = 0; i < MAX_DMAREQ; i++) {
0385 int cur = (reg & (0xf << (i * 5))) >> (i * 5);
0386 if (cur == 0) {
0387 dmareq_nr = i;
0388 break;
0389 }
0390 }
0391
0392 if (dmareq_nr == -1)
0393 return -EAGAIN;
0394
0395 reg |= (chdat->epnum << (dmareq_nr * 5));
0396 if (chdat->tx)
0397 reg |= ((1 << 4) << (dmareq_nr * 5));
0398 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
0399
0400 chdat->dma_data = &chdat->tusb_dma->dma_pool[dmareq_nr];
0401
0402 return 0;
0403 }
0404
0405 static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
0406 {
0407 u32 reg;
0408
0409 if (!chdat || !chdat->dma_data || chdat->dma_data->dmareq < 0)
0410 return;
0411
0412 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
0413 reg &= ~(0x1f << (chdat->dma_data->dmareq * 5));
0414 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
0415
0416 chdat->dma_data = NULL;
0417 }
0418
0419 static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
0420
0421 static struct dma_channel *
0422 tusb_omap_dma_allocate(struct dma_controller *c,
0423 struct musb_hw_ep *hw_ep,
0424 u8 tx)
0425 {
0426 int ret, i;
0427 struct tusb_omap_dma *tusb_dma;
0428 struct musb *musb;
0429 struct dma_channel *channel = NULL;
0430 struct tusb_omap_dma_ch *chdat = NULL;
0431 struct tusb_dma_data *dma_data = NULL;
0432
0433 tusb_dma = container_of(c, struct tusb_omap_dma, controller);
0434 musb = tusb_dma->controller.musb;
0435
0436
0437 if (hw_ep->epnum == 0) {
0438 dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
0439 return NULL;
0440 }
0441
0442 for (i = 0; i < MAX_DMAREQ; i++) {
0443 struct dma_channel *ch = dma_channel_pool[i];
0444 if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
0445 ch->status = MUSB_DMA_STATUS_FREE;
0446 channel = ch;
0447 chdat = ch->private_data;
0448 break;
0449 }
0450 }
0451
0452 if (!channel)
0453 return NULL;
0454
0455 chdat->musb = tusb_dma->controller.musb;
0456 chdat->tbase = tusb_dma->tbase;
0457 chdat->hw_ep = hw_ep;
0458 chdat->epnum = hw_ep->epnum;
0459 chdat->completed_len = 0;
0460 chdat->tusb_dma = tusb_dma;
0461 if (tx)
0462 chdat->tx = 1;
0463 else
0464 chdat->tx = 0;
0465
0466 channel->max_len = 0x7fffffff;
0467 channel->desired_mode = 0;
0468 channel->actual_len = 0;
0469
0470 if (!chdat->dma_data) {
0471 if (tusb_dma->multichannel) {
0472 ret = tusb_omap_dma_allocate_dmareq(chdat);
0473 if (ret != 0)
0474 goto free_dmareq;
0475 } else {
0476 chdat->dma_data = &tusb_dma->dma_pool[0];
0477 }
0478 }
0479
0480 dma_data = chdat->dma_data;
0481
0482 dev_dbg(musb->controller, "ep%i %s dma: %s dmareq%i\n",
0483 chdat->epnum,
0484 chdat->tx ? "tx" : "rx",
0485 tusb_dma->multichannel ? "shared" : "dedicated",
0486 dma_data->dmareq);
0487
0488 return channel;
0489
0490 free_dmareq:
0491 tusb_omap_dma_free_dmareq(chdat);
0492
0493 dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum);
0494 channel->status = MUSB_DMA_STATUS_UNKNOWN;
0495
0496 return NULL;
0497 }
0498
0499 static void tusb_omap_dma_release(struct dma_channel *channel)
0500 {
0501 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
0502 struct musb *musb = chdat->musb;
0503
0504 dev_dbg(musb->controller, "Release for ep%i\n", chdat->epnum);
0505
0506 channel->status = MUSB_DMA_STATUS_UNKNOWN;
0507
0508 dmaengine_terminate_sync(chdat->dma_data->chan);
0509 tusb_omap_dma_free_dmareq(chdat);
0510
0511 channel = NULL;
0512 }
0513
0514 void tusb_dma_controller_destroy(struct dma_controller *c)
0515 {
0516 struct tusb_omap_dma *tusb_dma;
0517 int i;
0518
0519 tusb_dma = container_of(c, struct tusb_omap_dma, controller);
0520 for (i = 0; i < MAX_DMAREQ; i++) {
0521 struct dma_channel *ch = dma_channel_pool[i];
0522 if (ch) {
0523 kfree(ch->private_data);
0524 kfree(ch);
0525 }
0526
0527
0528 if (tusb_dma && tusb_dma->dma_pool[i].chan)
0529 dma_release_channel(tusb_dma->dma_pool[i].chan);
0530 }
0531
0532 kfree(tusb_dma);
0533 }
0534 EXPORT_SYMBOL_GPL(tusb_dma_controller_destroy);
0535
0536 static int tusb_omap_allocate_dma_pool(struct tusb_omap_dma *tusb_dma)
0537 {
0538 struct musb *musb = tusb_dma->controller.musb;
0539 int i;
0540 int ret = 0;
0541
0542 for (i = 0; i < MAX_DMAREQ; i++) {
0543 struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];
0544
0545
0546
0547
0548
0549
0550 if (i == 0 || tusb_dma->multichannel) {
0551 char ch_name[8];
0552
0553 sprintf(ch_name, "dmareq%d", i);
0554 dma_data->chan = dma_request_chan(musb->controller,
0555 ch_name);
0556 if (IS_ERR(dma_data->chan)) {
0557 dev_err(musb->controller,
0558 "Failed to request %s\n", ch_name);
0559 ret = PTR_ERR(dma_data->chan);
0560 goto dma_error;
0561 }
0562
0563 dma_data->dmareq = i;
0564 } else {
0565 dma_data->dmareq = -1;
0566 }
0567 }
0568
0569 return 0;
0570
0571 dma_error:
0572 for (; i >= 0; i--) {
0573 struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];
0574
0575 if (dma_data->dmareq >= 0)
0576 dma_release_channel(dma_data->chan);
0577 }
0578
0579 return ret;
0580 }
0581
0582 struct dma_controller *
0583 tusb_dma_controller_create(struct musb *musb, void __iomem *base)
0584 {
0585 void __iomem *tbase = musb->ctrl_base;
0586 struct tusb_omap_dma *tusb_dma;
0587 int i;
0588
0589
0590
0591 musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
0592 musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);
0593
0594 musb_writel(tbase, TUSB_DMA_REQ_CONF,
0595 TUSB_DMA_REQ_CONF_BURST_SIZE(2)
0596 | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
0597 | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
0598
0599 tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
0600 if (!tusb_dma)
0601 goto out;
0602
0603 tusb_dma->controller.musb = musb;
0604 tusb_dma->tbase = musb->ctrl_base;
0605
0606 tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
0607 tusb_dma->controller.channel_release = tusb_omap_dma_release;
0608 tusb_dma->controller.channel_program = tusb_omap_dma_program;
0609 tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
0610
0611 if (musb->tusb_revision >= TUSB_REV_30)
0612 tusb_dma->multichannel = 1;
0613
0614 for (i = 0; i < MAX_DMAREQ; i++) {
0615 struct dma_channel *ch;
0616 struct tusb_omap_dma_ch *chdat;
0617
0618 ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
0619 if (!ch)
0620 goto cleanup;
0621
0622 dma_channel_pool[i] = ch;
0623
0624 chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
0625 if (!chdat)
0626 goto cleanup;
0627
0628 ch->status = MUSB_DMA_STATUS_UNKNOWN;
0629 ch->private_data = chdat;
0630 }
0631
0632 if (tusb_omap_allocate_dma_pool(tusb_dma))
0633 goto cleanup;
0634
0635 return &tusb_dma->controller;
0636
0637 cleanup:
0638 musb_dma_controller_destroy(&tusb_dma->controller);
0639 out:
0640 return NULL;
0641 }
0642 EXPORT_SYMBOL_GPL(tusb_dma_controller_create);