Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 #include <linux/device.h>
0003 #include <linux/dma-mapping.h>
0004 #include <linux/dmaengine.h>
0005 #include <linux/sizes.h>
0006 #include <linux/platform_device.h>
0007 #include <linux/of.h>
0008 
0009 #include "cppi_dma.h"
0010 #include "musb_core.h"
0011 #include "musb_trace.h"
0012 
/*
 * Per-endpoint generic RNDIS size register: one 32-bit register per
 * endpoint starting at offset 0x80 (endpoint numbering is 1-based).
 * The argument is fully parenthesized so expressions such as
 * RNDIS_REG(n + 1) expand correctly.
 */
#define RNDIS_REG(x) (0x80 + (((x) - 1) * 4))

/* per-endpoint auto-request modes (2-bit field, see cppi41_set_autoreq_mode) */
#define EP_MODE_AUTOREQ_NONE        0
#define EP_MODE_AUTOREQ_ALL_NEOP    1
#define EP_MODE_AUTOREQ_ALWAYS      3

/* per-endpoint DMA modes (2-bit field, see the set_dma_mode hooks) */
#define EP_MODE_DMA_TRANSPARENT     0
#define EP_MODE_DMA_RNDIS       1
#define EP_MODE_DMA_GEN_RNDIS       3

/* control-register offsets used on non-DA8xx (e.g. AM335x) parts */
#define USB_CTRL_TX_MODE    0x70
#define USB_CTRL_RX_MODE    0x74
#define USB_CTRL_AUTOREQ    0xd0
#define USB_TDOWN       0xd8

#define MUSB_DMA_NUM_CHANNELS 15

/* control-register offsets used when the MUSB_DA8XX quirk is set */
#define DA8XX_USB_MODE      0x10
#define DA8XX_USB_AUTOREQ   0x14
#define DA8XX_USB_TEARDOWN  0x1c

#define DA8XX_DMA_NUM_CHANNELS 4
0035 
/*
 * State for one CPPI 4.1 based MUSB DMA controller instance.
 * Embeds the generic struct dma_controller so callers can get back here
 * via container_of().
 */
struct cppi41_dma_controller {
    struct dma_controller controller;   /* generic musb-facing interface */
    struct cppi41_dma_channel *rx_channel;  /* array[num_channels], index = epnum - 1 */
    struct cppi41_dma_channel *tx_channel;  /* array[num_channels], index = epnum - 1 */
    struct hrtimer early_tx;        /* polls TX FIFOs that drain after DMA done */
    struct list_head early_tx_list; /* channels waiting on early_tx (tx_check links) */
    u32 rx_mode;            /* cached USB_CTRL_RX_MODE value (non-DA8xx) */
    u32 tx_mode;            /* cached TX mode word; DA8xx keeps both dirs here */
    u32 auto_req;           /* cached auto-request register value */

    /* register offsets, chosen per hardware quirk at create time */
    u32 tdown_reg;
    u32 autoreq_reg;

    /* hardware-variant hook: program the per-endpoint DMA mode */
    void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
                 unsigned int mode);
    u8 num_channels;        /* channels per direction */
};
0053 
0054 static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
0055 {
0056     u16 csr;
0057     u8 toggle;
0058 
0059     if (cppi41_channel->is_tx)
0060         return;
0061     if (!is_host_active(cppi41_channel->controller->controller.musb))
0062         return;
0063 
0064     csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
0065     toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
0066 
0067     cppi41_channel->usb_toggle = toggle;
0068 }
0069 
/*
 * Re-read the host-side RX data toggle after a DMA completion and, where
 * the hardware appears to have lost it, force it back to DATA1.
 * Only meaningful for RX channels in host mode; silently returns
 * otherwise.  Pairs with save_rx_toggle(), which records the pre-transfer
 * toggle in cppi41_channel->usb_toggle.
 */
static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
    struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
    struct musb *musb = hw_ep->musb;
    u16 csr;
    u8 toggle;

    if (cppi41_channel->is_tx)
        return;
    if (!is_host_active(musb))
        return;

    /* indexed CSR access: select the endpoint before reading RXCSR */
    musb_ep_select(musb->mregs, hw_ep->epnum);
    csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
    toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

    /*
     * AM335x Advisory 1.0.13: Due to internal synchronisation error the
     * data toggle may reset from DATA1 to DATA0 during receiving data from
     * more than one endpoint.
     *
     * If the toggle reads 0 now and was already saved as 0 before the
     * transfer, assume it was lost and restore DATA1 via the direct
     * write-toggle mechanism.
     */
    if (!toggle && toggle == cppi41_channel->usb_toggle) {
        csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
        musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
        musb_dbg(musb, "Restoring DATA1 toggle.");
    }

    cppi41_channel->usb_toggle = toggle;
}
0099 
0100 static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
0101 {
0102     u8      epnum = hw_ep->epnum;
0103     struct musb *musb = hw_ep->musb;
0104     void __iomem    *epio = musb->endpoints[epnum].regs;
0105     u16     csr;
0106 
0107     musb_ep_select(musb->mregs, hw_ep->epnum);
0108     csr = musb_readw(epio, MUSB_TXCSR);
0109     if (csr & MUSB_TXCSR_TXPKTRDY)
0110         return false;
0111     return true;
0112 }
0113 
0114 static void cppi41_dma_callback(void *private_data,
0115                 const struct dmaengine_result *result);
0116 
/*
 * Handle completion of one DMA segment on @cppi41_channel.
 *
 * If the whole request is finished (prog_len == 0, set by the callback on
 * a short packet or when total_len was reached) or the channel was freed,
 * report completion to the musb core.  Otherwise queue the next
 * packet-sized segment — RX is deliberately programmed one packet at a
 * time (see cppi41_configure_channel()).
 *
 * Called from the DMA callback or the early-TX hrtimer, with musb->lock
 * held.
 */
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
    struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
    struct musb *musb = hw_ep->musb;
    void __iomem *epio = hw_ep->regs;
    u16 csr;

    if (!cppi41_channel->prog_len ||
        (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

        /* done, complete */
        cppi41_channel->channel.actual_len =
            cppi41_channel->transferred;
        cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
        cppi41_channel->channel.rx_packet_done = true;

        /*
         * transmit ZLP using PIO mode for transfers which size is
         * multiple of EP packet size.
         */
        if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
                    cppi41_channel->packet_sz) == 0) {
            musb_ep_select(musb->mregs, hw_ep->epnum);
            csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
            musb_writew(epio, MUSB_TXCSR, csr);
        }

        trace_musb_cppi41_done(cppi41_channel);
        musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
    } else {
        /* next iteration, reload */
        struct dma_chan *dc = cppi41_channel->dc;
        struct dma_async_tx_descriptor *dma_desc;
        enum dma_transfer_direction direction;
        u32 remain_bytes;

        /* advance past the packet-sized segment just completed */
        cppi41_channel->buf_addr += cppi41_channel->packet_sz;

        remain_bytes = cppi41_channel->total_len;
        remain_bytes -= cppi41_channel->transferred;
        remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
        cppi41_channel->prog_len = remain_bytes;

        direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
            : DMA_DEV_TO_MEM;
        dma_desc = dmaengine_prep_slave_single(dc,
                cppi41_channel->buf_addr,
                remain_bytes,
                direction,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (WARN_ON(!dma_desc))
            return;

        dma_desc->callback_result = cppi41_dma_callback;
        dma_desc->callback_param = &cppi41_channel->channel;
        cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
        trace_musb_cppi41_cont(cppi41_channel);
        dma_async_issue_pending(dc);

        /* host RX: re-arm the IN token request for the next packet */
        if (!cppi41_channel->is_tx) {
            musb_ep_select(musb->mregs, hw_ep->epnum);
            csr = musb_readw(epio, MUSB_RXCSR);
            csr |= MUSB_RXCSR_H_REQPKT;
            musb_writew(epio, MUSB_RXCSR, csr);
        }
    }
}
0184 
/*
 * early_tx hrtimer callback: poll the TX FIFOs of channels whose DMA
 * completed before the FIFO drained (see cppi41_dma_callback()).  Every
 * channel whose FIFO is now empty is completed and removed from the
 * list; the timer re-arms itself while any channel is still pending.
 */
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
    struct cppi41_dma_controller *controller;
    struct cppi41_dma_channel *cppi41_channel, *n;
    struct musb *musb;
    unsigned long flags;
    enum hrtimer_restart ret = HRTIMER_NORESTART;

    controller = container_of(timer, struct cppi41_dma_controller,
            early_tx);
    musb = controller->controller.musb;

    spin_lock_irqsave(&musb->lock, flags);
    /* _safe iteration: cppi41_trans_done() unlinks entries as they finish */
    list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
            tx_check) {
        bool empty;
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

        empty = musb_is_tx_fifo_empty(hw_ep);
        if (empty) {
            list_del_init(&cppi41_channel->tx_check);
            cppi41_trans_done(cppi41_channel);
        }
    }

    /* keep polling every 20us while work remains and nobody re-armed us */
    if (!list_empty(&controller->early_tx_list) &&
        !hrtimer_is_queued(&controller->early_tx)) {
        ret = HRTIMER_RESTART;
        hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
    }

    spin_unlock_irqrestore(&musb->lock, flags);
    return ret;
}
0219 
/*
 * dmaengine completion callback for both TX and RX channels.
 *
 * Accounts the bytes actually transferred, applies the RX data-toggle
 * workaround, and either completes/continues the transfer via
 * cppi41_trans_done() or applies the early-TX-interrupt workaround:
 * briefly spin for the FIFO to drain on high speed, or hand the channel
 * to the early_tx hrtimer on full speed.
 */
static void cppi41_dma_callback(void *private_data,
                const struct dmaengine_result *result)
{
    struct dma_channel *channel = private_data;
    struct cppi41_dma_channel *cppi41_channel = channel->private_data;
    struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
    struct cppi41_dma_controller *controller;
    struct musb *musb = hw_ep->musb;
    unsigned long flags;
    struct dma_tx_state txstate;
    u32 transferred;
    int is_hs = 0;
    bool empty;

    controller = cppi41_channel->controller;
    /* give the platform glue a chance to react to the DMA interrupt */
    if (controller->controller.dma_callback)
        controller->controller.dma_callback(&controller->controller);

    /* aborted transfers are finished by the abort path, not here */
    if (result->result == DMA_TRANS_ABORTED)
        return;

    spin_lock_irqsave(&musb->lock, flags);

    dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
            &txstate);
    transferred = cppi41_channel->prog_len - txstate.residue;
    cppi41_channel->transferred += transferred;

    trace_musb_cppi41_gb(cppi41_channel);
    update_rx_toggle(cppi41_channel);

    /* reaching total_len or a short packet ends the whole request */
    if (cppi41_channel->transferred == cppi41_channel->total_len ||
            transferred < cppi41_channel->packet_sz)
        cppi41_channel->prog_len = 0;

    if (cppi41_channel->is_tx) {
        u8 type;

        if (is_host_active(musb))
            type = hw_ep->out_qh->type;
        else
            type = hw_ep->ep_in.type;

        if (type == USB_ENDPOINT_XFER_ISOC)
            /*
             * Don't use the early-TX-interrupt workaround below
             * for Isoch transfer. Since Isoch are periodic
             * transfer, by the time the next transfer is
             * scheduled, the current one should be done already.
             *
             * This avoids audio playback underrun issue.
             */
            empty = true;
        else
            empty = musb_is_tx_fifo_empty(hw_ep);
    }

    /* for RX the short-circuit keeps "empty" unread (it is only set for TX) */
    if (!cppi41_channel->is_tx || empty) {
        cppi41_trans_done(cppi41_channel);
        goto out;
    }

    /*
     * On AM335x it has been observed that the TX interrupt fires
     * too early that means the TXFIFO is not yet empty but the DMA
     * engine says that it is done with the transfer. We don't
     * receive a FIFO empty interrupt so the only thing we can do is
     * to poll for the bit. On HS it usually takes 2us, on FS around
     * 110us - 150us depending on the transfer size.
     * We spin on HS (no longer than 25us) and set up a timer on
     * FS to check for the bit and complete the transfer.
     */
    if (is_host_active(musb)) {
        if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
            is_hs = 1;
    } else {
        if (musb->g.speed == USB_SPEED_HIGH)
            is_hs = 1;
    }
    if (is_hs) {
        unsigned wait = 25;

        do {
            empty = musb_is_tx_fifo_empty(hw_ep);
            if (empty) {
                cppi41_trans_done(cppi41_channel);
                goto out;
            }
            wait--;
            if (!wait)
                break;
            cpu_relax();
        } while (1);
    }
    /* FIFO still not empty: defer completion to the polling hrtimer */
    list_add_tail(&cppi41_channel->tx_check,
            &controller->early_tx_list);
    if (!hrtimer_is_queued(&controller->early_tx)) {
        /* heuristic first-expiry derived from the transfer size */
        unsigned long usecs = cppi41_channel->total_len / 10;

        hrtimer_start_range_ns(&controller->early_tx,
                       usecs * NSEC_PER_USEC,
                       20 * NSEC_PER_USEC,
                       HRTIMER_MODE_REL);
    }

out:
    spin_unlock_irqrestore(&musb->lock, flags);
}
0328 
0329 static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
0330 {
0331     unsigned shift;
0332 
0333     shift = (ep - 1) * 2;
0334     old &= ~(3 << shift);
0335     old |= mode << shift;
0336     return old;
0337 }
0338 
0339 static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
0340         unsigned mode)
0341 {
0342     struct cppi41_dma_controller *controller = cppi41_channel->controller;
0343     struct musb *musb = controller->controller.musb;
0344     u32 port;
0345     u32 new_mode;
0346     u32 old_mode;
0347 
0348     if (cppi41_channel->is_tx)
0349         old_mode = controller->tx_mode;
0350     else
0351         old_mode = controller->rx_mode;
0352     port = cppi41_channel->port_num;
0353     new_mode = update_ep_mode(port, mode, old_mode);
0354 
0355     if (new_mode == old_mode)
0356         return;
0357     if (cppi41_channel->is_tx) {
0358         controller->tx_mode = new_mode;
0359         musb_writel(musb->ctrl_base, USB_CTRL_TX_MODE, new_mode);
0360     } else {
0361         controller->rx_mode = new_mode;
0362         musb_writel(musb->ctrl_base, USB_CTRL_RX_MODE, new_mode);
0363     }
0364 }
0365 
/*
 * DA8xx set_dma_mode hook: a single USB_MODE register carries the mode
 * fields for both directions.  The shift math below places TX fields 4
 * bits apart starting at bit 0 and RX fields in the upper half-word
 * (shift + 16) -- NOTE(review): field layout inferred from this code;
 * confirm against the DA8xx USB reference manual.  The cached copy for
 * both directions lives in controller->tx_mode.
 */
static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
        unsigned int mode)
{
    struct cppi41_dma_controller *controller = cppi41_channel->controller;
    struct musb *musb = controller->controller.musb;
    unsigned int shift;
    u32 port;
    u32 new_mode;
    u32 old_mode;

    old_mode = controller->tx_mode;
    port = cppi41_channel->port_num;

    shift = (port - 1) * 4;
    if (!cppi41_channel->is_tx)
        shift += 16;
    new_mode = old_mode & ~(3 << shift);
    new_mode |= mode << shift;

    /* avoid redundant register writes when nothing changed */
    if (new_mode == old_mode)
        return;
    controller->tx_mode = new_mode;
    musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
}
0390 
0391 
0392 static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
0393         unsigned mode)
0394 {
0395     struct cppi41_dma_controller *controller = cppi41_channel->controller;
0396     u32 port;
0397     u32 new_mode;
0398     u32 old_mode;
0399 
0400     old_mode = controller->auto_req;
0401     port = cppi41_channel->port_num;
0402     new_mode = update_ep_mode(port, mode, old_mode);
0403 
0404     if (new_mode == old_mode)
0405         return;
0406     controller->auto_req = new_mode;
0407     musb_writel(controller->controller.musb->ctrl_base,
0408             controller->autoreq_reg, new_mode);
0409 }
0410 
/*
 * Program and submit one dmaengine transfer for @channel.
 *
 * TX transfers longer than one packet use generic RNDIS mode plus
 * auto-request so the hardware segments the buffer itself.  Everything
 * else (single-packet TX, and all RX) uses transparent mode; RX is
 * additionally clamped to one packet at a time per AM335x Advisory
 * 1.0.13, with the remainder reloaded segment by segment from
 * cppi41_trans_done().
 *
 * Returns true when the descriptor was queued, false if preparation
 * failed.
 */
static bool cppi41_configure_channel(struct dma_channel *channel,
                u16 packet_sz, u8 mode,
                dma_addr_t dma_addr, u32 len)
{
    struct cppi41_dma_channel *cppi41_channel = channel->private_data;
    struct cppi41_dma_controller *controller = cppi41_channel->controller;
    struct dma_chan *dc = cppi41_channel->dc;
    struct dma_async_tx_descriptor *dma_desc;
    enum dma_transfer_direction direction;
    struct musb *musb = cppi41_channel->controller->controller.musb;
    unsigned use_gen_rndis = 0;

    cppi41_channel->buf_addr = dma_addr;
    cppi41_channel->total_len = len;
    cppi41_channel->transferred = 0;
    cppi41_channel->packet_sz = packet_sz;
    /* ZLP handling (see cppi41_trans_done) only applies to TX with mode set */
    cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

    /*
     * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
     * than max packet size at a time.
     */
    if (cppi41_channel->is_tx)
        use_gen_rndis = 1;

    if (use_gen_rndis) {
        /* RNDIS mode */
        if (len > packet_sz) {
            /* tell the hardware the total size so it can segment */
            musb_writel(musb->ctrl_base,
                RNDIS_REG(cppi41_channel->port_num), len);
            /* gen rndis */
            controller->set_dma_mode(cppi41_channel,
                    EP_MODE_DMA_GEN_RNDIS);

            /* auto req */
            cppi41_set_autoreq_mode(cppi41_channel,
                    EP_MODE_AUTOREQ_ALL_NEOP);
        } else {
            musb_writel(musb->ctrl_base,
                    RNDIS_REG(cppi41_channel->port_num), 0);
            controller->set_dma_mode(cppi41_channel,
                    EP_MODE_DMA_TRANSPARENT);
            cppi41_set_autoreq_mode(cppi41_channel,
                    EP_MODE_AUTOREQ_NONE);
        }
    } else {
        /* fallback mode */
        controller->set_dma_mode(cppi41_channel,
                EP_MODE_DMA_TRANSPARENT);
        cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
        /* one packet per segment; the rest is reloaded on completion */
        len = min_t(u32, packet_sz, len);
    }
    cppi41_channel->prog_len = len;
    direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
    dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!dma_desc)
        return false;

    dma_desc->callback_result = cppi41_dma_callback;
    dma_desc->callback_param = channel;
    cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
    cppi41_channel->channel.rx_packet_done = false;

    trace_musb_cppi41_config(cppi41_channel);

    /* record the RX toggle before the transfer runs (errata workaround) */
    save_rx_toggle(cppi41_channel);
    dma_async_issue_pending(dc);
    return true;
}
0481 
0482 static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
0483                 struct musb_hw_ep *hw_ep, u8 is_tx)
0484 {
0485     struct cppi41_dma_controller *controller = container_of(c,
0486             struct cppi41_dma_controller, controller);
0487     struct cppi41_dma_channel *cppi41_channel = NULL;
0488     u8 ch_num = hw_ep->epnum - 1;
0489 
0490     if (ch_num >= controller->num_channels)
0491         return NULL;
0492 
0493     if (is_tx)
0494         cppi41_channel = &controller->tx_channel[ch_num];
0495     else
0496         cppi41_channel = &controller->rx_channel[ch_num];
0497 
0498     if (!cppi41_channel->dc)
0499         return NULL;
0500 
0501     if (cppi41_channel->is_allocated)
0502         return NULL;
0503 
0504     cppi41_channel->hw_ep = hw_ep;
0505     cppi41_channel->is_allocated = 1;
0506 
0507     trace_musb_cppi41_alloc(cppi41_channel);
0508     return &cppi41_channel->channel;
0509 }
0510 
0511 static void cppi41_dma_channel_release(struct dma_channel *channel)
0512 {
0513     struct cppi41_dma_channel *cppi41_channel = channel->private_data;
0514 
0515     trace_musb_cppi41_free(cppi41_channel);
0516     if (cppi41_channel->is_allocated) {
0517         cppi41_channel->is_allocated = 0;
0518         channel->status = MUSB_DMA_STATUS_FREE;
0519         channel->actual_len = 0;
0520     }
0521 }
0522 
/*
 * dma_controller.channel_program hook: start a transfer on an allocated
 * channel.
 *
 * For host-side high-bandwidth endpoints the effective packet size is
 * hb_mult times the base size (bits 0-10 of @packet_sz).  Returns the
 * (boolean) result of cppi41_configure_channel(); on failure the channel
 * is marked free again.
 */
static int cppi41_dma_channel_program(struct dma_channel *channel,
                u16 packet_sz, u8 mode,
                dma_addr_t dma_addr, u32 len)
{
    int ret;
    struct cppi41_dma_channel *cppi41_channel = channel->private_data;
    int hb_mult = 0;

    /* programming an unallocated or busy channel is a driver bug */
    BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
        channel->status == MUSB_DMA_STATUS_BUSY);

    if (is_host_active(cppi41_channel->controller->controller.musb)) {
        if (cppi41_channel->is_tx)
            hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
        else
            hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
    }

    channel->status = MUSB_DMA_STATUS_BUSY;
    channel->actual_len = 0;

    /* high-bandwidth: scale the base packet size (low 11 bits) */
    if (hb_mult)
        packet_sz = hb_mult * (packet_sz & 0x7FF);

    ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
    if (!ret)
        channel->status = MUSB_DMA_STATUS_FREE;

    return ret;
}
0553 
0554 static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
0555         void *buf, u32 length)
0556 {
0557     struct cppi41_dma_channel *cppi41_channel = channel->private_data;
0558     struct cppi41_dma_controller *controller = cppi41_channel->controller;
0559     struct musb *musb = controller->controller.musb;
0560 
0561     if (is_host_active(musb)) {
0562         WARN_ON(1);
0563         return 1;
0564     }
0565     if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
0566         return 0;
0567     if (cppi41_channel->is_tx)
0568         return 1;
0569     /* AM335x Advisory 1.0.13. No workaround for device RX mode */
0570     return 0;
0571 }
0572 
/*
 * dma_controller.channel_abort hook: stop an in-flight transfer.
 *
 * Sequence: remove any pending early-TX work, disable DMA on the
 * endpoint (flushing the RX FIFO if a packet is already there), then run
 * the teardown handshake — for TX the teardown bit must be rewritten on
 * each -EAGAIN from dmaengine_terminate_all() and once more afterwards.
 * The udelay/mdelay calls below are hardware-mandated drain delays; do
 * not reorder or remove them.
 *
 * Always returns 0.
 */
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
    struct cppi41_dma_channel *cppi41_channel = channel->private_data;
    struct cppi41_dma_controller *controller = cppi41_channel->controller;
    struct musb *musb = controller->controller.musb;
    void __iomem *epio = cppi41_channel->hw_ep->regs;
    int tdbit;
    int ret;
    unsigned is_tx;
    u16 csr;

    is_tx = cppi41_channel->is_tx;
    trace_musb_cppi41_abort(cppi41_channel);

    /* nothing in flight */
    if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
        return 0;

    list_del_init(&cppi41_channel->tx_check);
    if (is_tx) {
        csr = musb_readw(epio, MUSB_TXCSR);
        csr &= ~MUSB_TXCSR_DMAENAB;
        musb_writew(epio, MUSB_TXCSR, csr);
    } else {
        cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

        /* delay to drain to cppi dma pipeline for isoch */
        udelay(250);

        csr = musb_readw(epio, MUSB_RXCSR);
        csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
        musb_writew(epio, MUSB_RXCSR, csr);

        /* wait to drain cppi dma pipe line */
        udelay(50);

        csr = musb_readw(epio, MUSB_RXCSR);
        if (csr & MUSB_RXCSR_RXPKTRDY) {
            csr |= MUSB_RXCSR_FLUSHFIFO;
            /* double write flushes both halves of a double-buffered
             * FIFO -- NOTE(review): inferred; confirm vs MUSB docs */
            musb_writew(epio, MUSB_RXCSR, csr);
            musb_writew(epio, MUSB_RXCSR, csr);
        }
    }

    /* DA8xx Advisory 2.3.27: wait 250 ms before to start the teardown */
    if (musb->ops->quirks & MUSB_DA8XX)
        mdelay(250);

    /* teardown register: RX bits in the low half-word, TX in the high */
    tdbit = 1 << cppi41_channel->port_num;
    if (is_tx)
        tdbit <<= 16;

    do {
        if (is_tx)
            musb_writel(musb->ctrl_base, controller->tdown_reg,
                    tdbit);
        ret = dmaengine_terminate_all(cppi41_channel->dc);
    } while (ret == -EAGAIN);

    if (is_tx) {
        musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);

        csr = musb_readw(epio, MUSB_TXCSR);
        if (csr & MUSB_TXCSR_TXPKTRDY) {
            csr |= MUSB_TXCSR_FLUSHFIFO;
            musb_writew(epio, MUSB_TXCSR, csr);
        }
    }

    cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
    return 0;
}
0644 
0645 static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
0646 {
0647     struct dma_chan *dc;
0648     int i;
0649 
0650     for (i = 0; i < ctrl->num_channels; i++) {
0651         dc = ctrl->tx_channel[i].dc;
0652         if (dc)
0653             dma_release_channel(dc);
0654         dc = ctrl->rx_channel[i].dc;
0655         if (dc)
0656             dma_release_channel(dc);
0657     }
0658 }
0659 
/*
 * Counterpart of cppi41_dma_controller_start(): currently just releases
 * the dmaengine channels acquired there.
 */
static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
    cppi41_release_all_dma_chans(controller);
}
0664 
0665 static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
0666 {
0667     struct musb *musb = controller->controller.musb;
0668     struct device *dev = musb->controller;
0669     struct device_node *np = dev->parent->of_node;
0670     struct cppi41_dma_channel *cppi41_channel;
0671     int count;
0672     int i;
0673     int ret;
0674 
0675     count = of_property_count_strings(np, "dma-names");
0676     if (count < 0)
0677         return count;
0678 
0679     for (i = 0; i < count; i++) {
0680         struct dma_chan *dc;
0681         struct dma_channel *musb_dma;
0682         const char *str;
0683         unsigned is_tx;
0684         unsigned int port;
0685 
0686         ret = of_property_read_string_index(np, "dma-names", i, &str);
0687         if (ret)
0688             goto err;
0689         if (strstarts(str, "tx"))
0690             is_tx = 1;
0691         else if (strstarts(str, "rx"))
0692             is_tx = 0;
0693         else {
0694             dev_err(dev, "Wrong dmatype %s\n", str);
0695             goto err;
0696         }
0697         ret = kstrtouint(str + 2, 0, &port);
0698         if (ret)
0699             goto err;
0700 
0701         ret = -EINVAL;
0702         if (port > controller->num_channels || !port)
0703             goto err;
0704         if (is_tx)
0705             cppi41_channel = &controller->tx_channel[port - 1];
0706         else
0707             cppi41_channel = &controller->rx_channel[port - 1];
0708 
0709         cppi41_channel->controller = controller;
0710         cppi41_channel->port_num = port;
0711         cppi41_channel->is_tx = is_tx;
0712         INIT_LIST_HEAD(&cppi41_channel->tx_check);
0713 
0714         musb_dma = &cppi41_channel->channel;
0715         musb_dma->private_data = cppi41_channel;
0716         musb_dma->status = MUSB_DMA_STATUS_FREE;
0717         musb_dma->max_len = SZ_4M;
0718 
0719         dc = dma_request_chan(dev->parent, str);
0720         if (IS_ERR(dc)) {
0721             ret = PTR_ERR(dc);
0722             if (ret != -EPROBE_DEFER)
0723                 dev_err(dev, "Failed to request %s: %d.\n",
0724                     str, ret);
0725             goto err;
0726         }
0727 
0728         cppi41_channel->dc = dc;
0729     }
0730     return 0;
0731 err:
0732     cppi41_release_all_dma_chans(controller);
0733     return ret;
0734 }
0735 
/*
 * Tear down a controller built by cppi41_dma_controller_create(): cancel
 * the early-TX polling timer, release the dmaengine channels, then free
 * the channel arrays and the controller itself.
 */
void cppi41_dma_controller_destroy(struct dma_controller *c)
{
    struct cppi41_dma_controller *controller = container_of(c,
            struct cppi41_dma_controller, controller);

    hrtimer_cancel(&controller->early_tx);
    cppi41_dma_controller_stop(controller);
    kfree(controller->rx_channel);
    kfree(controller->tx_channel);
    kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
0748 
/*
 * Allocate and initialize a CPPI 4.1 based dma_controller for @musb.
 *
 * Needs a device-tree node on the glue device's parent (the dmaengine
 * channels are looked up there by name).  MUSB_DA8XX-quirked hardware
 * gets the DA8xx register offsets, mode hook and channel count; all
 * other hardware the defaults.
 *
 * Returns the new controller, NULL on failure, or
 * ERR_PTR(-EPROBE_DEFER) when the dmaengine channels are not available
 * yet.  @base is currently unused.
 */
struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
    struct cppi41_dma_controller *controller;
    int channel_size;
    int ret = 0;

    if (!musb->controller->parent->of_node) {
        dev_err(musb->controller, "Need DT for the DMA engine.\n");
        return NULL;
    }

    controller = kzalloc(sizeof(*controller), GFP_KERNEL);
    if (!controller)
        goto kzalloc_fail;

    hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    controller->early_tx.function = cppi41_recheck_tx_req;
    INIT_LIST_HEAD(&controller->early_tx_list);

    controller->controller.channel_alloc = cppi41_dma_channel_allocate;
    controller->controller.channel_release = cppi41_dma_channel_release;
    controller->controller.channel_program = cppi41_dma_channel_program;
    controller->controller.channel_abort = cppi41_dma_channel_abort;
    controller->controller.is_compatible = cppi41_is_compatible;
    controller->controller.musb = musb;

    /* pick the register layout for this hardware variant */
    if (musb->ops->quirks & MUSB_DA8XX) {
        controller->tdown_reg = DA8XX_USB_TEARDOWN;
        controller->autoreq_reg = DA8XX_USB_AUTOREQ;
        controller->set_dma_mode = da8xx_set_dma_mode;
        controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
    } else {
        controller->tdown_reg = USB_TDOWN;
        controller->autoreq_reg = USB_CTRL_AUTOREQ;
        controller->set_dma_mode = cppi41_set_dma_mode;
        controller->num_channels = MUSB_DMA_NUM_CHANNELS;
    }

    channel_size = controller->num_channels *
            sizeof(struct cppi41_dma_channel);
    controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
    if (!controller->rx_channel)
        goto rx_channel_alloc_fail;
    controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
    if (!controller->tx_channel)
        goto tx_channel_alloc_fail;

    ret = cppi41_dma_controller_start(controller);
    if (ret)
        goto plat_get_fail;
    return &controller->controller;

/* unwind in reverse allocation order */
plat_get_fail:
    kfree(controller->tx_channel);
tx_channel_alloc_fail:
    kfree(controller->rx_channel);
rx_channel_alloc_fail:
    kfree(controller);
kzalloc_fail:
    /* only deferral is propagated; other failures collapse to NULL */
    if (ret == -EPROBE_DEFER)
        return ERR_PTR(ret);
    return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);