0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * MUSB OTG driver host support
0004  *
0005  * Copyright 2005 Mentor Graphics Corporation
0006  * Copyright (C) 2005-2006 by Texas Instruments
0007  * Copyright (C) 2006-2007 Nokia Corporation
0008  * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
0009  */
0010 
0011 #include <linux/module.h>
0012 #include <linux/kernel.h>
0013 #include <linux/delay.h>
0014 #include <linux/sched.h>
0015 #include <linux/slab.h>
0016 #include <linux/errno.h>
0017 #include <linux/list.h>
0018 #include <linux/dma-mapping.h>
0019 
0020 #include "musb_core.h"
0021 #include "musb_host.h"
0022 #include "musb_trace.h"
0023 
0024 /* MUSB HOST status 22-mar-2006
0025  *
0026  * - There's still lots of partial code duplication for fault paths, so
0027  *   they aren't handled as consistently as they need to be.
0028  *
0029  * - PIO mostly behaved when last tested.
0030  *     + including ep0, with all usbtest cases 9, 10
0031  *     + usbtest 14 (ep0out) doesn't seem to run at all
0032  *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
0033  *       configurations, but otherwise double buffering passes basic tests.
0034  *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
0035  *
0036  * - DMA (CPPI) ... partially behaves, not currently recommended
0037  *     + about 1/15 the speed of typical EHCI implementations (PCI)
0038  *     + RX, all too often reqpkt seems to misbehave after tx
0039  *     + TX, no known issues (other than evident silicon issue)
0040  *
0041  * - DMA (Mentor/OMAP) ...has at least toggle update problems
0042  *
0043  * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
0044  *   starvation ... nothing yet for TX, interrupt, or bulk.
0045  *
0046  * - Not tested with HNP, but some SRP paths seem to behave.
0047  *
0048  * NOTE 24-August-2006:
0049  *
0050  * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
0051  *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
0052  *   mostly works, except that with "usbnet" it's easy to trigger cases
0053  *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
0054  *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
0055  *   although ARP RX wins.  (That test was done with a full speed link.)
0056  */
0057 
0058 
0059 /*
0060  * NOTE on endpoint usage:
0061  *
0062  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
0063  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
0064  * (Yes, bulk _could_ use more of the endpoints than that, and would even
0065  * benefit from it.)
0066  *
0067  * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
0068  * So far that scheduling is both dumb and optimistic:  the endpoint will be
0069  * "claimed" until its software queue is no longer refilled.  No multiplexing
0070  * of transfers between endpoints, or anything clever.
0071  */
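/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * driver): under the "claimed" policy described above, a hardware
 * endpoint is available for a new INTERRUPT/ISOCHRONOUS transfer only
 * while neither of its queue-head pointers is in use.  This is the same
 * in_qh/out_qh state that musb_ep_set_qh() below maintains; the helper
 * merely restates that rule.
 */
static inline bool example_hw_ep_is_claimed(const struct musb_hw_ep *hw_ep)
{
    return hw_ep->in_qh != NULL || hw_ep->out_qh != NULL;
}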
0072 
0073 struct musb *hcd_to_musb(struct usb_hcd *hcd)
0074 {
0075     return *(struct musb **) hcd->hcd_priv;
0076 }
0077 
0078 
0079 static void musb_ep_program(struct musb *musb, u8 epnum,
0080             struct urb *urb, int is_out,
0081             u8 *buf, u32 offset, u32 len);
0082 
0083 /*
0084  * Clear TX fifo. Needed to avoid BABBLE errors.
0085  */
0086 static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
0087 {
0088     struct musb *musb = ep->musb;
0089     void __iomem    *epio = ep->regs;
0090     u16     csr;
0091     int     retries = 1000;
0092 
0093     csr = musb_readw(epio, MUSB_TXCSR);
0094     while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
0095         csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
0096         musb_writew(epio, MUSB_TXCSR, csr);
0097         csr = musb_readw(epio, MUSB_TXCSR);
0098 
0099         /*
0100          * FIXME: sometimes the tx fifo flush failed, it has been
0101          * observed during device disconnect on AM335x.
0102          *
0103          * To reproduce the issue, ensure tx urb(s) are queued when
0104          * unplug the usb device which is connected to AM335x usb
0105          * host port.
0106          *
0107          * I found using a usb-ethernet device and running iperf
0108          * (client on AM335x) has very high chance to trigger it.
0109          *
0110          * Better to turn on musb_dbg() in musb_cleanup_urb() with
0111          * CPPI enabled to see the issue when aborting the tx channel.
0112          */
0113         if (dev_WARN_ONCE(musb->controller, retries-- < 1,
0114                 "Could not flush host TX%d fifo: csr: %04x\n",
0115                 ep->epnum, csr))
0116             return;
0117         mdelay(1);
0118     }
0119 }
0120 
0121 static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
0122 {
0123     void __iomem    *epio = ep->regs;
0124     u16     csr;
0125     int     retries = 5;
0126 
0127     /* scrub any data left in the fifo */
0128     do {
0129         csr = musb_readw(epio, MUSB_TXCSR);
0130         if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
0131             break;
0132         musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
0133         csr = musb_readw(epio, MUSB_TXCSR);
0134         udelay(10);
0135     } while (--retries);
0136 
0137     WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
0138             ep->epnum, csr);
0139 
0140     /* and reset for the next transfer */
0141     musb_writew(epio, MUSB_TXCSR, 0);
0142 }
0143 
0144 /*
0145  * Start transmit. Caller is responsible for locking shared resources.
0146  * musb must be locked.
0147  */
0148 static inline void musb_h_tx_start(struct musb_hw_ep *ep)
0149 {
0150     u16 txcsr;
0151 
0152     /* NOTE: no locks here; caller should lock and select EP */
0153     if (ep->epnum) {
0154         txcsr = musb_readw(ep->regs, MUSB_TXCSR);
0155         txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
0156         musb_writew(ep->regs, MUSB_TXCSR, txcsr);
0157     } else {
0158         txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
0159         musb_writew(ep->regs, MUSB_CSR0, txcsr);
0160     }
0161 
0162 }
0163 
0164 static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
0165 {
0166     u16 txcsr;
0167 
0168     /* NOTE: no locks here; caller should lock and select EP */
0169     txcsr = musb_readw(ep->regs, MUSB_TXCSR);
0170     txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
0171     if (is_cppi_enabled(ep->musb))
0172         txcsr |= MUSB_TXCSR_DMAMODE;
0173     musb_writew(ep->regs, MUSB_TXCSR, txcsr);
0174 }
0175 
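/*
 * Track which queue head currently owns each direction of the hardware
 * endpoint.  On a shared-FIFO endpoint both directions use the same
 * queue head, so both pointers are updated; otherwise only the
 * requested direction is touched.
 */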
0176 static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
0177 {
0178     if (is_in != 0 || ep->is_shared_fifo)
0179         ep->in_qh  = qh;
0180     if (is_in == 0 || ep->is_shared_fifo)
0181         ep->out_qh = qh;
0182 }
0183 
0184 static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
0185 {
0186     return is_in ? ep->in_qh : ep->out_qh;
0187 }
0188 
0189 /*
0190  * Start the URB at the front of an endpoint's queue
0191  * end must be claimed from the caller.
0192  *
0193  * Context: controller locked, irqs blocked
0194  */
0195 static void
0196 musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
0197 {
0198     u32         len;
0199     void __iomem        *mbase =  musb->mregs;
0200     struct urb      *urb = next_urb(qh);
0201     void            *buf = urb->transfer_buffer;
0202     u32         offset = 0;
0203     struct musb_hw_ep   *hw_ep = qh->hw_ep;
0204     int         epnum = hw_ep->epnum;
0205 
0206     /* initialize software qh state */
0207     qh->offset = 0;
0208     qh->segsize = 0;
0209 
0210     /* gather right source of data */
0211     switch (qh->type) {
0212     case USB_ENDPOINT_XFER_CONTROL:
0213         /* control transfers always start with SETUP */
0214         is_in = 0;
0215         musb->ep0_stage = MUSB_EP0_START;
0216         buf = urb->setup_packet;
0217         len = 8;
0218         break;
0219     case USB_ENDPOINT_XFER_ISOC:
0220         qh->iso_idx = 0;
0221         qh->frame = 0;
0222         offset = urb->iso_frame_desc[0].offset;
0223         len = urb->iso_frame_desc[0].length;
0224         break;
0225     default:        /* bulk, interrupt */
0226         /* actual_length may be nonzero on retry paths */
0227         buf = urb->transfer_buffer + urb->actual_length;
0228         len = urb->transfer_buffer_length - urb->actual_length;
0229     }
0230 
0231     trace_musb_urb_start(musb, urb);
0232 
0233     /* Configure endpoint */
0234     musb_ep_set_qh(hw_ep, is_in, qh);
0235     musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
0236 
0237     /* transmit may have more work: start it when it is time */
0238     if (is_in)
0239         return;
0240 
0241     /* determine if the time is right for a periodic transfer */
0242     switch (qh->type) {
0243     case USB_ENDPOINT_XFER_ISOC:
0244     case USB_ENDPOINT_XFER_INT:
0245         musb_dbg(musb, "check whether there's still time for periodic Tx");
0246         /* FIXME this doesn't implement that scheduling policy ...
0247          * or handle framecounter wrapping
0248          */
0249         if (1) {    /* Always assume URB_ISO_ASAP */
0250             /* REVISIT the SOF irq handler shouldn't duplicate
0251              * this code; and we don't init urb->start_frame...
0252              */
0253             qh->frame = 0;
0254             goto start;
0255         } else {
0256             qh->frame = urb->start_frame;
0257             /* enable SOF interrupt so we can count down */
0258             musb_dbg(musb, "SOF for %d", epnum);
0259 #if 1 /* ifndef CONFIG_ARCH_DAVINCI */
0260             musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
0261 #endif
0262         }
0263         break;
0264     default:
0265 start:
0266         musb_dbg(musb, "Start TX%d %s", epnum,
0267             hw_ep->tx_channel ? "dma" : "pio");
0268 
0269         if (!hw_ep->tx_channel)
0270             musb_h_tx_start(hw_ep);
0271         else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
0272             musb_h_tx_dma_start(hw_ep);
0273     }
0274 }
0275 
0276 /* Context: caller owns controller lock, IRQs are blocked */
0277 static void musb_giveback(struct musb *musb, struct urb *urb, int status)
0278 __releases(musb->lock)
0279 __acquires(musb->lock)
0280 {
0281     trace_musb_urb_gb(musb, urb);
0282 
0283     usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
0284     spin_unlock(&musb->lock);
0285     usb_hcd_giveback_urb(musb->hcd, urb, status);
0286     spin_lock(&musb->lock);
0287 }
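/*
 * Editor's illustrative sketch (hypothetical class-driver completion
 * handler, not part of this file): why musb_giveback() drops musb->lock
 * around usb_hcd_giveback_urb().  The completion callback runs
 * synchronously and may resubmit immediately, re-entering the host
 * controller driver's enqueue path, which takes musb->lock again;
 * holding the lock across the giveback would deadlock.
 */
static void example_urb_complete(struct urb *urb)
{
    if (urb->status == 0)
        usb_submit_urb(urb, GFP_ATOMIC);    /* re-enters the HCD */
}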
0288 
0289 /*
0290  * Advance this hardware endpoint's queue, completing the specified URB and
0291  * advancing to either the next URB queued to that qh, or else invalidating
0292  * that qh and advancing to the next qh scheduled after the current one.
0293  *
0294  * Context: caller owns controller lock, IRQs are blocked
0295  */
0296 static void musb_advance_schedule(struct musb *musb, struct urb *urb,
0297                   struct musb_hw_ep *hw_ep, int is_in)
0298 {
0299     struct musb_qh      *qh = musb_ep_get_qh(hw_ep, is_in);
0300     struct musb_hw_ep   *ep = qh->hw_ep;
0301     int         ready = qh->is_ready;
0302     int         status;
0303     u16         toggle;
0304 
0305     status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
0306 
0307     /* save toggle eagerly, for paranoia */
0308     switch (qh->type) {
0309     case USB_ENDPOINT_XFER_BULK:
0310     case USB_ENDPOINT_XFER_INT:
0311         toggle = musb->io.get_toggle(qh, !is_in);
0312         usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
0313         break;
0314     case USB_ENDPOINT_XFER_ISOC:
0315         if (status == 0 && urb->error_count)
0316             status = -EXDEV;
0317         break;
0318     }
0319 
0320     qh->is_ready = 0;
0321     musb_giveback(musb, urb, status);
0322     qh->is_ready = ready;
0323 
0324     /* reclaim resources (and bandwidth) ASAP; deschedule it, and
0325      * invalidate qh as soon as list_empty(&hep->urb_list)
0326      */
0327     if (list_empty(&qh->hep->urb_list)) {
0328         struct list_head    *head;
0329         struct dma_controller   *dma = musb->dma_controller;
0330 
0331         if (is_in) {
0332             ep->rx_reinit = 1;
0333             if (ep->rx_channel) {
0334                 dma->channel_release(ep->rx_channel);
0335                 ep->rx_channel = NULL;
0336             }
0337         } else {
0338             ep->tx_reinit = 1;
0339             if (ep->tx_channel) {
0340                 dma->channel_release(ep->tx_channel);
0341                 ep->tx_channel = NULL;
0342             }
0343         }
0344 
0345         /* Clobber old pointers to this qh */
0346         musb_ep_set_qh(ep, is_in, NULL);
0347         qh->hep->hcpriv = NULL;
0348 
0349         switch (qh->type) {
0350 
0351         case USB_ENDPOINT_XFER_CONTROL:
0352         case USB_ENDPOINT_XFER_BULK:
0353             /* fifo policy for these lists, except that NAKing
0354              * should rotate a qh to the end (for fairness).
0355              */
0356             if (qh->mux == 1) {
0357                 head = qh->ring.prev;
0358                 list_del(&qh->ring);
0359                 kfree(qh);
0360                 qh = first_qh(head);
0361                 break;
0362             }
0363             fallthrough;
0364 
0365         case USB_ENDPOINT_XFER_ISOC:
0366         case USB_ENDPOINT_XFER_INT:
0367             /* this is where periodic bandwidth should be
0368              * de-allocated if it's tracked and allocated;
0369              * and where we'd update the schedule tree...
0370              */
0371             kfree(qh);
0372             qh = NULL;
0373             break;
0374         }
0375     }
0376 
0377     if (qh != NULL && qh->is_ready) {
0378         musb_dbg(musb, "... next ep%d %cX urb %p",
0379             hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
0380         musb_start_urb(musb, is_in, qh);
0381     }
0382 }
0383 
0384 static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
0385 {
0386     /* we don't want fifo to fill itself again;
0387      * ignore dma (various models),
0388      * leave toggle alone (may not have been saved yet)
0389      */
0390     csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
0391     csr &= ~(MUSB_RXCSR_H_REQPKT
0392         | MUSB_RXCSR_H_AUTOREQ
0393         | MUSB_RXCSR_AUTOCLEAR);
0394 
0395     /* write 2x to allow double buffering */
0396     musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
0397     musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
0398 
0399     /* flush writebuffer */
0400     return musb_readw(hw_ep->regs, MUSB_RXCSR);
0401 }
0402 
0403 /*
0404  * PIO RX for a packet (or part of it).
0405  */
0406 static bool
0407 musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
0408 {
0409     u16         rx_count;
0410     u8          *buf;
0411     u16         csr;
0412     bool            done = false;
0413     u32         length;
0414     int         do_flush = 0;
0415     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
0416     void __iomem        *epio = hw_ep->regs;
0417     struct musb_qh      *qh = hw_ep->in_qh;
0418     int         pipe = urb->pipe;
0419     void            *buffer = urb->transfer_buffer;
0420 
0421     /* musb_ep_select(mbase, epnum); */
0422     rx_count = musb_readw(epio, MUSB_RXCOUNT);
0423     musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
0424             urb->transfer_buffer, qh->offset,
0425             urb->transfer_buffer_length);
0426 
0427     /* unload FIFO */
0428     if (usb_pipeisoc(pipe)) {
0429         int                 status = 0;
0430         struct usb_iso_packet_descriptor    *d;
0431 
0432         if (iso_err) {
0433             status = -EILSEQ;
0434             urb->error_count++;
0435         }
0436 
0437         d = urb->iso_frame_desc + qh->iso_idx;
0438         buf = buffer + d->offset;
0439         length = d->length;
0440         if (rx_count > length) {
0441             if (status == 0) {
0442                 status = -EOVERFLOW;
0443                 urb->error_count++;
0444             }
0445             musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
0446             do_flush = 1;
0447         } else
0448             length = rx_count;
0449         urb->actual_length += length;
0450         d->actual_length = length;
0451 
0452         d->status = status;
0453 
0454         /* see if we are done */
0455         done = (++qh->iso_idx >= urb->number_of_packets);
0456     } else {
0457         /* non-isoch */
0458         buf = buffer + qh->offset;
0459         length = urb->transfer_buffer_length - qh->offset;
0460         if (rx_count > length) {
0461             if (urb->status == -EINPROGRESS)
0462                 urb->status = -EOVERFLOW;
0463             musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
0464             do_flush = 1;
0465         } else
0466             length = rx_count;
0467         urb->actual_length += length;
0468         qh->offset += length;
0469 
0470         /* see if we are done */
0471         done = (urb->actual_length == urb->transfer_buffer_length)
0472             || (rx_count < qh->maxpacket)
0473             || (urb->status != -EINPROGRESS);
0474         if (done
0475                 && (urb->status == -EINPROGRESS)
0476                 && (urb->transfer_flags & URB_SHORT_NOT_OK)
0477                 && (urb->actual_length
0478                     < urb->transfer_buffer_length))
0479             urb->status = -EREMOTEIO;
0480     }
0481 
0482     musb_read_fifo(hw_ep, length, buf);
0483 
0484     csr = musb_readw(epio, MUSB_RXCSR);
0485     csr |= MUSB_RXCSR_H_WZC_BITS;
0486     if (unlikely(do_flush))
0487         musb_h_flush_rxfifo(hw_ep, csr);
0488     else {
0489         /* REVISIT this assumes AUTOCLEAR is never set */
0490         csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
0491         if (!done)
0492             csr |= MUSB_RXCSR_H_REQPKT;
0493         musb_writew(epio, MUSB_RXCSR, csr);
0494     }
0495 
0496     return done;
0497 }
0498 
0499 /* we don't always need to reinit a given side of an endpoint...
0500  * when we do, use tx/rx reinit routine and then construct a new CSR
0501  * to address data toggle, NYET, and DMA or PIO.
0502  *
0503  * it's possible that driver bugs (especially for DMA) or aborting a
0504  * transfer might have left the endpoint busier than it should be.
0505  * the busy/not-empty tests are basically paranoia.
0506  */
0507 static void
0508 musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
0509 {
0510     struct musb_hw_ep *ep = musb->endpoints + epnum;
0511     u16 csr;
0512 
0513     /* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
0514      * That always uses tx_reinit since ep0 repurposes TX register
0515      * offsets; the initial SETUP packet is also a kind of OUT.
0516      */
0517 
0518     /* if programmed for Tx, put it in RX mode */
0519     if (ep->is_shared_fifo) {
0520         csr = musb_readw(ep->regs, MUSB_TXCSR);
0521         if (csr & MUSB_TXCSR_MODE) {
0522             musb_h_tx_flush_fifo(ep);
0523             csr = musb_readw(ep->regs, MUSB_TXCSR);
0524             musb_writew(ep->regs, MUSB_TXCSR,
0525                     csr | MUSB_TXCSR_FRCDATATOG);
0526         }
0527 
0528         /*
0529          * Clear the MODE bit (and everything else) to enable Rx.
0530          * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
0531          */
0532         if (csr & MUSB_TXCSR_DMAMODE)
0533             musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
0534         musb_writew(ep->regs, MUSB_TXCSR, 0);
0535 
0536     }
0537     /* scrub all previous state, clearing toggle */
0538     csr = musb_readw(ep->regs, MUSB_RXCSR);
0539     if (csr & MUSB_RXCSR_RXPKTRDY)
0540         WARNING("rx%d, packet/%d ready?\n", ep->epnum,
0541             musb_readw(ep->regs, MUSB_RXCOUNT));
0542 
0543     musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
0544 
0545     /* target addr and (for multipoint) hub addr/port */
0546     if (musb->is_multipoint) {
0547         musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
0548         musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
0549         musb_write_rxhubport(musb, epnum, qh->h_port_reg);
0550     } else
0551         musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
0552 
0553     /* protocol/endpoint, interval/NAKlimit, i/o size */
0554     musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
0555     musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
0556     /* NOTE: bulk combining rewrites high bits of maxpacket */
0557     /* Set RXMAXP with the FIFO size of the endpoint
0558      * to disable double buffer mode.
0559      */
0560     musb_writew(ep->regs, MUSB_RXMAXP,
0561             qh->maxpacket | ((qh->hb_mult - 1) << 11));
0562 
0563     ep->rx_reinit = 0;
0564 }
0565 
0566 static void musb_tx_dma_set_mode_mentor(struct musb_hw_ep *hw_ep,
0567                     struct musb_qh *qh,
0568                     u32 *length, u8 *mode)
0569 {
0570     struct dma_channel  *channel = hw_ep->tx_channel;
0571     void __iomem        *epio = hw_ep->regs;
0572     u16         pkt_size = qh->maxpacket;
0573     u16         csr;
0574 
0575     if (*length > channel->max_len)
0576         *length = channel->max_len;
0577 
0578     csr = musb_readw(epio, MUSB_TXCSR);
0579     if (*length > pkt_size) {
0580         *mode = 1;
0581         csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
0582         /* autoset shouldn't be set in high bandwidth */
0583         /*
0584          * Enable Autoset according to table
0585          * below
0586          * bulk_split  hb_mult  Autoset_Enable
0587          *     0          1     Yes (Normal)
0588          *     0         >1     No  (High BW ISO)
0589          *     1          1     Yes (HS bulk)
0590          *     1         >1     Yes (FS bulk)
0591          */
0592         if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
0593                     can_bulk_split(hw_ep->musb, qh->type)))
0594             csr |= MUSB_TXCSR_AUTOSET;
0595     } else {
0596         *mode = 0;
0597         csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
0598         csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
0599     }
0600     channel->desired_mode = *mode;
0601     musb_writew(epio, MUSB_TXCSR, csr);
0602 }
0603 
0604 static void musb_tx_dma_set_mode_cppi_tusb(struct musb_hw_ep *hw_ep,
0605                        struct urb *urb,
0606                        u8 *mode)
0607 {
0608     struct dma_channel *channel = hw_ep->tx_channel;
0609 
0610     channel->actual_len = 0;
0611 
0612     /*
0613      * TX uses "RNDIS" mode automatically but needs help
0614      * to identify the zero-length-final-packet case.
0615      */
0616     *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
0617 }
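/*
 * Editor's illustrative sketch (hypothetical submission code, not part
 * of this file): how the zero-length-final-packet case above arises.
 * A class driver sets URB_ZERO_PACKET on a bulk OUT urb whose length is
 * an exact multiple of wMaxPacketSize, asking the HCD to append a ZLP;
 * that flag is what selects mode 1 in musb_tx_dma_set_mode_cppi_tusb().
 * The endpoint number used here is made up.
 */
static int example_submit_bulk_out(struct usb_device *udev, void *buf,
                                   int len, usb_complete_t done, void *ctx)
{
    struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

    if (!urb)
        return -ENOMEM;
    usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
                      buf, len, done, ctx);
    urb->transfer_flags |= URB_ZERO_PACKET;    /* request trailing ZLP */
    return usb_submit_urb(urb, GFP_KERNEL);
}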
0618 
0619 static bool musb_tx_dma_program(struct dma_controller *dma,
0620         struct musb_hw_ep *hw_ep, struct musb_qh *qh,
0621         struct urb *urb, u32 offset, u32 length)
0622 {
0623     struct dma_channel  *channel = hw_ep->tx_channel;
0624     u16         pkt_size = qh->maxpacket;
0625     u8          mode;
0626 
0627     if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
0628         musb_tx_dma_set_mode_mentor(hw_ep, qh,
0629                         &length, &mode);
0630     else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
0631         musb_tx_dma_set_mode_cppi_tusb(hw_ep, urb, &mode);
0632     else
0633         return false;
0634 
0635     qh->segsize = length;
0636 
0637     /*
0638      * Ensure the data reaches main memory before starting the
0639      * DMA transfer.
0640      */
0641     wmb();
0642 
0643     if (!dma->channel_program(channel, pkt_size, mode,
0644             urb->transfer_dma + offset, length)) {
0645         void __iomem *epio = hw_ep->regs;
0646         u16 csr;
0647 
0648         dma->channel_release(channel);
0649         hw_ep->tx_channel = NULL;
0650 
0651         csr = musb_readw(epio, MUSB_TXCSR);
0652         csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
0653         musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
0654         return false;
0655     }
0656     return true;
0657 }
0658 
0659 /*
0660  * Program an HDRC endpoint as per the given URB
0661  * Context: irqs blocked, controller lock held
0662  */
0663 static void musb_ep_program(struct musb *musb, u8 epnum,
0664             struct urb *urb, int is_out,
0665             u8 *buf, u32 offset, u32 len)
0666 {
0667     struct dma_controller   *dma_controller;
0668     struct dma_channel  *dma_channel;
0669     u8          dma_ok;
0670     void __iomem        *mbase = musb->mregs;
0671     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
0672     void __iomem        *epio = hw_ep->regs;
0673     struct musb_qh      *qh = musb_ep_get_qh(hw_ep, !is_out);
0674     u16         packet_sz = qh->maxpacket;
0675     u8          use_dma = 1;
0676     u16         csr;
0677 
0678     musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
0679                 "h_addr%02x h_port%02x bytes %d",
0680             is_out ? "-->" : "<--",
0681             epnum, urb, urb->dev->speed,
0682             qh->addr_reg, qh->epnum, is_out ? "out" : "in",
0683             qh->h_addr_reg, qh->h_port_reg,
0684             len);
0685 
0686     musb_ep_select(mbase, epnum);
0687 
0688     if (is_out && !len) {
0689         use_dma = 0;
0690         csr = musb_readw(epio, MUSB_TXCSR);
0691         csr &= ~MUSB_TXCSR_DMAENAB;
0692         musb_writew(epio, MUSB_TXCSR, csr);
0693         hw_ep->tx_channel = NULL;
0694     }
0695 
0696     /* candidate for DMA? */
0697     dma_controller = musb->dma_controller;
0698     if (use_dma && is_dma_capable() && epnum && dma_controller) {
0699         dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
0700         if (!dma_channel) {
0701             dma_channel = dma_controller->channel_alloc(
0702                     dma_controller, hw_ep, is_out);
0703             if (is_out)
0704                 hw_ep->tx_channel = dma_channel;
0705             else
0706                 hw_ep->rx_channel = dma_channel;
0707         }
0708     } else
0709         dma_channel = NULL;
0710 
0711     /* make sure we clear DMAEnab, autoSet bits from previous run */
0712 
0713     /* OUT/transmit/EP0 or IN/receive? */
0714     if (is_out) {
0715         u16 csr;
0716         u16 int_txe;
0717         u16 load_count;
0718 
0719         csr = musb_readw(epio, MUSB_TXCSR);
0720 
0721         /* disable interrupt in case we flush */
0722         int_txe = musb->intrtxe;
0723         musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
0724 
0725         /* general endpoint setup */
0726         if (epnum) {
0727             /* flush all old state, set default */
0728             /*
0729              * We could be flushing valid
0730              * packets in double buffering
0731              * case
0732              */
0733             if (!hw_ep->tx_double_buffered)
0734                 musb_h_tx_flush_fifo(hw_ep);
0735 
0736             /*
0737              * We must not clear the DMAMODE bit before or in
0738              * the same cycle with the DMAENAB bit, so we clear
0739              * the latter first...
0740              */
0741             csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
0742                     | MUSB_TXCSR_AUTOSET
0743                     | MUSB_TXCSR_DMAENAB
0744                     | MUSB_TXCSR_FRCDATATOG
0745                     | MUSB_TXCSR_H_RXSTALL
0746                     | MUSB_TXCSR_H_ERROR
0747                     | MUSB_TXCSR_TXPKTRDY
0748                     );
0749             csr |= MUSB_TXCSR_MODE;
0750 
0751             if (!hw_ep->tx_double_buffered)
0752                 csr |= musb->io.set_toggle(qh, is_out, urb);
0753 
0754             musb_writew(epio, MUSB_TXCSR, csr);
0755             /* REVISIT may need to clear FLUSHFIFO ... */
0756             csr &= ~MUSB_TXCSR_DMAMODE;
0757             musb_writew(epio, MUSB_TXCSR, csr);
0758             csr = musb_readw(epio, MUSB_TXCSR);
0759         } else {
0760             /* endpoint 0: just flush */
0761             musb_h_ep0_flush_fifo(hw_ep);
0762         }
0763 
0764         /* target addr and (for multipoint) hub addr/port */
0765         if (musb->is_multipoint) {
0766             musb_write_txfunaddr(musb, epnum, qh->addr_reg);
0767             musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
0768             musb_write_txhubport(musb, epnum, qh->h_port_reg);
0769 /* FIXME if !epnum, do the same for RX ... */
0770         } else
0771             musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
0772 
0773         /* protocol/endpoint/interval/NAKlimit */
0774         if (epnum) {
0775             musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
0776             if (can_bulk_split(musb, qh->type)) {
0777                 qh->hb_mult = hw_ep->max_packet_sz_tx
0778                         / packet_sz;
0779                 musb_writew(epio, MUSB_TXMAXP, packet_sz
0780                     | ((qh->hb_mult) - 1) << 11);
0781             } else {
0782                 musb_writew(epio, MUSB_TXMAXP,
0783                         qh->maxpacket |
0784                         ((qh->hb_mult - 1) << 11));
0785             }
0786             musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
0787         } else {
0788             musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
0789             if (musb->is_multipoint)
0790                 musb_writeb(epio, MUSB_TYPE0,
0791                         qh->type_reg);
0792         }
0793 
0794         if (can_bulk_split(musb, qh->type))
0795             load_count = min((u32) hw_ep->max_packet_sz_tx,
0796                         len);
0797         else
0798             load_count = min((u32) packet_sz, len);
0799 
0800         if (dma_channel && musb_tx_dma_program(dma_controller,
0801                     hw_ep, qh, urb, offset, len))
0802             load_count = 0;
0803 
0804         if (load_count) {
0805             /* PIO to load FIFO */
0806             qh->segsize = load_count;
0807             if (!buf) {
0808                 sg_miter_start(&qh->sg_miter, urb->sg, 1,
0809                         SG_MITER_ATOMIC
0810                         | SG_MITER_FROM_SG);
0811                 if (!sg_miter_next(&qh->sg_miter)) {
0812                     dev_err(musb->controller,
0813                             "error: sg "
0814                             "list empty\n");
0815                     sg_miter_stop(&qh->sg_miter);
0816                     goto finish;
0817                 }
0818                 buf = qh->sg_miter.addr + urb->sg->offset +
0819                     urb->actual_length;
0820                 load_count = min_t(u32, load_count,
0821                         qh->sg_miter.length);
0822                 musb_write_fifo(hw_ep, load_count, buf);
0823                 qh->sg_miter.consumed = load_count;
0824                 sg_miter_stop(&qh->sg_miter);
0825             } else
0826                 musb_write_fifo(hw_ep, load_count, buf);
0827         }
0828 finish:
0829         /* re-enable interrupt */
0830         musb_writew(mbase, MUSB_INTRTXE, int_txe);
0831 
0832     /* IN/receive */
0833     } else {
0834         u16 csr = 0;
0835 
0836         if (hw_ep->rx_reinit) {
0837             musb_rx_reinit(musb, qh, epnum);
0838             csr |= musb->io.set_toggle(qh, is_out, urb);
0839 
0840             if (qh->type == USB_ENDPOINT_XFER_INT)
0841                 csr |= MUSB_RXCSR_DISNYET;
0842 
0843         } else {
0844             csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
0845 
0846             if (csr & (MUSB_RXCSR_RXPKTRDY
0847                     | MUSB_RXCSR_DMAENAB
0848                     | MUSB_RXCSR_H_REQPKT))
0849                 ERR("broken !rx_reinit, ep%d csr %04x\n",
0850                         hw_ep->epnum, csr);
0851 
0852             /* scrub any stale state, leaving toggle alone */
0853             csr &= MUSB_RXCSR_DISNYET;
0854         }
0855 
0856         /* kick things off */
0857 
0858         if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
0859             /* Candidate for DMA */
0860             dma_channel->actual_len = 0L;
0861             qh->segsize = len;
0862 
0863             /* AUTOREQ is in a DMA register */
0864             musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
0865             csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
0866 
0867             /*
0868              * Unless caller treats short RX transfers as
0869              * errors, we dare not queue multiple transfers.
0870              */
0871             dma_ok = dma_controller->channel_program(dma_channel,
0872                     packet_sz, !(urb->transfer_flags &
0873                              URB_SHORT_NOT_OK),
0874                     urb->transfer_dma + offset,
0875                     qh->segsize);
0876             if (!dma_ok) {
0877                 dma_controller->channel_release(dma_channel);
0878                 hw_ep->rx_channel = dma_channel = NULL;
0879             } else
0880                 csr |= MUSB_RXCSR_DMAENAB;
0881         }
0882 
0883         csr |= MUSB_RXCSR_H_REQPKT;
0884         musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
0885         musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
0886         csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
0887     }
0888 }
0889 
0890 /* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
0891  * the end; avoids starvation for other endpoints.
0892  */
0893 static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
0894     int is_in)
0895 {
0896     struct dma_channel  *dma;
0897     struct urb      *urb;
0898     void __iomem        *mbase = musb->mregs;
0899     void __iomem        *epio = ep->regs;
0900     struct musb_qh      *cur_qh, *next_qh;
0901     u16         rx_csr, tx_csr;
0902     u16         toggle;
0903 
0904     musb_ep_select(mbase, ep->epnum);
0905     if (is_in) {
0906         dma = is_dma_capable() ? ep->rx_channel : NULL;
0907 
0908         /*
0909          * Need to stop the transaction by clearing REQPKT first
0910          * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
0911          * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
0912          */
0913         rx_csr = musb_readw(epio, MUSB_RXCSR);
0914         rx_csr |= MUSB_RXCSR_H_WZC_BITS;
0915         rx_csr &= ~MUSB_RXCSR_H_REQPKT;
0916         musb_writew(epio, MUSB_RXCSR, rx_csr);
0917         rx_csr &= ~MUSB_RXCSR_DATAERROR;
0918         musb_writew(epio, MUSB_RXCSR, rx_csr);
0919 
0920         cur_qh = first_qh(&musb->in_bulk);
0921     } else {
0922         dma = is_dma_capable() ? ep->tx_channel : NULL;
0923 
0924         /* clear nak timeout bit */
0925         tx_csr = musb_readw(epio, MUSB_TXCSR);
0926         tx_csr |= MUSB_TXCSR_H_WZC_BITS;
0927         tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
0928         musb_writew(epio, MUSB_TXCSR, tx_csr);
0929 
0930         cur_qh = first_qh(&musb->out_bulk);
0931     }
0932     if (cur_qh) {
0933         urb = next_urb(cur_qh);
0934         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
0935             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
0936             musb->dma_controller->channel_abort(dma);
0937             urb->actual_length += dma->actual_len;
0938             dma->actual_len = 0L;
0939         }
0940         toggle = musb->io.get_toggle(cur_qh, !is_in);
0941         usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
0942 
0943         if (is_in) {
0944             /* move cur_qh to end of queue */
0945             list_move_tail(&cur_qh->ring, &musb->in_bulk);
0946 
0947             /* get the next qh from musb->in_bulk */
0948             next_qh = first_qh(&musb->in_bulk);
0949 
0950             /* set rx_reinit and schedule the next qh */
0951             ep->rx_reinit = 1;
0952         } else {
0953             /* move cur_qh to end of queue */
0954             list_move_tail(&cur_qh->ring, &musb->out_bulk);
0955 
0956             /* get the next qh from musb->out_bulk */
0957             next_qh = first_qh(&musb->out_bulk);
0958 
0959             /* set tx_reinit and schedule the next qh */
0960             ep->tx_reinit = 1;
0961         }
0962 
0963         if (next_qh)
0964             musb_start_urb(musb, is_in, next_qh);
0965     }
0966 }
0967 
0968 /*
0969  * Service the default endpoint (ep0) as host.
0970  * Return true until it's time to start the status stage.
0971  */
0972 static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
0973 {
0974     bool             more = false;
0975     u8          *fifo_dest = NULL;
0976     u16         fifo_count = 0;
0977     struct musb_hw_ep   *hw_ep = musb->control_ep;
0978     struct musb_qh      *qh = hw_ep->in_qh;
0979     struct usb_ctrlrequest  *request;
0980 
0981     switch (musb->ep0_stage) {
0982     case MUSB_EP0_IN:
0983         fifo_dest = urb->transfer_buffer + urb->actual_length;
0984         fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
0985                    urb->actual_length);
0986         if (fifo_count < len)
0987             urb->status = -EOVERFLOW;
0988 
0989         musb_read_fifo(hw_ep, fifo_count, fifo_dest);
0990 
0991         urb->actual_length += fifo_count;
0992         if (len < qh->maxpacket) {
0993             /* always terminate on short read; it's
0994              * rarely reported as an error.
0995              */
0996         } else if (urb->actual_length <
0997                 urb->transfer_buffer_length)
0998             more = true;
0999         break;
1000     case MUSB_EP0_START:
1001         request = (struct usb_ctrlrequest *) urb->setup_packet;
1002 
1003         if (!request->wLength) {
1004             musb_dbg(musb, "start no-DATA");
1005             break;
1006         } else if (request->bRequestType & USB_DIR_IN) {
1007             musb_dbg(musb, "start IN-DATA");
1008             musb->ep0_stage = MUSB_EP0_IN;
1009             more = true;
1010             break;
1011         } else {
1012             musb_dbg(musb, "start OUT-DATA");
1013             musb->ep0_stage = MUSB_EP0_OUT;
1014             more = true;
1015         }
1016         fallthrough;
1017     case MUSB_EP0_OUT:
1018         fifo_count = min_t(size_t, qh->maxpacket,
1019                    urb->transfer_buffer_length -
1020                    urb->actual_length);
1021         if (fifo_count) {
1022             fifo_dest = (u8 *) (urb->transfer_buffer
1023                     + urb->actual_length);
1024             musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
1025                     fifo_count,
1026                     (fifo_count == 1) ? "" : "s",
1027                     fifo_dest);
1028             musb_write_fifo(hw_ep, fifo_count, fifo_dest);
1029 
1030             urb->actual_length += fifo_count;
1031             more = true;
1032         }
1033         break;
1034     default:
1035         ERR("bogus ep0 stage %d\n", musb->ep0_stage);
1036         break;
1037     }
1038 
1039     return more;
1040 }
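/*
 * Editor's illustrative sketch (hypothetical request, not part of this
 * file): the 8-byte SETUP packet that feeds the MUSB_EP0_START case
 * above.  A GET_DESCRIPTOR(DEVICE) request has a non-zero wLength and
 * USB_DIR_IN set, so musb_h_ep0_continue() advances to MUSB_EP0_IN for
 * the data stage; a request with wLength == 0 would go straight to the
 * status stage instead.
 */
static const struct usb_ctrlrequest example_get_device_descriptor = {
    .bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
    .bRequest     = USB_REQ_GET_DESCRIPTOR,
    .wValue       = cpu_to_le16(USB_DT_DEVICE << 8),
    .wIndex       = 0,
    .wLength      = cpu_to_le16(18),    /* device descriptor length */
};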
1041 
1042 /*
1043  * Handle default endpoint interrupt as host. Only called in IRQ time
1044  * from musb_interrupt().
1045  *
1046  * called with controller irqlocked
1047  */
1048 irqreturn_t musb_h_ep0_irq(struct musb *musb)
1049 {
1050     struct urb      *urb;
1051     u16         csr, len;
1052     int         status = 0;
1053     void __iomem        *mbase = musb->mregs;
1054     struct musb_hw_ep   *hw_ep = musb->control_ep;
1055     void __iomem        *epio = hw_ep->regs;
1056     struct musb_qh      *qh = hw_ep->in_qh;
1057     bool            complete = false;
1058     irqreturn_t     retval = IRQ_NONE;
1059 
1060     /* ep0 only has one queue, "in" */
1061     urb = next_urb(qh);
1062 
1063     musb_ep_select(mbase, 0);
1064     csr = musb_readw(epio, MUSB_CSR0);
1065     len = (csr & MUSB_CSR0_RXPKTRDY)
1066             ? musb_readb(epio, MUSB_COUNT0)
1067             : 0;
1068 
1069     musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
1070         csr, qh, len, urb, musb->ep0_stage);
1071 
1072     /* if we just did status stage, we are done */
1073     if (MUSB_EP0_STATUS == musb->ep0_stage) {
1074         retval = IRQ_HANDLED;
1075         complete = true;
1076     }
1077 
1078     /* prepare status */
1079     if (csr & MUSB_CSR0_H_RXSTALL) {
1080         musb_dbg(musb, "STALLING ENDPOINT");
1081         status = -EPIPE;
1082 
1083     } else if (csr & MUSB_CSR0_H_ERROR) {
1084         musb_dbg(musb, "no response, csr0 %04x", csr);
1085         status = -EPROTO;
1086 
1087     } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
1088         musb_dbg(musb, "control NAK timeout");
1089 
1090         /* NOTE:  this code path would be a good place to PAUSE a
1091          * control transfer, if another one is queued, so that
1092          * ep0 is more likely to stay busy.  That's already done
1093          * for bulk RX transfers.
1094          *
1095          * if (qh->ring.next != &musb->control), then
1096          * we have a candidate... NAKing is *NOT* an error
1097          */
1098         musb_writew(epio, MUSB_CSR0, 0);
1099         retval = IRQ_HANDLED;
1100     }
1101 
1102     if (status) {
1103         musb_dbg(musb, "aborting");
1104         retval = IRQ_HANDLED;
1105         if (urb)
1106             urb->status = status;
1107         complete = true;
1108 
1109         /* use the proper sequence to abort the transfer */
1110         if (csr & MUSB_CSR0_H_REQPKT) {
1111             csr &= ~MUSB_CSR0_H_REQPKT;
1112             musb_writew(epio, MUSB_CSR0, csr);
1113             csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1114             musb_writew(epio, MUSB_CSR0, csr);
1115         } else {
1116             musb_h_ep0_flush_fifo(hw_ep);
1117         }
1118 
1119         musb_writeb(epio, MUSB_NAKLIMIT0, 0);
1120 
1121         /* clear it */
1122         musb_writew(epio, MUSB_CSR0, 0);
1123     }
1124 
1125     if (unlikely(!urb)) {
1126         /* stop endpoint since we have no place for its data, this
1127          * SHOULD NEVER HAPPEN! */
1128         ERR("no URB for end 0\n");
1129 
1130         musb_h_ep0_flush_fifo(hw_ep);
1131         goto done;
1132     }
1133 
1134     if (!complete) {
1135         /* call common logic and prepare response */
1136         if (musb_h_ep0_continue(musb, len, urb)) {
1137             /* more packets required */
1138             csr = (MUSB_EP0_IN == musb->ep0_stage)
1139                 ?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
1140         } else {
1141             /* data transfer complete; perform status phase */
1142             if (usb_pipeout(urb->pipe)
1143                     || !urb->transfer_buffer_length)
1144                 csr = MUSB_CSR0_H_STATUSPKT
1145                     | MUSB_CSR0_H_REQPKT;
1146             else
1147                 csr = MUSB_CSR0_H_STATUSPKT
1148                     | MUSB_CSR0_TXPKTRDY;
1149 
1150             /* disable ping token in status phase */
1151             csr |= MUSB_CSR0_H_DIS_PING;
1152 
1153             /* flag status stage */
1154             musb->ep0_stage = MUSB_EP0_STATUS;
1155 
1156             musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
1157 
1158         }
1159         musb_writew(epio, MUSB_CSR0, csr);
1160         retval = IRQ_HANDLED;
1161     } else
1162         musb->ep0_stage = MUSB_EP0_IDLE;
1163 
1164     /* call completion handler if done */
1165     if (complete)
1166         musb_advance_schedule(musb, urb, hw_ep, 1);
1167 done:
1168     return retval;
1169 }
1170 
1171 
1172 #ifdef CONFIG_USB_INVENTRA_DMA
1173 
1174 /* Host side TX (OUT) using Mentor DMA works as follows:
1175     submit_urb ->
1176         - if queue was empty, Program Endpoint
1177         - ... which starts DMA to fifo in mode 1 or 0
1178 
1179     DMA Isr (transfer complete) -> TxAvail()
1180         - Stop DMA (~DmaEnab)   (<--- Alert ... currently happens
1181                     only in musb_cleanup_urb)
1182         - TxPktRdy has to be set in mode 0 or for
1183             short packets in mode 1.
1184 */
1185 
1186 #endif
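/*
 * Editor's illustrative sketch (hypothetical helper, not part of this
 * file), restating the note above: after a Mentor DMA TX completion the
 * CPU must set TXPKTRDY itself in mode 0, and in mode 1 only when the
 * transfer ends with a short final packet; full mode-1 packets are
 * committed by the hardware.
 */
static inline bool example_need_manual_txpktrdy(u8 dma_mode, u32 dma_len,
                                                u16 maxpacket)
{
    return dma_mode == 0 || (dma_len % maxpacket) != 0;
}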
1187 
1188 /* Service a Tx-Available or dma completion irq for the endpoint */
1189 void musb_host_tx(struct musb *musb, u8 epnum)
1190 {
1191     int         pipe;
1192     bool            done = false;
1193     u16         tx_csr;
1194     size_t          length = 0;
1195     size_t          offset = 0;
1196     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
1197     void __iomem        *epio = hw_ep->regs;
1198     struct musb_qh      *qh = hw_ep->out_qh;
1199     struct urb      *urb = next_urb(qh);
1200     u32         status = 0;
1201     void __iomem        *mbase = musb->mregs;
1202     struct dma_channel  *dma;
1203     bool            transfer_pending = false;
1204 
1205     musb_ep_select(mbase, epnum);
1206     tx_csr = musb_readw(epio, MUSB_TXCSR);
1207 
1208     /* with CPPI, DMA sometimes triggers "extra" irqs */
1209     if (!urb) {
1210         musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
1211         return;
1212     }
1213 
1214     pipe = urb->pipe;
1215     dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1216     trace_musb_urb_tx(musb, urb);
1217     musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
1218             dma ? ", dma" : "");
1219 
1220     /* check for errors */
1221     if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1222         /* dma was disabled, fifo flushed */
1223         musb_dbg(musb, "TX end %d stall", epnum);
1224 
1225         /* stall; record URB status */
1226         status = -EPIPE;
1227 
1228     } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1229         /* (NON-ISO) dma was disabled, fifo flushed */
1230         musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
1231 
1232         status = -ETIMEDOUT;
1233 
1234     } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1235         if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
1236                 && !list_is_singular(&musb->out_bulk)) {
1237             musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
1238             musb_bulk_nak_timeout(musb, hw_ep, 0);
1239         } else {
1240             musb_dbg(musb, "TX ep%d device not responding", epnum);
1241             /* NOTE:  this code path would be a good place to PAUSE a
1242              * transfer, if there's some other (nonperiodic) tx urb
1243              * that could use this fifo.  (dma complicates it...)
1244              * That's already done for bulk RX transfers.
1245              *
1246              * if (bulk && qh->ring.next != &musb->out_bulk), then
1247              * we have a candidate... NAKing is *NOT* an error
1248              */
1249             musb_ep_select(mbase, epnum);
1250             musb_writew(epio, MUSB_TXCSR,
1251                     MUSB_TXCSR_H_WZC_BITS
1252                     | MUSB_TXCSR_TXPKTRDY);
1253         }
1254         return;
1255     }
1256 
1257 done:
1258     if (status) {
1259         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1260             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1261             musb->dma_controller->channel_abort(dma);
1262         }
1263 
1264         /* do the proper sequence to abort the transfer in the
1265          * usb core; the dma engine should already be stopped.
1266          */
1267         musb_h_tx_flush_fifo(hw_ep);
1268         tx_csr &= ~(MUSB_TXCSR_AUTOSET
1269                 | MUSB_TXCSR_DMAENAB
1270                 | MUSB_TXCSR_H_ERROR
1271                 | MUSB_TXCSR_H_RXSTALL
1272                 | MUSB_TXCSR_H_NAKTIMEOUT
1273                 );
1274 
1275         musb_ep_select(mbase, epnum);
1276         musb_writew(epio, MUSB_TXCSR, tx_csr);
1277         /* REVISIT may need to clear FLUSHFIFO ... */
1278         musb_writew(epio, MUSB_TXCSR, tx_csr);
1279         musb_writeb(epio, MUSB_TXINTERVAL, 0);
1280 
1281         done = true;
1282     }
1283 
1284     /* second cppi case */
1285     if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1286         musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
1287         return;
1288     }
1289 
1290     if (is_dma_capable() && dma && !status) {
1291         /*
1292          * DMA has completed.  But if we're using DMA mode 1 (multi
1293          * packet DMA), we need a terminal TXPKTRDY interrupt before
1294          * we can consider this transfer completed, lest we trash
1295          * its last packet when writing the next URB's data.  So we
1296          * switch back to mode 0 to get that interrupt; we'll come
1297          * back here once it happens.
1298          */
1299         if (tx_csr & MUSB_TXCSR_DMAMODE) {
1300             /*
1301              * We shouldn't clear DMAMODE with DMAENAB set; so
1302              * clear them in a safe order.  That should be OK
1303              * once TXPKTRDY has been set (and I've never seen
1304              * it being 0 at this moment -- DMA interrupt latency
1305              * is significant) but if it hasn't been then we have
1306              * no choice but to stop being polite and ignore the
1307              * programmer's guide... :-)
1308              *
1309              * Note that we must write TXCSR with TXPKTRDY cleared
1310              * in order not to re-trigger the packet send (this bit
1311              * can't be cleared by CPU), and there's another caveat:
1312              * TXPKTRDY may be set shortly and then cleared in the
1313              * double-buffered FIFO mode, so we do an extra TXCSR
1314              * read for debouncing...
1315              */
1316             tx_csr &= musb_readw(epio, MUSB_TXCSR);
1317             if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
1318                 tx_csr &= ~(MUSB_TXCSR_DMAENAB |
1319                         MUSB_TXCSR_TXPKTRDY);
1320                 musb_writew(epio, MUSB_TXCSR,
1321                         tx_csr | MUSB_TXCSR_H_WZC_BITS);
1322             }
1323             tx_csr &= ~(MUSB_TXCSR_DMAMODE |
1324                     MUSB_TXCSR_TXPKTRDY);
1325             musb_writew(epio, MUSB_TXCSR,
1326                     tx_csr | MUSB_TXCSR_H_WZC_BITS);
1327 
1328             /*
1329              * There is no guarantee that we'll get an interrupt
1330              * after clearing DMAMODE as we might have done this
1331              * too late (after TXPKTRDY was cleared by controller).
1332              * Re-read TXCSR as we have spoiled its previous value.
1333              */
1334             tx_csr = musb_readw(epio, MUSB_TXCSR);
1335         }
1336 
1337         /*
1338          * We may get here from a DMA completion or TXPKTRDY interrupt.
1339          * In any case, we must check the FIFO status here and bail out
1340          * only if the FIFO still has data -- that should prevent the
1341          * "missed" TXPKTRDY interrupts and deal with double-buffered
1342          * FIFO mode too...
1343          */
1344         if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
1345             musb_dbg(musb,
1346                 "DMA complete but FIFO not empty, CSR %04x",
1347                 tx_csr);
1348             return;
1349         }
1350     }
1351 
1352     if (!status || dma || usb_pipeisoc(pipe)) {
1353         if (dma)
1354             length = dma->actual_len;
1355         else
1356             length = qh->segsize;
1357         qh->offset += length;
1358 
1359         if (usb_pipeisoc(pipe)) {
1360             struct usb_iso_packet_descriptor    *d;
1361 
1362             d = urb->iso_frame_desc + qh->iso_idx;
1363             d->actual_length = length;
1364             d->status = status;
1365             if (++qh->iso_idx >= urb->number_of_packets) {
1366                 done = true;
1367             } else {
1368                 d++;
1369                 offset = d->offset;
1370                 length = d->length;
1371             }
1372         } else if (dma && urb->transfer_buffer_length == qh->offset) {
1373             done = true;
1374         } else {
1375             /* see if we need to send more data, or ZLP */
1376             if (qh->segsize < qh->maxpacket)
1377                 done = true;
1378             else if (qh->offset == urb->transfer_buffer_length
1379                     && !(urb->transfer_flags
1380                         & URB_ZERO_PACKET))
1381                 done = true;
1382             if (!done) {
1383                 offset = qh->offset;
1384                 length = urb->transfer_buffer_length - offset;
1385                 transfer_pending = true;
1386             }
1387         }
1388     }
1389 
1390     /* urb->status != -EINPROGRESS means request has been faulted,
1391      * so we must abort this transfer after cleanup
1392      */
1393     if (urb->status != -EINPROGRESS) {
1394         done = true;
1395         if (status == 0)
1396             status = urb->status;
1397     }
1398 
1399     if (done) {
1400         /* set status */
1401         urb->status = status;
1402         urb->actual_length = qh->offset;
1403         musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1404         return;
1405     } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1406         if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1407                 offset, length)) {
1408             if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
1409                 musb_h_tx_dma_start(hw_ep);
1410             return;
1411         }
1412     } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
1413         musb_dbg(musb, "not complete, but DMA enabled?");
1414         return;
1415     }
1416 
1417     /*
1418      * PIO: start next packet in this URB.
1419      *
1420      * REVISIT: some docs say that when hw_ep->tx_double_buffered,
1421      * (and presumably, FIFO is not half-full) we should write *two*
1422      * packets before updating TXCSR; other docs disagree...
1423      */
1424     if (length > qh->maxpacket)
1425         length = qh->maxpacket;
1426     /* Unmap the buffer so that CPU can use it */
1427     usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1428 
1429     /*
1430      * We need to map sg if the transfer_buffer is
1431      * NULL.
1432      */
1433     if (!urb->transfer_buffer) {
1434         /* sg_miter_start is already done in musb_ep_program */
1435         if (!sg_miter_next(&qh->sg_miter)) {
1436             dev_err(musb->controller, "error: sg list empty\n");
1437             sg_miter_stop(&qh->sg_miter);
1438             status = -EINVAL;
1439             goto done;
1440         }
1441         length = min_t(u32, length, qh->sg_miter.length);
1442         musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
1443         qh->sg_miter.consumed = length;
1444         sg_miter_stop(&qh->sg_miter);
1445     } else {
1446         musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1447     }
1448 
1449     qh->segsize = length;
1450 
1451     musb_ep_select(mbase, epnum);
1452     musb_writew(epio, MUSB_TXCSR,
1453             MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1454 }
1455 
1456 #ifdef CONFIG_USB_TI_CPPI41_DMA
1457 /* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
1458 static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1459                   struct musb_hw_ep *hw_ep,
1460                   struct musb_qh *qh,
1461                   struct urb *urb,
1462                   size_t len)
1463 {
1464     struct dma_channel *channel = hw_ep->rx_channel;
1465     void __iomem *epio = hw_ep->regs;
1466     dma_addr_t *buf;
1467     u32 length;
1468     u16 val;
1469 
1470     buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
1471         (u32)urb->transfer_dma;
1472 
1473     length = urb->iso_frame_desc[qh->iso_idx].length;
1474 
1475     val = musb_readw(epio, MUSB_RXCSR);
1476     val |= MUSB_RXCSR_DMAENAB;
1477     musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1478 
1479     return dma->channel_program(channel, qh->maxpacket, 0,
1480                    (u32)buf, length);
1481 }
1482 #else
1483 static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1484                      struct musb_hw_ep *hw_ep,
1485                      struct musb_qh *qh,
1486                      struct urb *urb,
1487                      size_t len)
1488 {
1489     return false;
1490 }
1491 #endif
1492 
1493 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
1494     defined(CONFIG_USB_TI_CPPI41_DMA)
1495 /* Host side RX (IN) using Mentor DMA works as follows:
1496     submit_urb ->
1497         - if queue was empty, ProgramEndpoint
1498         - first IN token is sent out (by setting ReqPkt)
1499     LinuxIsr -> RxReady()
1500     /\  => first packet is received
1501     |   - Set in mode 0 (DmaEnab, ~ReqPkt)
1502     |       -> DMA Isr (transfer complete) -> RxReady()
1503     |           - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1504     |           - if urb not complete, send next IN token (ReqPkt)
1505     |              |        else complete urb.
1506     |              |
1507     ---------------------------
1508  *
1509  * Nuances of mode 1:
1510  *  For short packets, no ack (+RxPktRdy) is sent automatically
1511  *  (even if AutoClear is ON)
1512  *  For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
1513  *  automatically => major problem, as collecting the next packet becomes
1514  *  difficult. Hence mode 1 is not used.
1515  *
1516  * REVISIT
1517  *  All we care about at this driver level is that
1518  *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1519  *       (b) termination conditions are: short RX, or buffer full;
1520  *       (c) fault modes include
1521  *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1522  *             (and that endpoint's dma queue stops immediately)
1523  *           - overflow (full, PLUS more bytes in the terminal packet)
1524  *
1525  *  So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1526  *  thus be a great candidate for using mode 1 ... for all but the
1527  *  last packet of one URB's transfer.
1528  */
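/*
 * Mapping the diagram above onto the code: musb_rx_dma_in_inventra_cppi41()
 * programs the RX channel for the packet that musb_host_rx() found flagged
 * by RxPktRdy, and musb_rx_dma_inventra_cppi41() runs on DMA completion to
 * decide whether the URB is finished or another IN token (ReqPkt) must be
 * issued.
 */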
1529 static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1530                        struct musb_hw_ep *hw_ep,
1531                        struct musb_qh *qh,
1532                        struct urb *urb,
1533                        size_t len)
1534 {
1535     struct dma_channel *channel = hw_ep->rx_channel;
1536     void __iomem *epio = hw_ep->regs;
1537     u16 val;
1538     int pipe;
1539     bool done;
1540 
1541     pipe = urb->pipe;
1542 
1543     if (usb_pipeisoc(pipe)) {
1544         struct usb_iso_packet_descriptor *d;
1545 
1546         d = urb->iso_frame_desc + qh->iso_idx;
1547         d->actual_length = len;
1548 
1549         /* even if there was an error, we did the dma
1550          * for iso_frame_desc->length
1551          */
1552         if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1553             d->status = 0;
1554 
1555         if (++qh->iso_idx >= urb->number_of_packets) {
1556             done = true;
1557         } else {
1558             /* REVISIT: Why ignore return value here? */
1559             if (musb_dma_cppi41(hw_ep->musb))
1560                 done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1561                                   urb, len);
1562             done = false;
1563         }
1564 
1565     } else {
1566         /* done if the urb buffer is full or a short packet is received */
1567         done = (urb->actual_length + len >=
1568             urb->transfer_buffer_length
1569             || channel->actual_len < qh->maxpacket
1570             || channel->rx_packet_done);
1571     }
1572 
1573     /* send IN token for next packet, without AUTOREQ */
1574     if (!done) {
1575         val = musb_readw(epio, MUSB_RXCSR);
1576         val |= MUSB_RXCSR_H_REQPKT;
1577         musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1578     }
1579 
1580     return done;
1581 }
1582 
1583 /* Disadvantage of using mode 1:
1584  *  It's basically usable only for mass storage class; essentially all
1585  *  other protocols also terminate transfers on short packets.
1586  *
1587  * Details:
1588  *  An extra IN token is sent at the end of the transfer (due to AUTOREQ).
1589  *  If you try to use mode 1 for (transfer_buffer_length - 512), and try
1590  *  to use the extra IN token to grab the last packet using mode 0, the
1591  *  problem is that you cannot be sure when the device will send the last
1592  *  packet and set RxPktRdy.  Sometimes the packet is received too soon
1593  *  and gets lost when RxCSR is re-set at the end of the mode 1 transfer;
1594  *  sometimes it is received just a little too late, so that if you try
1595  *  to configure for mode 0 soon after the mode 1 transfer completes,
1596  *  you will find rxcount 0.  Okay, so you might think: why not wait for
1597  *  an interrupt when the packet is received?  Well, you won't get any!
1598  */
1599 static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1600                       struct musb_hw_ep *hw_ep,
1601                       struct musb_qh *qh,
1602                       struct urb *urb,
1603                       size_t len,
1604                       u8 iso_err)
1605 {
1606     struct musb *musb = hw_ep->musb;
1607     void __iomem *epio = hw_ep->regs;
1608     struct dma_channel *channel = hw_ep->rx_channel;
1609     u16 rx_count, val;
1610     int length, pipe, done;
1611     dma_addr_t buf;
1612 
1613     rx_count = musb_readw(epio, MUSB_RXCOUNT);
1614     pipe = urb->pipe;
1615 
1616     if (usb_pipeisoc(pipe)) {
1617         int d_status = 0;
1618         struct usb_iso_packet_descriptor *d;
1619 
1620         d = urb->iso_frame_desc + qh->iso_idx;
1621 
1622         if (iso_err) {
1623             d_status = -EILSEQ;
1624             urb->error_count++;
1625         }
1626         if (rx_count > d->length) {
1627             if (d_status == 0) {
1628                 d_status = -EOVERFLOW;
1629                 urb->error_count++;
1630             }
1631             musb_dbg(musb, "** OVERFLOW %d into %d",
1632                 rx_count, d->length);
1633 
1634             length = d->length;
1635         } else
1636             length = rx_count;
1637         d->status = d_status;
1638         buf = urb->transfer_dma + d->offset;
1639     } else {
1640         length = rx_count;
1641         buf = urb->transfer_dma + urb->actual_length;
1642     }
1643 
1644     channel->desired_mode = 0;
1645 #ifdef USE_MODE1
1646     /* because of the issue below, mode 1 will
1647      * only rarely behave with correct semantics.
1648      */
1649     if ((urb->transfer_flags & URB_SHORT_NOT_OK)
1650         && (urb->transfer_buffer_length - urb->actual_length)
1651         > qh->maxpacket)
1652         channel->desired_mode = 1;
1653     if (rx_count < hw_ep->max_packet_sz_rx) {
1654         length = rx_count;
1655         channel->desired_mode = 0;
1656     } else {
1657         length = urb->transfer_buffer_length;
1658     }
1659 #endif
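    /*
     * Without USE_MODE1 (the normal build) desired_mode stays 0, so the
     * channel_program() below covers only the packet already flagged by
     * RxPktRdy; the next IN token is issued later, from
     * musb_rx_dma_inventra_cppi41(), once this DMA completes.
     */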
1660 
1661     /* See comments above on disadvantages of using mode 1 */
1662     val = musb_readw(epio, MUSB_RXCSR);
1663     val &= ~MUSB_RXCSR_H_REQPKT;
1664 
1665     if (channel->desired_mode == 0)
1666         val &= ~MUSB_RXCSR_H_AUTOREQ;
1667     else
1668         val |= MUSB_RXCSR_H_AUTOREQ;
1669     val |= MUSB_RXCSR_DMAENAB;
1670 
1671     /* autoclear shouldn't be set in high bandwidth */
1672     if (qh->hb_mult == 1)
1673         val |= MUSB_RXCSR_AUTOCLEAR;
1674 
1675     musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1676 
1677     /* REVISIT: when actual_length != 0,
1678      * transfer_buffer_length needs to be
1679      * adjusted first...
1680      */
1681     done = dma->channel_program(channel, qh->maxpacket,
1682                    channel->desired_mode,
1683                    buf, length);
1684 
1685     if (!done) {
1686         dma->channel_release(channel);
1687         hw_ep->rx_channel = NULL;
1688         channel = NULL;
1689         val = musb_readw(epio, MUSB_RXCSR);
1690         val &= ~(MUSB_RXCSR_DMAENAB
1691              | MUSB_RXCSR_H_AUTOREQ
1692              | MUSB_RXCSR_AUTOCLEAR);
1693         musb_writew(epio, MUSB_RXCSR, val);
1694     }
1695 
1696     return done;
1697 }
1698 #else
1699 static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1700                           struct musb_hw_ep *hw_ep,
1701                           struct musb_qh *qh,
1702                           struct urb *urb,
1703                           size_t len)
1704 {
1705     return false;
1706 }
1707 
1708 static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1709                          struct musb_hw_ep *hw_ep,
1710                          struct musb_qh *qh,
1711                          struct urb *urb,
1712                          size_t len,
1713                          u8 iso_err)
1714 {
1715     return false;
1716 }
1717 #endif
1718 
1719 /*
1720  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1721  * and high-bandwidth IN transfer cases.
1722  */
1723 void musb_host_rx(struct musb *musb, u8 epnum)
1724 {
1725     struct urb      *urb;
1726     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
1727     struct dma_controller   *c = musb->dma_controller;
1728     void __iomem        *epio = hw_ep->regs;
1729     struct musb_qh      *qh = hw_ep->in_qh;
1730     size_t          xfer_len;
1731     void __iomem        *mbase = musb->mregs;
1732     u16         rx_csr, val;
1733     bool            iso_err = false;
1734     bool            done = false;
1735     u32         status;
1736     struct dma_channel  *dma;
1737     unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
1738 
1739     musb_ep_select(mbase, epnum);
1740 
1741     urb = next_urb(qh);
1742     dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1743     status = 0;
1744     xfer_len = 0;
1745 
1746     rx_csr = musb_readw(epio, MUSB_RXCSR);
1747     val = rx_csr;
1748 
1749     if (unlikely(!urb)) {
1750         /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1751          * usbtest #11 (unlinks) triggers it regularly, sometimes
1752          * with fifo full.  (Only with DMA??)
1753          */
1754         musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
1755             epnum, val, musb_readw(epio, MUSB_RXCOUNT));
1756         musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1757         return;
1758     }
1759 
1760     trace_musb_urb_rx(musb, urb);
1761 
1762     /* check for errors; concurrent stall & unlink are not really
1763      * handled yet! */
1764     if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1765         musb_dbg(musb, "RX end %d STALL", epnum);
1766 
1767         /* stall; record URB status */
1768         status = -EPIPE;
1769 
1770     } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1771         dev_err(musb->controller, "ep%d RX three-strikes error", epnum);
1772 
1773         /*
1774          * The three-strikes error can only happen when the USB
1775          * device is not accessible, for example detached or powered
1776          * off.  Return the fatal error -ESHUTDOWN so that, hopefully,
1777          * the USB device drivers won't immediately resubmit the same URB.
1778          */
1779         status = -ESHUTDOWN;
1780         musb_writeb(epio, MUSB_RXINTERVAL, 0);
1781 
1782         rx_csr &= ~MUSB_RXCSR_H_ERROR;
1783         musb_writew(epio, MUSB_RXCSR, rx_csr);
1784 
1785     } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1786 
1787         if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1788             musb_dbg(musb, "RX end %d NAK timeout", epnum);
1789 
1790             /* NOTE: NAKing is *NOT* an error, so we want to
1791              * continue.  Except ... if there's a request for
1792              * another QH, use that instead of starving it.
1793              *
1794              * Devices like Ethernet and serial adapters keep
1795              * reads posted at all times, which will starve
1796              * other devices without this logic.
1797              */
1798             if (usb_pipebulk(urb->pipe)
1799                     && qh->mux == 1
1800                     && !list_is_singular(&musb->in_bulk)) {
1801                 musb_bulk_nak_timeout(musb, hw_ep, 1);
1802                 return;
1803             }
1804             musb_ep_select(mbase, epnum);
1805             rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1806             rx_csr &= ~MUSB_RXCSR_DATAERROR;
1807             musb_writew(epio, MUSB_RXCSR, rx_csr);
1808 
1809             goto finish;
1810         } else {
1811             musb_dbg(musb, "RX end %d ISO data error", epnum);
1812             /* packet error reported later */
1813             iso_err = true;
1814         }
1815     } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1816         musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
1817                 epnum);
1818         status = -EPROTO;
1819     }
1820 
1821     /* faults abort the transfer */
1822     if (status) {
1823         /* clean up dma and collect transfer count */
1824         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1825             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1826             musb->dma_controller->channel_abort(dma);
1827             xfer_len = dma->actual_len;
1828         }
1829         musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1830         musb_writeb(epio, MUSB_RXINTERVAL, 0);
1831         done = true;
1832         goto finish;
1833     }
1834 
1835     if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1836         /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1837         ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1838         goto finish;
1839     }
1840 
1841     /* thorough shutdown for now ... given more precise fault handling
1842      * and better queueing support, we might keep a DMA pipeline going
1843      * while processing this irq for earlier completions.
1844      */
1845 
1846     /* FIXME this is _way_ too much in-line logic for Mentor DMA */
1847     if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
1848         (rx_csr & MUSB_RXCSR_H_REQPKT)) {
1849         /* REVISIT this happened for a while on some short reads...
1850          * the cleanup still needs investigation... looks bad...
1851          * and also duplicates dma cleanup code above ... plus,
1852          * shouldn't this be the "half full" double buffer case?
1853          */
1854         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1855             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1856             musb->dma_controller->channel_abort(dma);
1857             xfer_len = dma->actual_len;
1858             done = true;
1859         }
1860 
1861         musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
1862                 xfer_len, dma ? ", dma" : "");
1863         rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1864 
1865         musb_ep_select(mbase, epnum);
1866         musb_writew(epio, MUSB_RXCSR,
1867                 MUSB_RXCSR_H_WZC_BITS | rx_csr);
1868     }
1869 
1870     if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1871         xfer_len = dma->actual_len;
1872 
1873         val &= ~(MUSB_RXCSR_DMAENAB
1874             | MUSB_RXCSR_H_AUTOREQ
1875             | MUSB_RXCSR_AUTOCLEAR
1876             | MUSB_RXCSR_RXPKTRDY);
1877         musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1878 
1879         if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1880             musb_dma_cppi41(musb)) {
1881                 done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
1882                 musb_dbg(hw_ep->musb,
1883                     "ep %d dma %s, rxcsr %04x, rxcount %d",
1884                     epnum, done ? "off" : "reset",
1885                     musb_readw(epio, MUSB_RXCSR),
1886                     musb_readw(epio, MUSB_RXCOUNT));
1887         } else {
1888             done = true;
1889         }
1890 
1891     } else if (urb->status == -EINPROGRESS) {
1892         /* if no errors, be sure a packet is ready for unloading */
1893         if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1894             status = -EPROTO;
1895             ERR("Rx interrupt with no errors or packet!\n");
1896 
1897             /* FIXME this is another "SHOULD NEVER HAPPEN" */
1898 
1899 /* SCRUB (RX) */
1900             /* do the proper sequence to abort the transfer */
1901             musb_ep_select(mbase, epnum);
1902             val &= ~MUSB_RXCSR_H_REQPKT;
1903             musb_writew(epio, MUSB_RXCSR, val);
1904             goto finish;
1905         }
1906 
1907         /* we are expecting IN packets */
1908         if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1909             musb_dma_cppi41(musb)) && dma) {
1910             musb_dbg(hw_ep->musb,
1911                 "RX%d count %d, buffer 0x%llx len %d/%d",
1912                 epnum, musb_readw(epio, MUSB_RXCOUNT),
1913                 (unsigned long long) urb->transfer_dma
1914                 + urb->actual_length,
1915                 qh->offset,
1916                 urb->transfer_buffer_length);
1917 
1918             if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
1919                                xfer_len, iso_err))
1920                 goto finish;
1921             else
1922                 dev_err(musb->controller, "error: rx_dma failed\n");
1923         }
1924 
1925         if (!dma) {
1926             unsigned int received_len;
1927 
1928             /* Unmap the buffer so that CPU can use it */
1929             usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1930 
1931             /*
1932              * We need to map sg if the transfer_buffer is
1933              * NULL.
1934              */
1935             if (!urb->transfer_buffer) {
1936                 qh->use_sg = true;
1937                 sg_miter_start(&qh->sg_miter, urb->sg, 1,
1938                         sg_flags);
1939             }
1940 
1941             if (qh->use_sg) {
1942                 if (!sg_miter_next(&qh->sg_miter)) {
1943                     dev_err(musb->controller, "error: sg list empty\n");
1944                     sg_miter_stop(&qh->sg_miter);
1945                     status = -EINVAL;
1946                     done = true;
1947                     goto finish;
1948                 }
1949                 urb->transfer_buffer = qh->sg_miter.addr;
1950                 received_len = urb->actual_length;
1951                 qh->offset = 0x0;
1952                 done = musb_host_packet_rx(musb, urb, epnum,
1953                         iso_err);
1954                 /* Calculate the number of bytes received */
1955                 received_len = urb->actual_length -
1956                     received_len;
1957                 qh->sg_miter.consumed = received_len;
1958                 sg_miter_stop(&qh->sg_miter);
1959             } else {
1960                 done = musb_host_packet_rx(musb, urb,
1961                         epnum, iso_err);
1962             }
1963             musb_dbg(musb, "read %spacket", done ? "last " : "");
1964         }
1965     }
1966 
1967 finish:
1968     urb->actual_length += xfer_len;
1969     qh->offset += xfer_len;
1970     if (done) {
1971         if (qh->use_sg) {
1972             qh->use_sg = false;
1973             urb->transfer_buffer = NULL;
1974         }
1975 
1976         if (urb->status == -EINPROGRESS)
1977             urb->status = status;
1978         musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1979     }
1980 }
1981 
1982 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1983  * the software schedule associates multiple such nodes with a given
1984  * host side hardware endpoint + direction; scheduling may activate
1985  * that hardware endpoint.
1986  */
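/*
 * Scheduling here is deliberately simple: control URBs always use the
 * dedicated control endpoint; everything else claims a free hardware
 * endpoint whose FIFO best fits its maxpacket (the best_diff loop below),
 * and bulk transfers fall back to the shared bulk endpoint, where multiple
 * qhs are multiplexed via the NAK-timeout scheme, when nothing else is free.
 */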
1987 static int musb_schedule(
1988     struct musb     *musb,
1989     struct musb_qh      *qh,
1990     int         is_in)
1991 {
1992     int         idle = 0;
1993     int         best_diff;
1994     int         best_end, epnum;
1995     struct musb_hw_ep   *hw_ep = NULL;
1996     struct list_head    *head = NULL;
1997     u8          toggle;
1998     u8          txtype;
1999     struct urb      *urb = next_urb(qh);
2000 
2001     /* use fixed hardware for control and bulk */
2002     if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
2003         head = &musb->control;
2004         hw_ep = musb->control_ep;
2005         goto success;
2006     }
2007 
2008     /* else, periodic transfers get muxed to other endpoints */
2009 
2010     /*
2011      * We know this qh hasn't been scheduled, so all we need to do
2012      * is choose which hardware endpoint to put it on ...
2013      *
2014      * REVISIT what we really want here is a regular schedule tree
2015      * like e.g. OHCI uses.
2016      */
2017     best_diff = 4096;
2018     best_end = -1;
2019 
2020     for (epnum = 1, hw_ep = musb->endpoints + 1;
2021             epnum < musb->nr_endpoints;
2022             epnum++, hw_ep++) {
2023         int diff;
2024 
2025         if (musb_ep_get_qh(hw_ep, is_in) != NULL)
2026             continue;
2027 
2028         if (hw_ep == musb->bulk_ep)
2029             continue;
2030 
2031         if (is_in)
2032             diff = hw_ep->max_packet_sz_rx;
2033         else
2034             diff = hw_ep->max_packet_sz_tx;
2035         diff -= (qh->maxpacket * qh->hb_mult);
2036 
2037         if (diff >= 0 && best_diff > diff) {
2038 
2039             /*
2040              * The Mentor controller has a bug: if we schedule a
2041              * BULK TX transfer on an endpoint that had earlier
2042              * handled ISOC, then the BULK transfer has to start on
2043              * a zero toggle.  If the BULK transfer starts on a 1
2044              * toggle, the transfer will fail, as the Mentor
2045              * controller starts the BULK transfer on a 0 toggle
2046              * irrespective of the programming of the toggle bits
2047              * in the TXCSR register.  Check for this condition
2048              * while allocating the EP for a TX BULK transfer; if
2049              * so, skip this EP.
2050              */
2051             hw_ep = musb->endpoints + epnum;
2052             toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
2053             txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
2054                     >> 4) & 0x3;
2055             if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
2056                 toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
2057                 continue;
2058 
2059             best_diff = diff;
2060             best_end = epnum;
2061         }
2062     }
2063     /* use bulk reserved ep1 if no other ep is free */
2064     if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
2065         hw_ep = musb->bulk_ep;
2066         if (is_in)
2067             head = &musb->in_bulk;
2068         else
2069             head = &musb->out_bulk;
2070 
2071         /* Enable bulk RX/TX NAK timeout scheme when bulk requests are
2072          * multiplexed.  This scheme does not work in the high speed
2073          * to full speed scenario, as NAK interrupts are not coming
2074          * from a full speed device connected to a high speed device.
2075          * The NAK timeout interval is 8 (128 uframes or 16 ms) for HS
2076          * and 4 (8 frames or 8 ms) for an FS device.
2077          */
2078         if (qh->dev)
2079             qh->intv_reg =
2080                 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
2081         goto success;
2082     } else if (best_end < 0) {
2083         dev_err(musb->controller,
2084                 "%s hwep alloc failed for %dx%d\n",
2085                 musb_ep_xfertype_string(qh->type),
2086                 qh->hb_mult, qh->maxpacket);
2087         return -ENOSPC;
2088     }
2089 
2090     idle = 1;
2091     qh->mux = 0;
2092     hw_ep = musb->endpoints + best_end;
2093     musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
2094 success:
2095     if (head) {
2096         idle = list_empty(head);
2097         list_add_tail(&qh->ring, head);
2098         qh->mux = 1;
2099     }
2100     qh->hw_ep = hw_ep;
2101     qh->hep->hcpriv = qh;
2102     if (idle)
2103         musb_start_urb(musb, is_in, qh);
2104     return 0;
2105 }
2106 
2107 static int musb_urb_enqueue(
2108     struct usb_hcd          *hcd,
2109     struct urb          *urb,
2110     gfp_t               mem_flags)
2111 {
2112     unsigned long           flags;
2113     struct musb         *musb = hcd_to_musb(hcd);
2114     struct usb_host_endpoint    *hep = urb->ep;
2115     struct musb_qh          *qh;
2116     struct usb_endpoint_descriptor  *epd = &hep->desc;
2117     int             ret;
2118     unsigned            type_reg;
2119     unsigned            interval;
2120 
2121     /* host role must be active */
2122     if (!is_host_active(musb) || !musb->is_active)
2123         return -ENODEV;
2124 
2125     trace_musb_urb_enq(musb, urb);
2126 
2127     spin_lock_irqsave(&musb->lock, flags);
2128     ret = usb_hcd_link_urb_to_ep(hcd, urb);
2129     qh = ret ? NULL : hep->hcpriv;
2130     if (qh)
2131         urb->hcpriv = qh;
2132     spin_unlock_irqrestore(&musb->lock, flags);
2133 
2134     /* DMA mapping was already done, if needed, and this urb is on
2135      * hep->urb_list now ... so we're done, unless hep wasn't yet
2136      * scheduled onto a live qh.
2137      *
2138      * REVISIT best to keep hep->hcpriv valid until the endpoint gets
2139      * disabled, testing for empty qh->ring and avoiding qh setup costs
2140      * except for the first urb queued after a config change.
2141      */
2142     if (qh || ret)
2143         return ret;
2144 
2145     /* Allocate and initialize qh, minimizing the work done each time
2146      * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
2147      *
2148      * REVISIT consider a dedicated qh kmem_cache, so it's harder
2149      * for bugs in other kernel code to break this driver...
2150      */
2151     qh = kzalloc(sizeof *qh, mem_flags);
2152     if (!qh) {
2153         spin_lock_irqsave(&musb->lock, flags);
2154         usb_hcd_unlink_urb_from_ep(hcd, urb);
2155         spin_unlock_irqrestore(&musb->lock, flags);
2156         return -ENOMEM;
2157     }
2158 
2159     qh->hep = hep;
2160     qh->dev = urb->dev;
2161     INIT_LIST_HEAD(&qh->ring);
2162     qh->is_ready = 1;
2163 
2164     qh->maxpacket = usb_endpoint_maxp(epd);
2165     qh->type = usb_endpoint_type(epd);
2166 
2167     /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
2168      * Some musb cores don't support high bandwidth ISO transfers; and
2169      * we don't (yet!) support high bandwidth interrupt transfers.
2170      */
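    /*
     * Illustration (not from the original comment): wMaxPacketSize 0x1400
     * advertises 3 transactions of 1024 bytes per microframe, so hb_mult
     * becomes 3 and the masking below leaves maxpacket = 1024.
     */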
2171     qh->hb_mult = usb_endpoint_maxp_mult(epd);
2172     if (qh->hb_mult > 1) {
2173         int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2174 
2175         if (ok)
2176             ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2177                 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2178         if (!ok) {
2179             dev_err(musb->controller,
2180                 "high bandwidth %s (%dx%d) not supported\n",
2181                 musb_ep_xfertype_string(qh->type),
2182                 qh->hb_mult, qh->maxpacket & 0x7ff);
2183             ret = -EMSGSIZE;
2184             goto done;
2185         }
2186         qh->maxpacket &= 0x7ff;
2187     }
2188 
2189     qh->epnum = usb_endpoint_num(epd);
2190 
2191     /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
2192     qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2193 
2194     /* precompute rxtype/txtype/type0 register */
2195     type_reg = (qh->type << 4) | qh->epnum;
2196     switch (urb->dev->speed) {
2197     case USB_SPEED_LOW:
2198         type_reg |= 0xc0;
2199         break;
2200     case USB_SPEED_FULL:
2201         type_reg |= 0x80;
2202         break;
2203     default:
2204         type_reg |= 0x40;
2205     }
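    /*
     * Resulting layout, per the constants above: bits 7:6 hold the speed
     * (01 high, 10 full, 11 low), bits 5:4 the transfer type, and bits 3:0
     * the target endpoint number.
     */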
2206     qh->type_reg = type_reg;
2207 
2208     /* Precompute RXINTERVAL/TXINTERVAL register */
2209     switch (qh->type) {
2210     case USB_ENDPOINT_XFER_INT:
2211         /*
2212          * Full/low speeds use the linear encoding,
2213          * high speed uses the logarithmic encoding.
2214          */
2215         if (urb->dev->speed <= USB_SPEED_FULL) {
2216             interval = max_t(u8, epd->bInterval, 1);
2217             break;
2218         }
2219         fallthrough;
2220     case USB_ENDPOINT_XFER_ISOC:
2221         /* ISO always uses logarithmic encoding */
2222         interval = min_t(u8, epd->bInterval, 16);
2223         break;
2224     default:
2225         /* REVISIT we actually want to use NAK limits, hinting to the
2226          * transfer scheduling logic to try some other qh, e.g. try
2227          * for 2 msec first:
2228          *
2229          * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
2230          *
2231          * The downside of disabling this is that transfer scheduling
2232          * gets VERY unfair for nonperiodic transfers; a misbehaving
2233          * peripheral could make that hurt.  That's perfectly normal
2234          * for reads from network or serial adapters ... so we have
2235          * partial NAKlimit support for bulk RX.
2236          *
2237          * The upside of disabling it is simpler transfer scheduling.
2238          */
2239         interval = 0;
2240     }
2241     qh->intv_reg = interval;
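    /*
     * Examples of the two encodings (USB 2.0): a full speed interrupt
     * endpoint with bInterval 10 is polled about every 10 frames (10 ms),
     * while a high speed interrupt or ISO endpoint with bInterval 4 is
     * polled every 2^(4-1) = 8 microframes (1 ms).
     */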
2242 
2243     /* precompute addressing for external hub/tt ports */
2244     if (musb->is_multipoint) {
2245         struct usb_device   *parent = urb->dev->parent;
2246 
2247         if (parent != hcd->self.root_hub) {
2248             qh->h_addr_reg = (u8) parent->devnum;
2249 
2250             /* set up tt info if needed */
2251             if (urb->dev->tt) {
2252                 qh->h_port_reg = (u8) urb->dev->ttport;
2253                 if (urb->dev->tt->hub)
2254                     qh->h_addr_reg =
2255                         (u8) urb->dev->tt->hub->devnum;
2256                 if (urb->dev->tt->multi)
2257                     qh->h_addr_reg |= 0x80;
2258             }
2259         }
2260     }
2261 
2262     /* Invariant: hep->hcpriv is NULL or the qh that's already scheduled.
2263      * Until we get real DMA queues (with an entry for each urb/buffer),
2264      * we only have work to do in the former case.
2265      */
2266     spin_lock_irqsave(&musb->lock, flags);
2267     if (hep->hcpriv || !next_urb(qh)) {
2268         /* some concurrent activity submitted another urb to hep...
2269          * odd, rare, error prone, but legal.
2270          */
2271         kfree(qh);
2272         qh = NULL;
2273         ret = 0;
2274     } else
2275         ret = musb_schedule(musb, qh,
2276                 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2277 
2278     if (ret == 0) {
2279         urb->hcpriv = qh;
2280         /* FIXME set urb->start_frame for iso/intr, it's tested in
2281          * musb_start_urb(), but otherwise only konicawc cares ...
2282          */
2283     }
2284     spin_unlock_irqrestore(&musb->lock, flags);
2285 
2286 done:
2287     if (ret != 0) {
2288         spin_lock_irqsave(&musb->lock, flags);
2289         usb_hcd_unlink_urb_from_ep(hcd, urb);
2290         spin_unlock_irqrestore(&musb->lock, flags);
2291         kfree(qh);
2292     }
2293     return ret;
2294 }
2295 
2296 
2297 /*
2298  * Abort a transfer that's at the head of a hardware queue.
2299  * Called with the controller locked and irqs blocked.
2300  * That hardware queue advances to the next transfer, unless prevented.
2301  */
2302 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2303 {
2304     struct musb_hw_ep   *ep = qh->hw_ep;
2305     struct musb     *musb = ep->musb;
2306     void __iomem        *epio = ep->regs;
2307     unsigned        hw_end = ep->epnum;
2308     void __iomem        *regs = ep->musb->mregs;
2309     int         is_in = usb_pipein(urb->pipe);
2310     int         status = 0;
2311     u16         csr;
2312     struct dma_channel  *dma = NULL;
2313 
2314     musb_ep_select(regs, hw_end);
2315 
2316     if (is_dma_capable()) {
2317         dma = is_in ? ep->rx_channel : ep->tx_channel;
2318         if (dma) {
2319             status = ep->musb->dma_controller->channel_abort(dma);
2320             musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
2321                 is_in ? 'R' : 'T', ep->epnum,
2322                 urb, status);
2323             urb->actual_length += dma->actual_len;
2324         }
2325     }
2326 
2327     /* turn off DMA requests, discard state, stop polling ... */
2328     if (ep->epnum && is_in) {
2329         /* giveback saves bulk toggle */
2330         csr = musb_h_flush_rxfifo(ep, 0);
2331 
2332         /* clear the endpoint's irq status here to avoid bogus irqs */
2333         if (is_dma_capable() && dma)
2334             musb_platform_clear_ep_rxintr(musb, ep->epnum);
2335     } else if (ep->epnum) {
2336         musb_h_tx_flush_fifo(ep);
2337         csr = musb_readw(epio, MUSB_TXCSR);
2338         csr &= ~(MUSB_TXCSR_AUTOSET
2339             | MUSB_TXCSR_DMAENAB
2340             | MUSB_TXCSR_H_RXSTALL
2341             | MUSB_TXCSR_H_NAKTIMEOUT
2342             | MUSB_TXCSR_H_ERROR
2343             | MUSB_TXCSR_TXPKTRDY);
2344         musb_writew(epio, MUSB_TXCSR, csr);
2345         /* REVISIT may need to clear FLUSHFIFO ... */
2346         musb_writew(epio, MUSB_TXCSR, csr);
2347         /* flush cpu writebuffer */
2348         csr = musb_readw(epio, MUSB_TXCSR);
2349     } else {
2350         musb_h_ep0_flush_fifo(ep);
2351     }
2352     if (status == 0)
2353         musb_advance_schedule(ep->musb, urb, ep, is_in);
2354     return status;
2355 }
2356 
2357 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2358 {
2359     struct musb     *musb = hcd_to_musb(hcd);
2360     struct musb_qh      *qh;
2361     unsigned long       flags;
2362     int         is_in  = usb_pipein(urb->pipe);
2363     int         ret;
2364 
2365     trace_musb_urb_deq(musb, urb);
2366 
2367     spin_lock_irqsave(&musb->lock, flags);
2368     ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2369     if (ret)
2370         goto done;
2371 
2372     qh = urb->hcpriv;
2373     if (!qh)
2374         goto done;
2375 
2376     /*
2377      * Any URB not actively programmed into endpoint hardware can be
2378      * immediately given back; that's any URB not at the head of an
2379      * endpoint queue, unless someday we get real DMA queues.  And even
2380      * if it's at the head, it might not be known to the hardware...
2381      *
2382      * Otherwise abort current transfer, pending DMA, etc.; urb->status
2383      * has already been updated.  This is a synchronous abort; it'd be
2384      * OK to hold off until after some IRQ, though.
2385      *
2386      * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2387      */
2388     if (!qh->is_ready
2389             || urb->urb_list.prev != &qh->hep->urb_list
2390             || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2391         int ready = qh->is_ready;
2392 
2393         qh->is_ready = 0;
2394         musb_giveback(musb, urb, 0);
2395         qh->is_ready = ready;
2396 
2397         /* If nothing else (usually musb_giveback) is using it
2398          * and its URB list has emptied, recycle this qh.
2399          */
2400         if (ready && list_empty(&qh->hep->urb_list)) {
2401             qh->hep->hcpriv = NULL;
2402             list_del(&qh->ring);
2403             kfree(qh);
2404         }
2405     } else
2406         ret = musb_cleanup_urb(urb, qh);
2407 done:
2408     spin_unlock_irqrestore(&musb->lock, flags);
2409     return ret;
2410 }
2411 
2412 /* disable an endpoint */
2413 static void
2414 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2415 {
2416     u8          is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2417     unsigned long       flags;
2418     struct musb     *musb = hcd_to_musb(hcd);
2419     struct musb_qh      *qh;
2420     struct urb      *urb;
2421 
2422     spin_lock_irqsave(&musb->lock, flags);
2423 
2424     qh = hep->hcpriv;
2425     if (qh == NULL)
2426         goto exit;
2427 
2428     /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2429 
2430     /* Kick the first URB off the hardware, if needed */
2431     qh->is_ready = 0;
2432     if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2433         urb = next_urb(qh);
2434 
2435         /* make software (then hardware) stop ASAP */
2436         if (!urb->unlinked)
2437             urb->status = -ESHUTDOWN;
2438 
2439         /* cleanup */
2440         musb_cleanup_urb(urb, qh);
2441 
2442         /* Then nuke all the others ... and advance the
2443          * queue on hw_ep (e.g. bulk ring) when we're done.
2444          */
2445         while (!list_empty(&hep->urb_list)) {
2446             urb = next_urb(qh);
2447             urb->status = -ESHUTDOWN;
2448             musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2449         }
2450     } else {
2451         /* Just empty the queue; the hardware is busy with
2452          * other transfers, and since !qh->is_ready nothing
2453          * will activate any of these as it advances.
2454          */
2455         while (!list_empty(&hep->urb_list))
2456             musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2457 
2458         hep->hcpriv = NULL;
2459         list_del(&qh->ring);
2460         kfree(qh);
2461     }
2462 exit:
2463     spin_unlock_irqrestore(&musb->lock, flags);
2464 }
2465 
2466 static int musb_h_get_frame_number(struct usb_hcd *hcd)
2467 {
2468     struct musb *musb = hcd_to_musb(hcd);
2469 
2470     return musb_readw(musb->mregs, MUSB_FRAME);
2471 }
2472 
2473 static int musb_h_start(struct usb_hcd *hcd)
2474 {
2475     struct musb *musb = hcd_to_musb(hcd);
2476 
2477     /* NOTE: musb_start() is called when the hub driver turns
2478      * on port power, or when (OTG) peripheral starts.
2479      */
2480     hcd->state = HC_STATE_RUNNING;
2481     musb->port1_status = 0;
2482     return 0;
2483 }
2484 
2485 static void musb_h_stop(struct usb_hcd *hcd)
2486 {
2487     musb_stop(hcd_to_musb(hcd));
2488     hcd->state = HC_STATE_HALT;
2489 }
2490 
2491 static int musb_bus_suspend(struct usb_hcd *hcd)
2492 {
2493     struct musb *musb = hcd_to_musb(hcd);
2494     u8      devctl;
2495     int     ret;
2496 
2497     ret = musb_port_suspend(musb, true);
2498     if (ret)
2499         return ret;
2500 
2501     if (!is_host_active(musb))
2502         return 0;
2503 
2504     switch (musb->xceiv->otg->state) {
2505     case OTG_STATE_A_SUSPEND:
2506         return 0;
2507     case OTG_STATE_A_WAIT_VRISE:
2508         /* ID could be grounded even if there's no device
2509          * on the other end of the cable.  NOTE that the
2510          * A_WAIT_VRISE timers are messy with MUSB...
2511          */
2512         devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2513         if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2514             musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2515         break;
2516     default:
2517         break;
2518     }
2519 
2520     if (musb->is_active) {
2521         WARNING("trying to suspend as %s while active\n",
2522                 usb_otg_state_string(musb->xceiv->otg->state));
2523         return -EBUSY;
2524     } else
2525         return 0;
2526 }
2527 
2528 static int musb_bus_resume(struct usb_hcd *hcd)
2529 {
2530     struct musb *musb = hcd_to_musb(hcd);
2531 
2532     if (musb->config &&
2533         musb->config->host_port_deassert_reset_at_resume)
2534         musb_port_reset(musb, false);
2535 
2536     return 0;
2537 }
2538 
2539 #ifndef CONFIG_MUSB_PIO_ONLY
2540 
2541 #define MUSB_USB_DMA_ALIGN 4
2542 
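/*
 * Bounce buffer used when a transfer_buffer is not 4-byte aligned:
 * kmalloc_ptr is the raw allocation, old_xfer_buffer remembers the caller's
 * buffer so it can be restored (and, for IN transfers, copied back to) in
 * musb_free_temp_buffer(), and data[] is placed on a MUSB_USB_DMA_ALIGN
 * boundary via PTR_ALIGN() in musb_alloc_temp_buffer().
 */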
2543 struct musb_temp_buffer {
2544     void *kmalloc_ptr;
2545     void *old_xfer_buffer;
2546     u8 data[];
2547 };
2548 
2549 static void musb_free_temp_buffer(struct urb *urb)
2550 {
2551     enum dma_data_direction dir;
2552     struct musb_temp_buffer *temp;
2553     size_t length;
2554 
2555     if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2556         return;
2557 
2558     dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2559 
2560     temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2561                 data);
2562 
2563     if (dir == DMA_FROM_DEVICE) {
2564         if (usb_pipeisoc(urb->pipe))
2565             length = urb->transfer_buffer_length;
2566         else
2567             length = urb->actual_length;
2568 
2569         memcpy(temp->old_xfer_buffer, temp->data, length);
2570     }
2571     urb->transfer_buffer = temp->old_xfer_buffer;
2572     kfree(temp->kmalloc_ptr);
2573 
2574     urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2575 }
2576 
2577 static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2578 {
2579     enum dma_data_direction dir;
2580     struct musb_temp_buffer *temp;
2581     void *kmalloc_ptr;
2582     size_t kmalloc_size;
2583 
2584     if (urb->num_sgs || urb->sg ||
2585         urb->transfer_buffer_length == 0 ||
2586         !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2587         return 0;
2588 
2589     dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2590 
2591     /* Allocate a buffer with enough padding for alignment */
2592     kmalloc_size = urb->transfer_buffer_length +
2593         sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2594 
2595     kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2596     if (!kmalloc_ptr)
2597         return -ENOMEM;
2598 
2599     /* Position our struct musb_temp_buffer such that data[] is aligned */
2600     temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
2601 
2602 
2603     temp->kmalloc_ptr = kmalloc_ptr;
2604     temp->old_xfer_buffer = urb->transfer_buffer;
2605     if (dir == DMA_TO_DEVICE)
2606         memcpy(temp->data, urb->transfer_buffer,
2607                urb->transfer_buffer_length);
2608     urb->transfer_buffer = temp->data;
2609 
2610     urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2611 
2612     return 0;
2613 }
2614 
2615 static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2616                       gfp_t mem_flags)
2617 {
2618     struct musb *musb = hcd_to_musb(hcd);
2619     int ret;
2620 
2621     /*
2622      * The DMA engine in RTL1.8 and above cannot handle
2623      * DMA addresses that are not aligned to a 4 byte boundary.
2624      * For such engines we implement the (un)map_urb_for_dma hooks.
2625      * Do not use these hooks for RTL < 1.8.
2626      */
2627     if (musb->hwvers < MUSB_HWVERS_1800)
2628         return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2629 
2630     ret = musb_alloc_temp_buffer(urb, mem_flags);
2631     if (ret)
2632         return ret;
2633 
2634     ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2635     if (ret)
2636         musb_free_temp_buffer(urb);
2637 
2638     return ret;
2639 }
2640 
2641 static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2642 {
2643     struct musb *musb = hcd_to_musb(hcd);
2644 
2645     usb_hcd_unmap_urb_for_dma(hcd, urb);
2646 
2647     /* Do not use this hook for RTL<1.8 (see description above) */
2648     if (musb->hwvers < MUSB_HWVERS_1800)
2649         return;
2650 
2651     musb_free_temp_buffer(urb);
2652 }
2653 #endif /* !CONFIG_MUSB_PIO_ONLY */
2654 
2655 static const struct hc_driver musb_hc_driver = {
2656     .description        = "musb-hcd",
2657     .product_desc       = "MUSB HDRC host driver",
2658     .hcd_priv_size      = sizeof(struct musb *),
2659     .flags          = HCD_USB2 | HCD_DMA | HCD_MEMORY,
2660 
2661     /* not using irq handler or reset hooks from usbcore, since
2662      * those must be shared with peripheral code for OTG configs
2663      */
2664 
2665     .start          = musb_h_start,
2666     .stop           = musb_h_stop,
2667 
2668     .get_frame_number   = musb_h_get_frame_number,
2669 
2670     .urb_enqueue        = musb_urb_enqueue,
2671     .urb_dequeue        = musb_urb_dequeue,
2672     .endpoint_disable   = musb_h_disable,
2673 
2674 #ifndef CONFIG_MUSB_PIO_ONLY
2675     .map_urb_for_dma    = musb_map_urb_for_dma,
2676     .unmap_urb_for_dma  = musb_unmap_urb_for_dma,
2677 #endif
2678 
2679     .hub_status_data    = musb_hub_status_data,
2680     .hub_control        = musb_hub_control,
2681     .bus_suspend        = musb_bus_suspend,
2682     .bus_resume     = musb_bus_resume,
2683     /* .start_port_reset    = NULL, */
2684     /* .hub_irq_enable  = NULL, */
2685 };
2686 
2687 int musb_host_alloc(struct musb *musb)
2688 {
2689     struct device   *dev = musb->controller;
2690 
2691     /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
2692     musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2693     if (!musb->hcd)
2694         return -EINVAL;
2695 
2696     *musb->hcd->hcd_priv = (unsigned long) musb;
2697     musb->hcd->self.uses_pio_for_control = 1;
2698     musb->hcd->uses_new_polling = 1;
2699     musb->hcd->has_tt = 1;
2700 
2701     return 0;
2702 }
2703 
2704 void musb_host_cleanup(struct musb *musb)
2705 {
2706     if (musb->port_mode == MUSB_PERIPHERAL)
2707         return;
2708     usb_remove_hcd(musb->hcd);
2709 }
2710 
2711 void musb_host_free(struct musb *musb)
2712 {
2713     usb_put_hcd(musb->hcd);
2714 }
2715 
2716 int musb_host_setup(struct musb *musb, int power_budget)
2717 {
2718     int ret;
2719     struct usb_hcd *hcd = musb->hcd;
2720 
2721     if (musb->port_mode == MUSB_HOST) {
2722         MUSB_HST_MODE(musb);
2723         musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2724     }
2725     otg_set_host(musb->xceiv->otg, &hcd->self);
2726     /* don't support otg protocols */
2727     hcd->self.otg_port = 0;
2728     musb->xceiv->otg->host = &hcd->self;
2729     hcd->power_budget = 2 * (power_budget ? : 250);
2730     hcd->skip_phy_initialization = 1;
2731 
2732     ret = usb_add_hcd(hcd, 0, 0);
2733     if (ret < 0)
2734         return ret;
2735 
2736     device_wakeup_enable(hcd->self.controller);
2737     return 0;
2738 }
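/*
 * Hypothetical usage sketch (not part of this driver): one plausible
 * bring-up order for the helpers defined above.  The function name and the
 * error handling are illustrative only; the callees are the real ones
 * exported by this file.
 */
static inline int musb_host_bringup_sketch(struct musb *musb, int power_budget)
{
    int ret;

    ret = musb_host_alloc(musb);        /* usb_create_hcd() + hcd_priv setup */
    if (ret)
        return ret;

    ret = musb_host_setup(musb, power_budget);  /* usb_add_hcd() etc. */
    if (ret) {
        musb_host_free(musb);           /* puts the hcd reference */
        return ret;
    }

    return 0;
}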
2739 
2740 void musb_host_resume_root_hub(struct musb *musb)
2741 {
2742     usb_hcd_resume_root_hub(musb->hcd);
2743 }
2744 
2745 void musb_host_poke_root_hub(struct musb *musb)
2746 {
2747     MUSB_HST_MODE(musb);
2748     if (musb->hcd->status_urb)
2749         usb_hcd_poll_rh_status(musb->hcd);
2750     else
2751         usb_hcd_resume_root_hub(musb->hcd);
2752 }