0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/module.h>
0012 #include <linux/kernel.h>
0013 #include <linux/delay.h>
0014 #include <linux/sched.h>
0015 #include <linux/slab.h>
0016 #include <linux/errno.h>
0017 #include <linux/list.h>
0018 #include <linux/dma-mapping.h>
0019
0020 #include "musb_core.h"
0021 #include "musb_host.h"
0022 #include "musb_trace.h"
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
/*
 * hcd_to_musb() - recover the struct musb from a USB host controller.
 *
 * hcd->hcd_priv holds a pointer to the musb instance (stored as its
 * first element), so a single dereference yields the controller state.
 */
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}
0077
0078
0079 static void musb_ep_program(struct musb *musb, u8 epnum,
0080 struct urb *urb, int is_out,
0081 u8 *buf, u32 offset, u32 len);
0082
0083
0084
0085
/*
 * Flush a host TX endpoint's FIFO until the controller reports it empty.
 *
 * While FIFONOTEMPTY is set, write FLUSHFIFO (together with TXPKTRDY,
 * which this loop keeps asserted while flushing) and re-read TXCSR.
 * NOTE(review): a double-buffered FIFO presumably needs more than one
 * flush cycle, hence the loop rather than a single write — confirm.
 * Gives up after ~1000 iterations (1 ms apart) with a one-time warning.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb *musb = ep->musb;
	void __iomem *epio = ep->regs;
	u16 csr;
	int retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/* Warn once (per device) and bail instead of spinning
		 * forever on hardware that refuses to drain.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
0120
/*
 * Flush endpoint zero's FIFO in either direction.
 *
 * EP0's CSR0 register shares the MUSB_TXCSR offset, which is why this
 * reads MUSB_TXCSR but tests/writes MUSB_CSR0_* bits.  Retries a few
 * times with a short delay, warns on failure, then clears CSR0 to drop
 * any remaining SETUP/STATUS state.
 */
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem *epio = ep->regs;
	u16 csr;
	int retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
0143
0144
0145
0146
0147
0148 static inline void musb_h_tx_start(struct musb_hw_ep *ep)
0149 {
0150 u16 txcsr;
0151
0152
0153 if (ep->epnum) {
0154 txcsr = musb_readw(ep->regs, MUSB_TXCSR);
0155 txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
0156 musb_writew(ep->regs, MUSB_TXCSR, txcsr);
0157 } else {
0158 txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
0159 musb_writew(ep->regs, MUSB_CSR0, txcsr);
0160 }
0161
0162 }
0163
0164 static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
0165 {
0166 u16 txcsr;
0167
0168
0169 txcsr = musb_readw(ep->regs, MUSB_TXCSR);
0170 txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
0171 if (is_cppi_enabled(ep->musb))
0172 txcsr |= MUSB_TXCSR_DMAMODE;
0173 musb_writew(ep->regs, MUSB_TXCSR, txcsr);
0174 }
0175
0176 static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
0177 {
0178 if (is_in != 0 || ep->is_shared_fifo)
0179 ep->in_qh = qh;
0180 if (is_in == 0 || ep->is_shared_fifo)
0181 ep->out_qh = qh;
0182 }
0183
0184 static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
0185 {
0186 return is_in ? ep->in_qh : ep->out_qh;
0187 }
0188
0189
0190
0191
0192
0193
0194
/*
 * Start the next URB queued on @qh: pick the data source/length for the
 * transfer type, program the endpoint, then (for OUT) kick the hardware.
 * Caller context: presumably holds musb->lock — TODO confirm at callers.
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32 len;
	void __iomem *mbase = musb->mregs;
	struct urb *urb = next_urb(qh);
	void *buf = urb->transfer_buffer;
	u32 offset = 0;
	struct musb_hw_ep *hw_ep = qh->hw_ep;
	int epnum = hw_ep->epnum;

	/* initialize software state for this transfer */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather the right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with an 8-byte SETUP
		 * going OUT, regardless of the data stage's direction */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may already be nonzero (partial resubmit) */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* configure endpoint registers and load the FIFO / start DMA */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* IN transfers are requested by the RX path; nothing more here */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		/* FIXME: start_frame scheduling is not implemented; the
		 * "if (1)" below makes the else branch dead code.
		 */
		if (1) {
			/* behave as if URB_ISO_ASAP was always set */
			qh->frame = 0;
			goto start;
		} else {
			/* dead: would wait for the SOF irq to count down
			 * to urb->start_frame before starting */
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we could count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		/* PIO starts via TXPKTRDY; CPPI/TUSB DMA needs an
		 * explicit DMA-enable kick */
		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}
0275
0276
/*
 * Hand a completed URB back to the USB core.
 *
 * Caller holds musb->lock; the lock is dropped around
 * usb_hcd_giveback_urb() because URB completion callbacks may resubmit
 * and must not run under the controller lock (sparse annotations below
 * document the release/reacquire).
 */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}
0288
0289
0290
0291
0292
0293
0294
0295
/*
 * Complete the current URB on @hw_ep and advance to the next one.
 *
 * Saves the data toggle, gives the URB back (dropping the lock inside
 * musb_giveback), and if the endpoint's URB list is now empty, releases
 * its DMA channel, unbinds and frees the qh, and — for multiplexed
 * bulk — moves on to the next qh sharing this hardware endpoint.
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep *ep = qh->hw_ep;
	int ready = qh->is_ready;
	int status;
	u16 toggle;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle so the next URB on this endpoint resumes cleanly */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		toggle = musb->io.get_toggle(qh, !is_in);
		usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* per-frame errors surface as -EXDEV on the whole URB */
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	/* clear is_ready across the giveback so a completion-handler
	 * resubmit can't restart this qh underneath us */
	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head *head;
		struct dma_controller *dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy doesn't dedicate this hw_ep: if
			 * another qh shares it (mux == 1), continue with
			 * the next qh on the ring */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			fallthrough;

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this hw_ep was dedicated; nothing to advance to */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
0383
/*
 * Flush a host RX endpoint's FIFO and stop further requests.
 *
 * Sets FLUSHFIFO|RXPKTRDY while clearing REQPKT/AUTOREQ/AUTOCLEAR.
 * NOTE(review): csr is deliberately written twice back-to-back —
 * presumably to flush both halves of a double-buffered FIFO; confirm
 * against the MUSB programmer's guide before "simplifying".
 * Returns the RXCSR value read back after the flush.
 */
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
0402
0403
0404
0405
/*
 * PIO-receive one packet from @epnum into the current URB.
 *
 * Reads RXCOUNT, copies that many bytes (clamped to the buffer, with
 * overflow flagged and the excess flushed), updates iso descriptors or
 * the bulk/interrupt offset, then either re-arms REQPKT or leaves the
 * endpoint idle.  Returns true when the URB is finished.
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16 rx_count;
	u8 *buf;
	u16 csr;
	bool done = false;
	u32 length;
	int do_flush = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	int pipe = urb->pipe;
	void *buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum) was done by caller — TODO confirm */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int status = 0;
		struct usb_iso_packet_descriptor *d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			/* packet bigger than this frame's slot */
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* done: buffer full, short packet, or prior error */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
0498
0499
0500
0501
0502
0503
0504
0505
0506
/*
 * (Re)program a hardware RX endpoint for a new qh: scrub any TX-side
 * state on shared-FIFO endpoints, flush the RX FIFO (clearing the data
 * toggle), then set target address, type, interval, and max packet.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16 csr;

	/* A shared FIFO may have been left in TX mode by a previous
	 * transfer; flush it and force the toggle before switching to RX.
	 */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/* NOTE(review): DMAMODE is cleared in its own write before
		 * zeroing TXCSR — presumably the controller requires
		 * DMAMODE and DMAENAB to change in separate writes.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	}
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);

	/* max packet size, with high-bandwidth multiplier in bits 12:11 */
	musb_writew(ep->regs, MUSB_RXMAXP,
			qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
0565
/*
 * Configure TXCSR DMA bits for a Mentor (Inventra/UX500) TX channel.
 *
 * Transfers larger than one packet use DMA mode 1 (with AUTOSET when
 * safe); otherwise mode 0.  @length is clamped to the channel's
 * max_len, and the chosen mode is returned through @mode and recorded
 * in channel->desired_mode.
 */
static void musb_tx_dma_set_mode_mentor(struct musb_hw_ep *hw_ep,
					struct musb_qh *qh,
					u32 *length, u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 pkt_size = qh->maxpacket;
	u16 csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;

		/* AUTOSET only when not high-bandwidth, or when the
		 * endpoint packs multiple packets (bulk split) —
		 * otherwise it is left clear.
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB;
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}
0603
0604 static void musb_tx_dma_set_mode_cppi_tusb(struct musb_hw_ep *hw_ep,
0605 struct urb *urb,
0606 u8 *mode)
0607 {
0608 struct dma_channel *channel = hw_ep->tx_channel;
0609
0610 channel->actual_len = 0;
0611
0612
0613
0614
0615
0616 *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
0617 }
0618
/*
 * Try to start a TX DMA transfer; returns true on success.
 *
 * Delegates mode/CSR setup to the controller-specific helper, then
 * programs the channel.  On programming failure the channel is
 * released and TXCSR's DMA bits are cleared so the caller falls back
 * to PIO (returning false also covers unsupported DMA controllers).
 */
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel *channel = hw_ep->tx_channel;
	u16 pkt_size = qh->maxpacket;
	u8 mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(hw_ep, qh,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(hw_ep, urb, &mode);
	else
		return false;

	qh->segsize = length;

	/* Ensure the data written to the urb buffer is visible to the
	 * DMA engine before the channel is kicked off.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
0658
0659
0660
0661
0662
/*
 * Program a hardware endpoint for one transfer segment, IN or OUT,
 * PIO or DMA.  This is the workhorse called by musb_start_urb() and
 * the continuation paths; register write ordering below is
 * deliberate — do not reorder without consulting the MUSB docs.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller *dma_controller;
	struct dma_channel *dma_channel;
	u8 dma_ok;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
	u16 packet_sz = qh->maxpacket;
	u8 use_dma = 1;
	u16 csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* zero-length OUT: force PIO and drop any stale DMA binding */
	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? allocate a channel lazily (never for ep0) */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* OUT/transmit (including ep0) vs IN/receive */
	if (is_out) {
		u16 csr;
		u16 int_txe;
		u16 load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* mask this endpoint's TX interrupt while we reprogram */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush old state, unless the FIFO is
			 * double-buffered (flushing would lose the
			 * still-queued packet) */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/* clear error/status/DMA bits, keep toggle */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered)
				csr |= musb->io.set_toggle(qh, is_out, urb);

			musb_writew(epio, MUSB_TXCSR, csr);
			/* NOTE(review): DMAMODE is cleared in a second,
			 * separate write — presumably a hardware
			 * requirement; confirm before merging the writes. */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type)) {
				/* pack multiple packets per FIFO load */
				qh->hb_mult = hw_ep->max_packet_sz_tx
					/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO: load FIFO directly, or via the sg list
			 * when no linear buffer was mapped */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg"
							"list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable this endpoint's TX interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16 csr = 0;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);
			csr |= musb->io.set_toggle(qh, is_out, urb);

			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/* endpoint should be idle at this point */
			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* program external DMA before requesting packets */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* commit csr (and re-read) before the channel
			 * program touches the endpoint */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/* mode: treat short reads as completion unless
			 * the URB declares them an error */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
0889
0890
0891
0892
/*
 * Handle a NAK timeout on a multiplexed bulk endpoint: stop the current
 * transfer, abort any in-flight DMA, save the toggle, rotate the
 * NAKing qh to the tail of the bulk ring, and start the next qh so one
 * slow device can't starve the others sharing this hardware endpoint.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
				  int is_in)
{
	struct dma_channel *dma;
	struct urb *urb;
	void __iomem *mbase = musb->mregs;
	void __iomem *epio = ep->regs;
	struct musb_qh *cur_qh, *next_qh;
	u16 rx_csr, tx_csr;
	u16 toggle;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/* stop requesting packets, then clear the error bit in
		 * a second write (W1C-style handling via H_WZC_BITS) */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear the NAK-timeout flag */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		/* preserve toggle so the URB resumes correctly later */
		toggle = musb->io.get_toggle(cur_qh, !is_in);
		usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}

		if (next_qh)
			musb_start_urb(musb, is_in, next_qh);
	}
}
0967
0968
0969
0970
0971
/*
 * Continue a control transfer through the ep0 state machine.
 *
 * @len is the number of bytes ready in the FIFO (IN stages only).
 * Returns true when another DATA stage packet is still needed, false
 * when the caller should move on to the STATUS stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	struct musb_qh *qh = hw_ep->in_qh;
	struct usb_ctrlrequest *request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		/* more data in the FIFO than the buffer can take */
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* short packet: the IN data stage is over; fall
			 * through to return false (status stage next) */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		/* pick the next stage from the SETUP packet */
		if (!request->wLength) {
			musb_dbg(musb, "start no-DATA");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			musb_dbg(musb, "start IN-DATA");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			musb_dbg(musb, "start OUT-DATA");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		fallthrough;
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
1041
1042
1043
1044
1045
1046
1047
/*
 * Interrupt handler for endpoint zero.
 *
 * Decodes CSR0 error bits (stall, protocol error, NAK timeout), aborts
 * the URB on error, otherwise advances the ep0 state machine via
 * musb_h_ep0_continue() and programs the next DATA or STATUS phase.
 * Called with the controller lock held — TODO confirm at call site.
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb *urb;
	u16 csr, len;
	int status = 0;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	bool complete = false;
	irqreturn_t retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		musb_dbg(musb, "STALLING ENDPOINT");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		musb_dbg(musb, "no response, csr0 %04x", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		musb_dbg(musb, "control NAK timeout");

		/* NOTE: the URB is retried rather than failed here;
		 * clearing CSR0 restarts the request.  A device that
		 * NAKs forever would keep us retrying — the NAKLIMIT0
		 * hardware timer is what bounds each attempt.
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		musb_dbg(musb, "aborting");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
1170
1171
1172 #ifdef CONFIG_USB_INVENTRA_DMA
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186 #endif
1187
1188
/*
 * Service a TX-endpoint interrupt: classify errors (stall, 3-strikes,
 * NAK timeout), account for completed DMA or PIO data, and either
 * finish the URB, program the next DMA segment, or PIO-load the next
 * packet.  Note the unusual backward "goto done" from the sg-list
 * error path near the bottom back to the error-handling block.
 */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int pipe;
	bool done = false;
	u16 tx_csr;
	size_t length = 0;
	size_t offset = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->out_qh;
	struct urb *urb = next_urb(qh);
	u32 status = 0;
	void __iomem *mbase = musb->mregs;
	struct dma_channel *dma;
	bool transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	trace_musb_urb_tx(musb, urb);
	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		musb_dbg(musb, "TX end %d stall", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			/* rotate to the next qh sharing this endpoint */
			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			musb_dbg(musb, "TX ep%d device not responding", epnum);
			/* NOTE: retry by re-asserting TXPKTRDY; the
			 * NAK-limit hardware timer bounds each attempt,
			 * so this isn't an unbounded busy retry.
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA done, but the hardware may still be draining the
		 * FIFO.  The DMAMODE handling below works around an
		 * apparent race between the DMA-completion and
		 * TXPKTRDY interrupts — NOTE(review): the exact
		 * hardware behavior is inferred from the write
		 * sequence; treat as load-bearing and do not reorder.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/* clear DMAENAB/TXPKTRDY first if a packet is
			 * still pending, then DMAMODE in a separate
			 * write (W1C bits kept asserted) */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/* re-read to pick up the actual FIFO state */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/* FIFO still holds data: wait for the next interrupt */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			musb_dbg(musb,
				"DMA complete but FIFO not empty, CSR %04x",
				tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				/* advance to the next iso frame */
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* short segment, or whole buffer sent without a
			 * requested trailing zero-length packet */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* URB was unlinked/killed meanwhile: finish it regardless */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		/* program the next DMA segment */
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		musb_dbg(musb, "not complete, but DMA enabled?");
		return;
	}

	/* PIO: load the next packet (at most one maxpacket) */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/* no linear buffer: walk the scatterlist instead.  On an empty
	 * sg list, jump BACK to the "done:" error block above.
	 */
	if (!urb->transfer_buffer) {
		/* sg_miter_start was done by the first PIO load */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
1455
1456 #ifdef CONFIG_USB_TI_CPPI41_DMA
1457
1458 static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1459 struct musb_hw_ep *hw_ep,
1460 struct musb_qh *qh,
1461 struct urb *urb,
1462 size_t len)
1463 {
1464 struct dma_channel *channel = hw_ep->rx_channel;
1465 void __iomem *epio = hw_ep->regs;
1466 dma_addr_t *buf;
1467 u32 length;
1468 u16 val;
1469
1470 buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
1471 (u32)urb->transfer_dma;
1472
1473 length = urb->iso_frame_desc[qh->iso_idx].length;
1474
1475 val = musb_readw(epio, MUSB_RXCSR);
1476 val |= MUSB_RXCSR_DMAENAB;
1477 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1478
1479 return dma->channel_program(channel, qh->maxpacket, 0,
1480 (u32)buf, length);
1481 }
1482 #else
/* Build without CPPI 4.1: ISO RX DMA is unavailable, never "done". */
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
					 struct musb_hw_ep *hw_ep,
					 struct musb_qh *qh,
					 struct urb *urb,
					 size_t len)
{
	return false;
}
1491 #endif
1492
1493 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
1494 defined(CONFIG_USB_TI_CPPI41_DMA)
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
/*
 * Handle completion of one RX DMA segment of @len bytes for Inventra,
 * UX500 or CPPI 4.1 DMA.  Updates iso bookkeeping for isochronous
 * pipes, decides whether the whole URB is finished, and re-arms
 * REQPKT when more data is expected.
 *
 * Returns true when the transfer is complete (caller gives the URB back).
 */
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
				       struct musb_hw_ep *hw_ep,
				       struct musb_qh *qh,
				       struct urb *urb,
				       size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 val;
	int pipe;
	bool done;

	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;
		d->actual_length = len;

		/*
		 * Keep hard errors recorded earlier (babble/overflow);
		 * otherwise the DMA completion means this frame succeeded.
		 */
		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
			d->status = 0;

		if (++qh->iso_idx >= urb->number_of_packets) {
			done = true;
		} else {
			/* queue DMA for the next iso frame, if supported */
			if (musb_dma_cppi41(hw_ep->musb))
				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
						urb, len);
			/*
			 * NOTE(review): the cppi41 return value is
			 * immediately overwritten here, so the URB is never
			 * completed from this branch.  This matches the
			 * long-standing upstream behavior -- confirm intent
			 * before "fixing".
			 */
			done = false;
		}

	} else {
		/* non-iso: done on full buffer, short packet, or rx done */
		done = (urb->actual_length + len >=
			urb->transfer_buffer_length
			|| channel->actual_len < qh->maxpacket
			|| channel->rx_packet_done);
	}

	/* more to read: ask the device for the next IN packet */
	if (!done) {
		val = musb_readw(epio, MUSB_RXCSR);
		val |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
	}

	return done;
}
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
/*
 * Start an RX DMA for the packet currently sitting in the FIFO
 * (RXPKTRDY already set).  Chooses the transfer length and buffer
 * position (iso vs. non-iso), programs RXCSR for DMA, then hands the
 * buffer to the DMA controller.
 *
 * Returns non-zero when the channel was programmed successfully; on
 * failure the channel is released and RXCSR DMA bits are cleared so
 * the caller can fall back to PIO.
 */
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
					  struct musb_hw_ep *hw_ep,
					  struct musb_qh *qh,
					  struct urb *urb,
					  size_t len,
					  u8 iso_err)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	struct dma_channel *channel = hw_ep->rx_channel;
	u16 rx_count, val;
	int length, pipe, done;
	dma_addr_t buf;

	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		int d_status = 0;
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;

		/* record a CRC/bitstuff error seen on this frame */
		if (iso_err) {
			d_status = -EILSEQ;
			urb->error_count++;
		}
		/* clamp to the frame's buffer; flag overflow once */
		if (rx_count > d->length) {
			if (d_status == 0) {
				d_status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "** OVERFLOW %d into %d",
				rx_count, d->length);

			length = d->length;
		} else
			length = rx_count;
		d->status = d_status;
		buf = urb->transfer_dma + d->offset;
	} else {
		length = rx_count;
		buf = urb->transfer_dma + urb->actual_length;
	}

	/* mode 0: one packet per DMA request (the safe default) */
	channel->desired_mode = 0;
#ifdef USE_MODE1
	/*
	 * Mode 1 streams multiple packets per request; only usable when
	 * a short packet must be an error (URB_SHORT_NOT_OK) and more
	 * than one max-size packet remains.
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
	    && (urb->transfer_buffer_length - urb->actual_length)
	    > qh->maxpacket)
		channel->desired_mode = 1;
	if (rx_count < hw_ep->max_packet_sz_rx) {
		length = rx_count;
		channel->desired_mode = 0;
	} else {
		length = urb->transfer_buffer_length;
	}
#endif

	/* stop requesting further IN packets while DMA drains the FIFO */
	val = musb_readw(epio, MUSB_RXCSR);
	val &= ~MUSB_RXCSR_H_REQPKT;

	if (channel->desired_mode == 0)
		val &= ~MUSB_RXCSR_H_AUTOREQ;
	else
		val |= MUSB_RXCSR_H_AUTOREQ;
	val |= MUSB_RXCSR_DMAENAB;

	/* autoclear only works for single-packet (non high-bandwidth) eps */
	if (qh->hb_mult == 1)
		val |= MUSB_RXCSR_AUTOCLEAR;

	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

	/*
	 * RXCSR must be programmed before the channel; the DMA engine may
	 * start immediately once programmed.
	 */
	done = dma->channel_program(channel, qh->maxpacket,
				    channel->desired_mode,
				    buf, length);

	/* on failure, drop the channel and undo the DMA-mode CSR bits */
	if (!done) {
		dma->channel_release(channel);
		hw_ep->rx_channel = NULL;
		channel = NULL;
		val = musb_readw(epio, MUSB_RXCSR);
		val &= ~(MUSB_RXCSR_DMAENAB
			 | MUSB_RXCSR_H_AUTOREQ
			 | MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, val);
	}

	return done;
}
1698 #else
/* No Inventra/UX500/CPPI41 DMA compiled in: never completes via DMA. */
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
					      struct musb_hw_ep *hw_ep,
					      struct musb_qh *qh,
					      struct urb *urb,
					      size_t len)
{
	return false;
}
1707
/* No Inventra/UX500/CPPI41 DMA compiled in: DMA start always "fails". */
static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
						 struct musb_hw_ep *hw_ep,
						 struct musb_qh *qh,
						 struct urb *urb,
						 size_t len,
						 u8 iso_err)
{
	return false;
}
1717 #endif
1718
1719
1720
1721
1722
/*
 * Service an RX-endpoint interrupt: handle error conditions (stall,
 * three-strikes error, NAK timeout, iso data error, incomplete
 * high-bandwidth iso), finish or continue a DMA transfer, or read the
 * pending packet via PIO.  On completion the URB is given back and the
 * next one scheduled.
 *
 * NOTE(review): presumably called from the interrupt handler with
 * musb->lock held -- confirm against the caller in musb_core.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb *urb;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	struct dma_controller *c = musb->dma_controller;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	size_t xfer_len;
	void __iomem *mbase = musb->mregs;
	u16 rx_csr, val;
	bool iso_err = false;
	bool done = false;
	u32 status;
	struct dma_channel *dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/*
		 * Interrupt with nothing queued: log it and discard
		 * whatever landed in the FIFO so the endpoint is clean.
		 */
		musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
			epnum, val, musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	trace_musb_urb_rx(musb, urb);

	/* classify the interrupt cause before touching any data */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		musb_dbg(musb, "RX end %d STALL", epnum);

		/* device endpoint halted; report -EPIPE to the driver */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_err(musb->controller, "ep%d RX three-strikes error", epnum);

		/* hardware gave up after three failed attempts */
		status = -ESHUTDOWN;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

		rx_csr &= ~MUSB_RXCSR_H_ERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			/* bulk/interrupt: DATAERROR means NAK timeout */
			musb_dbg(musb, "RX end %d NAK timeout", epnum);

			/*
			 * A NAKing bulk device sharing the multiplexed
			 * bulk endpoint must not starve the others:
			 * rotate the queue instead of retrying forever.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			musb_dbg(musb, "RX end %d ISO data error", epnum);
			/* iso CRC/bitstuff error: record per-frame below */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
				epnum);
		status = -EPROTO;
	}

	/* a fatal error: abort any DMA, flush, and complete the URB */
	if (status) {
		/* stop the in-flight DMA before touching the FIFO */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN: DMA still busy with no error */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/*
	 * On non-Inventra/UX500 DMA, a still-set REQPKT with DMA active
	 * indicates an unexpected state: abort the DMA and clear REQPKT.
	 */
	if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
	    (rx_csr & MUSB_RXCSR_H_REQPKT)) {

		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}

	/* DMA was active: account for it and decide whether to continue */
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) {
			done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
			musb_dbg(hw_ep->musb,
				"ep %d dma %s, rxcsr %04x, rxcount %d",
				epnum, done ? "off" : "reset",
				musb_readw(epio, MUSB_RXCSR),
				musb_readw(epio, MUSB_RXCOUNT));
		} else {
			done = true;
		}

	} else if (urb->status == -EINPROGRESS) {
		/* a packet arrived; either start DMA for it or PIO it out */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* stop requesting; something is out of sync */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* try to hand the FIFO contents to DMA */
		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) && dma) {
			musb_dbg(hw_ep->musb,
				"RX%d count %d, buffer 0x%llx len %d/%d",
				epnum, musb_readw(epio, MUSB_RXCOUNT),
				(unsigned long long) urb->transfer_dma
				+ urb->actual_length,
				qh->offset,
				urb->transfer_buffer_length);

			if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
							   xfer_len, iso_err))
				goto finish;
			else
				dev_err(musb->controller, "error: rx_dma failed\n");
		}

		if (!dma) {
			unsigned int received_len;

			/* PIO path: read from the FIFO by CPU */
			usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

			/*
			 * No virtual buffer means the URB is sg-based:
			 * start a one-entry sg iterator for this packet.
			 */
			if (!urb->transfer_buffer) {
				qh->use_sg = true;
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						sg_flags);
			}

			if (qh->use_sg) {
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					status = -EINVAL;
					done = true;
					goto finish;
				}
				/* point the packet reader at the sg window */
				urb->transfer_buffer = qh->sg_miter.addr;
				received_len = urb->actual_length;
				qh->offset = 0x0;
				done = musb_host_packet_rx(musb, urb, epnum,
						iso_err);
				/* bytes consumed from this sg entry */
				received_len = urb->actual_length -
					received_len;
				qh->sg_miter.consumed = received_len;
				sg_miter_stop(&qh->sg_miter);
			} else {
				done = musb_host_packet_rx(musb, urb,
						epnum, iso_err);
			}
			musb_dbg(musb, "read %spacket", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (qh->use_sg) {
			qh->use_sg = false;
			urb->transfer_buffer = NULL;
		}

		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
1981
1982
1983
1984
1985
1986
/*
 * Bind @qh to a hardware endpoint and, when the endpoint is idle,
 * start its first URB.  Control transfers always use endpoint 0;
 * other transfers claim the free endpoint whose FIFO best fits
 * (smallest non-negative slack); bulk transfers that find no free
 * endpoint fall back to the shared, round-robin bulk endpoint.
 *
 * Returns 0 on success, -ENOSPC when no endpoint fits.
 */
static int musb_schedule(
	struct musb *musb,
	struct musb_qh *qh,
	int is_in)
{
	int idle = 0;
	int best_diff;
	int best_end, epnum;
	struct musb_hw_ep *hw_ep = NULL;
	struct list_head *head = NULL;
	u8 toggle;
	u8 txtype;
	struct urb *urb = next_urb(qh);

	/* control is always EP0, shared via musb->control list */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* best-fit scan over EP1..N; 4096 exceeds any FIFO size */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int diff;

		/* skip endpoints already owning a qh in this direction */
		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		/* the dedicated bulk endpoint is the fallback, not a fit */
		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * NOTE(review): this assignment is redundant --
			 * hw_ep already tracks epnum in the loop header.
			 * Harmless, kept as-is.
			 */
			hw_ep = musb->endpoints + epnum;
			/*
			 * Avoid a bulk-OUT with DATA1 toggle on an endpoint
			 * whose TXTYPE still says ISO; presumably this works
			 * around a controller quirk -- confirm against the
			 * Mentor erratum before relying on it.
			 */
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}

	/* no private endpoint free: bulk can share the bulk endpoint */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/*
		 * NAK-timeout interval used to rotate among the queued
		 * bulk qhs so one NAKing device cannot starve the rest.
		 */
		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		dev_err(musb->controller,
				"%s hwep alloc failed for %dx%d\n",
				musb_ep_xfertype_string(qh->type),
				qh->hb_mult, qh->maxpacket);
		return -ENOSPC;
	}

	/* claimed a private endpoint: it is idle by construction */
	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
success:
	/* shared endpoints queue qhs and are idle only when empty */
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
2106
/*
 * hc_driver.urb_enqueue: link @urb to its endpoint, and on the first
 * URB for an endpoint allocate + initialize a musb_qh (cached register
 * values for type, address, hub/port, polling interval) and schedule it.
 * Subsequent URBs reuse the existing qh and just join its list.
 *
 * Returns 0 or a negative errno (-ENODEV, -ENOMEM, -EMSGSIZE, -ENOSPC,
 * or the usb_hcd_link_urb_to_ep() result).
 */
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active to accept work */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	trace_musb_urb_enq(musb, urb);

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/*
	 * Fast path: endpoint already has a qh, the URB is queued behind
	 * whatever is in flight; nothing else to set up.
	 */
	if (qh || ret)
		return ret;

	/*
	 * Slow path: first URB for this endpoint.  Allocate the qh
	 * outside the lock (mem_flags may allow sleeping).
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/*
	 * High-bandwidth (mult > 1) is only supported for ISO, and only
	 * in directions the controller was built for.
	 */
	qh->hb_mult = usb_endpoint_maxp_mult(epd);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			dev_err(musb->controller,
				"high bandwidth %s (%dx%d) not supported\n",
				musb_ep_xfertype_string(qh->type),
				qh->hb_mult, qh->maxpacket & 0x7ff);
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}

	qh->epnum = usb_endpoint_num(epd);

	/* cached value for the FADDR register */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* cached value for TXTYPE/RXTYPE: speed | xfer type | epnum */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* cached value for TXINTERVAL/RXINTERVAL */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low-speed interrupt uses the interval in frames
		 * directly (minimum 1); high speed falls through to the
		 * log-encoded form below.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		fallthrough;
	case USB_ENDPOINT_XFER_ISOC:
		/* log-encoded (2^(n-1) microframes), capped at 16 */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/*
		 * Bulk: no interval here; the NAK-timeout interval is set
		 * in musb_schedule() when the shared bulk ep is used.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;

	/* multipoint: record the hub address/port for split transactions */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* behind a transaction translator? */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/*
	 * Re-check under the lock: another enqueue may have installed a
	 * qh meanwhile, or our URB may already have been unlinked.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv || !next_urb(qh)) {
		/* lost the race (or URB gone): discard our qh */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* qh may be NULL here if the race above was lost */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
2295
2296
2297
2298
2299
2300
2301
/*
 * Abort the URB currently active on qh's hardware endpoint: stop any
 * DMA (crediting transferred bytes), flush the FIFO, clear the error/
 * ready CSR bits, and — if the DMA abort succeeded — give the URB back
 * and advance the schedule.
 *
 * Caller context: NOTE(review): presumably holds musb->lock (both
 * callers in this file take it) -- confirm.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;
	struct dma_channel	*dma = NULL;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* flush without clearing the data toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* clear any stale platform-level rx interrupt */
		if (is_dma_capable() && dma)
			musb_platform_clear_ep_rxintr(musb, ep->epnum);
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/*
		 * NOTE(review): TXCSR is deliberately written twice here
		 * (long-standing upstream behavior, presumably to make
		 * sure the bits latch) -- confirm before simplifying.
		 */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else  {
		musb_h_ep0_flush_fifo(ep);
	}
	/* only advance when the DMA abort reported success */
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
2356
/*
 * hc_driver.urb_dequeue: unlink @urb.  An URB that is not currently
 * active on the hardware is simply given back (freeing the qh when it
 * becomes empty); the active URB goes through musb_cleanup_urb() to
 * abort DMA and flush the endpoint first.
 */
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	trace_musb_urb_deq(musb, urb);

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * URB is inactive if the qh is not ready, the URB is not the
	 * head of its endpoint's list, or the qh is not what the
	 * hardware endpoint is currently servicing.
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		/* clear is_ready so giveback won't restart the qh */
		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/*
		 * Last URB gone and qh was idle: nothing will ever free
		 * it via the normal completion path, so free it here.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
2411
2412
/*
 * hc_driver.endpoint_disable: abort all pending URBs on @hep and free
 * its qh.  When the qh is active on the hardware, the in-flight URB is
 * cleaned up first and the remaining URBs are drained through
 * musb_advance_schedule() (which frees the qh when the list empties);
 * an inactive qh is drained with musb_giveback() and freed here.
 */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* prevent any new URB from restarting this qh while we drain */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* mark the in-flight URB dead unless already unlinked */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* abort hardware activity for the head URB */
		musb_cleanup_urb(urb, qh);

		/*
		 * Drain the rest; advance_schedule gives each back and
		 * disposes of the qh once the endpoint list is empty.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* qh never reached hardware: just give everything back */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
2465
2466 static int musb_h_get_frame_number(struct usb_hcd *hcd)
2467 {
2468 struct musb *musb = hcd_to_musb(hcd);
2469
2470 return musb_readw(musb->mregs, MUSB_FRAME);
2471 }
2472
2473 static int musb_h_start(struct usb_hcd *hcd)
2474 {
2475 struct musb *musb = hcd_to_musb(hcd);
2476
2477
2478
2479
2480 hcd->state = HC_STATE_RUNNING;
2481 musb->port1_status = 0;
2482 return 0;
2483 }
2484
2485 static void musb_h_stop(struct usb_hcd *hcd)
2486 {
2487 musb_stop(hcd_to_musb(hcd));
2488 hcd->state = HC_STATE_HALT;
2489 }
2490
/*
 * hc_driver.bus_suspend: suspend the root port, then nudge the OTG
 * state machine and refuse (-EBUSY) if the controller is still active.
 */
static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;
	int		ret;

	ret = musb_port_suspend(musb, true);
	if (ret)
		return ret;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/*
		 * VBUS already valid while still in WAIT_VRISE: move on
		 * to waiting for connect (A_WAIT_BCON).
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				usb_otg_state_string(musb->xceiv->otg->state));
		return -EBUSY;
	} else
		return 0;
}
2527
2528 static int musb_bus_resume(struct usb_hcd *hcd)
2529 {
2530 struct musb *musb = hcd_to_musb(hcd);
2531
2532 if (musb->config &&
2533 musb->config->host_port_deassert_reset_at_resume)
2534 musb_port_reset(musb, false);
2535
2536 return 0;
2537 }
2538
2539 #ifndef CONFIG_MUSB_PIO_ONLY
2540
2541 #define MUSB_USB_DMA_ALIGN 4
2542
/*
 * Bounce buffer used when an URB's transfer buffer is not aligned to
 * MUSB_USB_DMA_ALIGN (see musb_alloc_temp_buffer / musb_free_temp_buffer).
 */
struct musb_temp_buffer {
	void *kmalloc_ptr;	/* raw allocation; this is what gets freed */
	void *old_xfer_buffer;	/* URB's original transfer_buffer */
	u8 data[];		/* aligned payload area handed to the URB */
};
2548
/*
 * Undo musb_alloc_temp_buffer(): for IN transfers copy the received
 * data back to the caller's original buffer, restore the URB's
 * transfer_buffer pointer, free the bounce allocation, and clear the
 * URB_ALIGNED_TEMP_BUFFER flag.  No-op when no bounce buffer is in use.
 */
static void musb_free_temp_buffer(struct urb *urb)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	size_t length;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* urb->transfer_buffer points at temp->data inside the bounce buf */
	temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
			    data);

	if (dir == DMA_FROM_DEVICE) {
		/*
		 * Iso URBs report per-frame lengths, so actual_length is
		 * not meaningful here; copy the whole buffer instead.
		 */
		if (usb_pipeisoc(urb->pipe))
			length = urb->transfer_buffer_length;
		else
			length = urb->actual_length;

		memcpy(temp->old_xfer_buffer, temp->data, length);
	}
	urb->transfer_buffer = temp->old_xfer_buffer;
	kfree(temp->kmalloc_ptr);

	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
2576
/*
 * If the URB's transfer buffer is misaligned for DMA, substitute an
 * aligned bounce buffer (copying outbound data in) and flag the URB
 * with URB_ALIGNED_TEMP_BUFFER so the unmap path can undo it.
 *
 * Returns 0 on success or when no bounce is needed, -ENOMEM otherwise.
 */
static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	void *kmalloc_ptr;
	size_t kmalloc_size;

	/* sg-based, empty, or already-aligned buffers need no bounce */
	if (urb->num_sgs || urb->sg ||
	    urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
		return 0;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* payload + header + worst-case alignment slack */
	kmalloc_size = urb->transfer_buffer_length +
		sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;

	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/* position the header so that temp->data is aligned */
	temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);

	/* remember what to free and what to restore */
	temp->kmalloc_ptr = kmalloc_ptr;
	temp->old_xfer_buffer = urb->transfer_buffer;
	if (dir == DMA_TO_DEVICE)
		memcpy(temp->data, urb->transfer_buffer,
		       urb->transfer_buffer_length);
	urb->transfer_buffer = temp->data;

	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}
2614
2615 static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2616 gfp_t mem_flags)
2617 {
2618 struct musb *musb = hcd_to_musb(hcd);
2619 int ret;
2620
2621
2622
2623
2624
2625
2626
2627 if (musb->hwvers < MUSB_HWVERS_1800)
2628 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2629
2630 ret = musb_alloc_temp_buffer(urb, mem_flags);
2631 if (ret)
2632 return ret;
2633
2634 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2635 if (ret)
2636 musb_free_temp_buffer(urb);
2637
2638 return ret;
2639 }
2640
2641 static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2642 {
2643 struct musb *musb = hcd_to_musb(hcd);
2644
2645 usb_hcd_unmap_urb_for_dma(hcd, urb);
2646
2647
2648 if (musb->hwvers < MUSB_HWVERS_1800)
2649 return;
2650
2651 musb_free_temp_buffer(urb);
2652 }
2653 #endif
2654
/* hc_driver glue binding this file's callbacks into the USB core. */
static const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	/* hcd_priv holds a single pointer back to our struct musb */
	.hcd_priv_size		= sizeof(struct musb *),
	.flags			= HCD_USB2 | HCD_DMA | HCD_MEMORY,

	/* lifecycle */
	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	/* URB and endpoint management */
	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

#ifndef CONFIG_MUSB_PIO_ONLY
	/* DMA-alignment bounce buffering */
	.map_urb_for_dma	= musb_map_urb_for_dma,
	.unmap_urb_for_dma	= musb_unmap_urb_for_dma,
#endif

	/* root hub */
	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
};
2686
2687 int musb_host_alloc(struct musb *musb)
2688 {
2689 struct device *dev = musb->controller;
2690
2691
2692 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2693 if (!musb->hcd)
2694 return -EINVAL;
2695
2696 *musb->hcd->hcd_priv = (unsigned long) musb;
2697 musb->hcd->self.uses_pio_for_control = 1;
2698 musb->hcd->uses_new_polling = 1;
2699 musb->hcd->has_tt = 1;
2700
2701 return 0;
2702 }
2703
/* Remove the hcd, unless the controller runs peripheral-only. */
void musb_host_cleanup(struct musb *musb)
{
	if (musb->port_mode == MUSB_PERIPHERAL)
		return;
	usb_remove_hcd(musb->hcd);
}
2710
/* Drop the final reference to the hcd allocated in musb_host_alloc(). */
void musb_host_free(struct musb *musb)
{
	usb_put_hcd(musb->hcd);
}
2715
/*
 * Register the hcd with the USB core and wire it into the OTG
 * transceiver.  @power_budget is in 2 mA units (0 selects a 500 mA
 * default).  Returns 0 or the usb_add_hcd() error.
 */
int musb_host_setup(struct musb *musb, int power_budget)
{
	int ret;
	struct usb_hcd *hcd = musb->hcd;

	if (musb->port_mode == MUSB_HOST) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	}
	otg_set_host(musb->xceiv->otg, &hcd->self);
	/* don't support otg protocols */
	hcd->self.otg_port = 0;
	musb->xceiv->otg->host = &hcd->self;
	/* budget is mA, hcd wants 2 mA units; default 250 => 500 mA */
	hcd->power_budget = 2 * (power_budget ? : 250);
	hcd->skip_phy_initialization = 1;

	ret = usb_add_hcd(hcd, 0, 0);
	if (ret < 0)
		return ret;

	device_wakeup_enable(hcd->self.controller);
	return 0;
}
2739
/* Ask usbcore to resume our root hub (e.g. after remote wakeup). */
void musb_host_resume_root_hub(struct musb *musb)
{
	usb_hcd_resume_root_hub(musb->hcd);
}
2744
2745 void musb_host_poke_root_hub(struct musb *musb)
2746 {
2747 MUSB_HST_MODE(musb);
2748 if (musb->hcd->status_urb)
2749 usb_hcd_poll_rh_status(musb->hcd);
2750 else
2751 usb_hcd_resume_root_hub(musb->hcd);
2752 }