// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2001-2004 by David Brownell
 */

/* this file is part of ehci-hcd.c */

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list
 * "qtd" entries describing USB transactions; ISO traffic uses its own
 * "itd" and "sitd" entries instead, reusing only small parts of this code.
 */

/* PID codes from the qtd token (EHCI spec, Table 3-16) */
#define PID_CODE_IN	1
#define PID_CODE_SETUP	2

/* fill a qtd, returning how much of the buffer we were able to queue up */
static unsigned int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		  size_t len, int token, int maxpacket)
{
	unsigned int	count;
	u64		addr = buf;
	int		i;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}
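
/*
 * Worked example (illustrative only, not taken from this driver's
 * documentation): queue a hypothetical 20480-byte bulk OUT buffer that
 * starts at offset 0x200 into a page, with maxpacket 512.  hw_buf[0]
 * covers the 0xE00 bytes left in that first page; hw_buf[1..4] cover four
 * more 4K pages, for 0xE00 + 4 * 0x1000 = 0x4E00 (19968) bytes.  Since
 * that is less than len and not the end of the transfer, count is rounded
 * down to a multiple of maxpacket (it already is), qtd_fill() returns
 * 19968, and the caller loops to build another qtd for the last 512 bytes.
 */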

/*
 * Reset the QH's overlay (current qtd pointer and token) from the given
 * qtd.  Only safe while the QH is idle, i.e. not visible to the hardware.
 */
static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
		unsigned	is_out, epnum;

		is_out = qh->is_out;
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle(qh->ps.udev, epnum, is_out, 1);
		}
	}

	/* clear ACTIVE and HALT, keeping only the toggle and ping state */
	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}

/*
 * Make the QH's hardware overlay point at the first unprocessed qtd.
 * Used just before an idle QH is (re)linked into a schedule.
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);

	/*
	 * first qtd may already be partially processed.
	 * If we came here during unlink, the QH overlay region
	 * might have reference to the just unlinked qtd. The
	 * qtd is updated in qh_completions(). Update the QH
	 * overlay here.
	 */
	if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
		qh->hw->hw_qtd_next = qtd->hw_next;
		if (qh->should_be_inactive)
			ehci_warn(ehci, "qh %p should be inactive!\n", qh);
	} else {
		qh_update(ehci, qh, qtd);
	}
	qh->should_be_inactive = 0;
}

static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh = ep->hcpriv;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& ehci->rh_state == EHCI_RH_RUNNING)
		qh_link_async(ehci, qh);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
		struct urb *urb, u32 token)
{
	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef CONFIG_DYNAMIC_DEBUG
		struct usb_device *tt = urb->dev->tt->hub;
		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);
#endif /* CONFIG_DYNAMIC_DEBUG */
		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
				   ehci_to_hcd(ehci)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
}

static int qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely(QTD_PID(token) != PID_CODE_SETUP))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/*
		 * When MMF is active and PID Code is IN, queue is halted.
		 * EHCI Specification, Table 4-13.
		 */
		} else if ((token & QTD_STS_MMF) &&
					(QTD_PID(token) == PID_CODE_IN)) {
			status = -EPROTO;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == PID_CODE_IN)
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}
	}

	return status;
}

static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
{
	if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
		/* ... update hc-wide periodic stats */
		ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
	}

	if (unlikely(urb->unlinked)) {
		INCR(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		INCR(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
}

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns nonzero if the caller should
 * unlink qh.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status;
	int			stopped;
	u8			state;
	struct ehci_qh_hw	*hw = qh->hw;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_intr().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

 rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->dequeue_during_giveback = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* Report Data Buffer Error: non-fatal but useful */
			if (token & QTD_STS_DBE)
				ehci_dbg(ehci,
					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
					urb,
					usb_endpoint_num(&urb->ep->desc),
					usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
					urb->transfer_buffer_length,
					qtd,
					qh);

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
	"detected XactErr len %zu/%zu retry %d\n",
	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;
				qh->unlink_reason |= QH_UNLINK_HALTED;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
				qh->unlink_reason |= QH_UNLINK_SHORT_READ;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& ehci->rh_state >= EHCI_RH_RUNNING)) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (ehci->rh_state < EHCI_RH_RUNNING) {
				last_status = -ESHUTDOWN;
				qh->unlink_reason |= QH_UNLINK_SHUTDOWN;
			}

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/*
			 * If this was the active qtd when the qh was unlinked
			 * and the overlay's token is active, then the overlay
			 * hasn't been written back to the qtd yet so use its
			 * token instead of the qtd's.  After the qtd is
			 * processed and removed, the overlay must be fixed up.
			 */
			if (state == QH_STATE_IDLE &&
					qh->qtd_list.next == &qtd->qtd_list &&
					(hw->hw_token & ACTIVE_BIT(ehci))) {
				token = hc32_to_cpu(ehci, hw->hw_token);
				hw->hw_token &= ~ACTIVE_BIT(ehci);
				qh->should_be_inactive = 1;

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				ehci_clear_tt_buffer(ehci, qh, urb, token);
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short
		 * read cases with only one data qtd (including control
		 * transfers), queue processing won't halt.  but with two or
		 * more qtds (for example, with a 32 KB transfer), when the
		 * first qtd gets a short read the second must be removed by
		 * hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
					last_status != -EREMOTEIO)) {
				/* The TT's in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				if (last_status != -EPIPE)
					ehci_clear_tt_buffer(ehci, qh, urb,
							token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		ehci_qtd_free (ehci, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->dequeue_during_giveback)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise the caller must unlink the QH. */
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* Stop scanning if the QH is still linked but the hardware may
	 * still be using it.  Refreshing a linked QH would race with the
	 * controller:
	 *  - HC reads first part of QH;
	 *  - CPU updates that first part and the token;
	 *  - HC reads rest of that QH, including token
	 * Result: HC gets an inconsistent image, and then
	 * DMAs to/from the wrong memory (corrupting it).
	 *
	 * So flag the QH for unlinking if the queue stopped, or if the
	 * hardware overlay has advanced onto the dummy qtd.
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
		qh->unlink_reason |= QH_UNLINK_DUMMY_OVERLAY;

	/* Let the caller know if the QH needs to be unlinked. */
	return qh->unlink_reason;
}
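
/*
 * Illustrative call pattern (a sketch mirroring scan_async() further below,
 * not new driver behavior): a scanner walks the async ring and, whenever
 * qh_completions() reports a nonzero unlink_reason, schedules that QH for
 * unlinking:
 *
 *	if (!list_empty(&qh->qtd_list) && qh_completions(ehci, qh))
 *		start_unlink_async(ehci, qh);
 */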

/*
 * Reverse of qh_urb_transaction: free a list of TDs.
 * Used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_mapped_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		buf = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		buf = urb->transfer_dma;
		this_sg_len = len;
	}

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = usb_maxpacket(urb->dev, urb->pipe);

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		unsigned int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
				maxpacket);
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * other OUT ones may need a terminating short packet
	 * (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipeout(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}
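
/*
 * Example (illustrative): for a typical control IN transfer with a short
 * data stage, qh_urb_transaction() leaves three qtds on "head":
 *
 *	qtd1: SETUP, 8 bytes from urb->setup_dma, DATA0
 *	qtd2: IN data stage from urb->transfer_dma, DATA1; as the last data
 *	      qtd its hw_alt_next ends up as EHCI_LIST_END, so a short read
 *	      simply falls through to the status stage
 *	qtd3: OUT status stage, zero length, DATA1, with QTD_IOC set
 *
 * The toggles follow the token ^= QTD_TOGGLE / token |= QTD_TOGGLE
 * arithmetic above; the number of data qtds depends on buffer size and
 * alignment (see qtd_fill()).
 */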

/*
 * Would be best to create all qh's from config descriptors,
 * when each interface/altsetting is established.  Unlink
 * any previous qh and cancel its urbs first; endpoints are
 * implicitly reset then (data toggle too).
 * That'd mean updating how usb_hcd_set_configuration() and
 * usb_hcd_alloc_bandwidth() handle this ...
 */

/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask depends on speed, direction,
 * and transfer type.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	struct usb_host_endpoint *ep;
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	int			mult;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	ep = usb_pipe_endpoint (urb->dev, urb->pipe);
	maxp = usb_endpoint_maxp (&ep->desc);
	mult = usb_endpoint_maxp_mult (&ep->desc);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, using multiple transactions with 1KB
	 * each, so the "mult" term is handled separately.
	 */
	if (maxp > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		unsigned	tmp;

		qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0, mult * maxp));
		qh->ps.phase = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->ps.c_usecs = 0;
			qh->gap_uf = 0;

			if (urb->interval > 1 && urb->interval < 8) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (urb->interval > ehci->periodic_size << 3) {
				urb->interval = ehci->periodic_size << 3;
			}
			qh->ps.period = urb->interval >> 3;

			/* period for bandwidth allocation */
			tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
					1 << (urb->ep->desc.bInterval - 1));

			/* Allow urb->interval to override */
			qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
			qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
				qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
				qh->ps.usecs = HS_USECS(1);
			} else {		/* SPLIT+DATA, gap, CSPLIT */
				qh->ps.usecs += HS_USECS(1);
				qh->ps.c_usecs = HS_USECS(0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->ps.tt_usecs = NS_TO_US(think_time +
					usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp));
			if (urb->interval > ehci->periodic_size)
				urb->interval = ehci->periodic_size;
			qh->ps.period = urb->interval;

			/* period for bandwidth allocation */
			tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
					urb->ep->desc.bInterval);
			tmp = rounddown_pow_of_two(tmp);

			/* Allow urb->interval to override */
			qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
			qh->ps.bw_uperiod = qh->ps.bw_period << 3;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->ps.udev = urb->dev;
	qh->ps.ep = urb->ep;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= QH_LOW_SPEED;
		fallthrough;

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= QH_CONTROL_EP;		/* for TT */
			info1 |= QH_TOGGLE_CTL;		/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= QH_HIGH_SPEED;
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= QH_TOGGLE_CTL;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= maxp << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= maxp << 16;
			info2 |= mult << 30;
		}
		break;
	default:
		ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
			urb->dev->speed);
done:
		qh_destroy(ehci, qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ehci, info1);
	hw->hw_info2 = cpu_to_hc32(ehci, info2);
	qh->is_out = !is_input;
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	return qh;
}
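
/*
 * Example (illustrative values only): for a high-speed bulk OUT endpoint 1
 * on device address 5 with wMaxPacketSize 512, qh_make() ends up with
 * roughly
 *
 *	info1 = (EHCI_TUNE_RL_HS << 28) | (512 << 16) | QH_HIGH_SPEED |
 *		(1 << 8) | 5;
 *	info2 = EHCI_TUNE_MULT_HS << 30;
 *
 * i.e. NAK reload count, maxpacket, endpoint speed, endpoint number, and
 * device address in hw_info1, and the transaction multiplier in hw_info2
 * (see EHCI spec section 3.6).
 */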

static void enable_async(struct ehci_hcd *ehci)
{
	if (ehci->async_count++)
		return;

	/* Stop waiting to turn off the async schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);

	/* Don't start the schedule until ASS is 0 */
	ehci_poll_ASS(ehci);
	turn_on_io_watchdog(ehci);
}

static void disable_async(struct ehci_hcd *ehci)
{
	if (--ehci->async_count)
		return;

	/* The async schedule and unlink lists are supposed to be empty */
	WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
			!list_empty(&ehci->async_idle));

	/* Don't turn off the schedule until ASS is 1 */
	ehci_poll_ASS(ehci);
}

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	/* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt))
		return;

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(ehci, qh);

	/* splice right after start */
	head = ehci->async;
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh->unlink_reason = 0;
	/* qtd completions reported later by interrupt */

	enable_async(ehci);
}
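
/*
 * After qh_link_async() the async schedule is a circular list headed by
 * ehci->async; a sketch (descriptive only) with one linked QH:
 *
 *	async head --hw_next--> qh --hw_next--> async head ...
 *	async head --qh_next--> qh --qh_next--> previous first qh (or NULL)
 *
 * New QHs are always spliced in right after the head, so unlinking walks
 * forward from ehci->async to find the predecessor (single_unlink_async()).
 */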

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {
			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd	*dummy;
			dma_addr_t	dma;
			__hc32		token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);

			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh;
		}
	}
	return qh;
}
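
/*
 * Sketch of the dummy-qtd swap above (no new behavior, just the
 * before/after picture): the QH's queue always ends in qh->dummy, a halted
 * qtd the hardware already links to.  To append safely while the HC may be
 * running the queue, the first new qtd's contents are copied into the old
 * dummy, the first new qtd is re-initialized to become the new dummy at the
 * tail, and only then is the old dummy's token flipped from halted to the
 * real (active) token.  The HC therefore never sees a half-built
 * transaction.
 */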

static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	{
		struct ehci_qtd *qtd;
		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
		ehci_dbg(ehci,
			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
			 __func__, urb->dev->devpath, urb,
			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
			 urb->transfer_buffer_length,
			 qtd, urb->ep->hcpriv);
	}
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ehci, qh);
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}

#ifdef CONFIG_USB_HCD_TEST_MODE
/*
 * This function creates the qtds and submits them for the
 * SINGLE_STEP_SET_FEATURE Test.
 * This is done in two parts: first SETUP req for GetDesc is sent then
 * 15 seconds later, the IN stage for GetDesc starts to req data from dev
 *
 * is_setup : i/p argument decides which of the two stages needs to be
 * performed; TRUE - SETUP and FALSE - IN+STATUS
 * Returns 0 if success
 */
static int ehci_submit_single_step_set_feature(
	struct usb_hcd	*hcd,
	struct urb	*urb,
	int		is_setup
) {
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct list_head	qtd_list;
	struct list_head	*head;

	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, maxpacket;
	u32			token;

	INIT_LIST_HEAD(&qtd_list);
	head = &qtd_list;

	/* URBs map to sequences of QTDs:  one logical transaction */
	qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
	if (unlikely(!qtd))
		return -1;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);

	len = urb->transfer_buffer_length;
	/*
	 * Check if the request is to perform just the SETUP stage (getDesc)
	 * or it is to perform just the IN stage of the earlier GetDescriptor
	 * request.
	 */
	if (is_setup) {
		/* SETUP pid, and interrupt after SETUP completion */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof(struct usb_ctrlrequest),
				QTD_IOC | token | (2 /* "setup" */ << 8), 8);

		submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
		return 0;	/* IN stage is submitted 15 seconds later */
	}

	/* IN stage for GetDescriptor(Device) */

	/* ... and always at least one more pid */
	token ^= QTD_TOGGLE;
	buf = urb->transfer_dma;

	token |= (1 /* "in" */ << 8);	/* IN pid */

	maxpacket = usb_maxpacket(urb->dev, urb->pipe);

	qtd_fill(ehci, qtd, buf, len, token, maxpacket);

	/*
	 * A short read is expected here; keep the queue running into the
	 * status stage instead of stopping for manual cleanup.
	 */
	qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/* STATUS stage for GetDesc control request */
	token ^= 0x0100;	/* "in" <--> "out"  */
	token |= QTD_TOGGLE;	/* force DATA1 */

	qtd_prev = qtd;
	qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
	if (unlikely(!qtd))
		goto cleanup;
	qtd->urb = urb;
	qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
	list_add_tail(&qtd->qtd_list, head);

	/* Interrupt after STATUS completion */
	qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);

	submit_async(ehci, urb, &qtd_list, GFP_KERNEL);

	return 0;

cleanup:
	qtd_list_free(ehci, urb, head);
	return -1;
}
#endif /* CONFIG_USB_HCD_TEST_MODE */

static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh		*prev;

	/* Add to the end of the list of QHs waiting for the next IAAD */
	qh->qh_state = QH_STATE_UNLINK_WAIT;
	list_add_tail(&qh->unlink_node, &ehci->async_unlink);

	/* Unlink it from the schedule */
	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw->hw_next = qh->hw->hw_next;
	prev->qh_next = qh->qh_next;
	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = qh->qh_next.qh;
}

static void start_iaa_cycle(struct ehci_hcd *ehci)
{
	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
		end_unlink_async(ehci);

	/* Otherwise start a new IAA cycle if one isn't already running */
	} else if (ehci->rh_state == EHCI_RH_RUNNING &&
			!ehci->iaa_in_progress) {

		/* Make sure the unlinks are all visible to the hardware */
		wmb();

		ehci_writel(ehci, ehci->command | CMD_IAAD,
				&ehci->regs->command);
		ehci_readl(ehci, &ehci->regs->command);
		ehci->iaa_in_progress = true;
		ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
	}
}
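
/*
 * Doorbell handshake sketch (descriptive only): CMD_IAAD asks the HC to
 * raise an "Interrupt on Async Advance" once it has moved past any cached
 * QH pointers.  end_iaa_cycle() runs from that interrupt, or from the
 * EHCI_HRTIMER_IAA_WATCHDOG timeout if the interrupt never arrives, and in
 * turn calls end_unlink_async() below.
 */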

static void end_iaa_cycle(struct ehci_hcd *ehci)
{
	if (ehci->has_synopsys_hc_bug)
		ehci_writel(ehci, (u32) ehci->async->qh_dma,
				&ehci->regs->async_next);

	/* The current IAA cycle has ended */
	ehci->iaa_in_progress = false;

	end_unlink_async(ehci);
}

/* the async qh for the qtds being unlinked are now off the HC's schedule */

static void end_unlink_async(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	bool			early_exit;

	if (list_empty(&ehci->async_unlink))
		return;
	qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
			unlink_node);	/* QH whose IAA cycle just ended */

	/*
	 * If async_unlinking is set then this routine is already running,
	 * either on the stack or on another CPU.
	 */
	early_exit = ehci->async_unlinking;

	/* If the controller isn't running, process all the waiting QHs */
	if (ehci->rh_state < EHCI_RH_RUNNING)
		list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);

	/*
	 * Intel (?) bug: The HC can write back the overlay region even
	 * after the IAA interrupt occurs.  In self-defense, always go
	 * through two IAA cycles for each QH.
	 */
	else if (qh->qh_state == QH_STATE_UNLINK) {
		/*
		 * Second IAA cycle has finished.  Process only the first
		 * waiting QH (NVIDIA (?) bug).
		 */
		list_move_tail(&qh->unlink_node, &ehci->async_idle);
	}

	/*
	 * AMD/ATI (?) bug: The HC can continue to use an active QH long
	 * after the IAA interrupt occurs.  To prevent problems, QHs that
	 * may still be active will wait until 2 ms have passed with no
	 * change to the hw_current and hw_token fields (this delay occurs
	 * between the two IAA cycles).
	 *
	 * The EHCI spec (4.8.2) says that active QHs must not be removed
	 * from the async schedule and recommends waiting until the QH
	 * goes inactive.  That isn't practical here, since the QH will
	 * never go inactive if the endpoint NAKs indefinitely.
	 */

	/* Some reasons for unlinking guarantee the QH can't be active */
	else if (qh->unlink_reason & (QH_UNLINK_HALTED |
			QH_UNLINK_SHORT_READ | QH_UNLINK_DUMMY_OVERLAY))
		goto DelayDone;

	/* The QH can't be active if the queue was and still is empty ... */
	else if ((qh->unlink_reason & QH_UNLINK_QUEUE_EMPTY) &&
			list_empty(&qh->qtd_list))
		goto DelayDone;

	/* ... or if the QH has halted */
	else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
		goto DelayDone;

	/* Otherwise we have to wait until the QH stops changing */
	else {
		__hc32		qh_current, qh_token;

		qh_current = qh->hw->hw_current;
		qh_token = qh->hw->hw_token;
		if (qh_current != ehci->old_current ||
				qh_token != ehci->old_token) {
			ehci->old_current = qh_current;
			ehci->old_token = qh_token;
			ehci_enable_event(ehci,
					EHCI_HRTIMER_ACTIVE_UNLINK, true);
			return;
		}
 DelayDone:
		qh->qh_state = QH_STATE_UNLINK;
		early_exit = true;
	}
	ehci->old_current = ~0;		/* Prepare for next QH */

	/* Start a new IAA cycle if any QHs are waiting for it */
	if (!list_empty(&ehci->async_unlink))
		start_iaa_cycle(ehci);

	/*
	 * Don't allow nesting or concurrent calls,
	 * or wait for the second IAA cycle for the next QH.
	 */
	if (early_exit)
		return;

	/* Process the idle QHs */
	ehci->async_unlinking = true;
	while (!list_empty(&ehci->async_idle)) {
		qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
				unlink_node);
		list_del(&qh->unlink_node);

		qh->qh_state = QH_STATE_IDLE;
		qh->qh_next.qh = NULL;

		if (!list_empty(&qh->qtd_list))
			qh_completions(ehci, qh);
		if (!list_empty(&qh->qtd_list) &&
				ehci->rh_state == EHCI_RH_RUNNING)
			qh_link_async(ehci, qh);
		disable_async(ehci);
	}
	ehci->async_unlinking = false;
}
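
/*
 * Unlink flow sketch (descriptive only): start_unlink_async() removes the
 * QH from the hardware ring and puts it on async_unlink in state
 * UNLINK_WAIT; the first IAA cycle moves it to state UNLINK (or delays
 * while hw_current/hw_token keep changing); the second IAA cycle moves it
 * to async_idle, after which the loop above gives back its URBs and, if
 * more qtds were queued meanwhile, relinks it.
 */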

static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void unlink_empty_async(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	struct ehci_qh		*qh_to_unlink = NULL;
	int			count = 0;

	/* Find the last async QH which has been empty for a timer cycle */
	for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
		if (list_empty(&qh->qtd_list) &&
				qh->qh_state == QH_STATE_LINKED) {
			++count;
			if (qh->unlink_cycle != ehci->async_unlink_cycle)
				qh_to_unlink = qh;
		}
	}

	/* If nothing else is being unlinked, unlink the last empty QH */
	if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
		qh_to_unlink->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
		start_unlink_async(ehci, qh_to_unlink);
		--count;
	}

	/* Other QHs will be handled later */
	if (count > 0) {
		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
		++ehci->async_unlink_cycle;
	}
}

#ifdef CONFIG_PM

/* The root hub is suspended; unlink all the async QHs */
static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;

	while (ehci->async->qh_next.qh) {
		qh = ehci->async->qh_next.qh;
		WARN_ON(!list_empty(&qh->qtd_list));
		single_unlink_async(ehci, qh);
	}
}

#endif

/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	single_unlink_async(ehci, qh);
	start_iaa_cycle(ehci);
}

static void scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	bool			check_unlinks_later = false;

	ehci->qh_scan_next = ehci->async->qh_next.qh;
	while (ehci->qh_scan_next) {
		qh = ehci->qh_scan_next;
		ehci->qh_scan_next = qh->qh_next.qh;

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in single_unlink_async().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp)) {
				start_unlink_async(ehci, qh);
			} else if (list_empty(&qh->qtd_list)
					&& qh->qh_state == QH_STATE_LINKED) {
				qh->unlink_cycle = ehci->async_unlink_cycle;
				check_unlinks_later = true;
			}
		}
	}

	/*
	 * Unlink empty entries, reducing DMA usage as well
	 * as HCD schedule-scanning costs.  Delay for any qh
	 * we just scanned; there's a not-unusual case that it
	 * doesn't stay idle for long.
	 */
	if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
			!(ehci->enabled_hrtimer_events &
				BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
		++ehci->async_unlink_cycle;
	}
}