0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
/*
 * Request that the controller raise an interrupt at the end of the next
 * frame by setting the IOC bit in the terminating TD.  If the controller
 * is stopped the hardware won't run, so kick the root-hub timer now so
 * the pending work gets noticed anyway.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
        if (uhci->is_stopped)
                mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
        uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}
0035
/* Cancel a previously requested end-of-frame interrupt (clears IOC). */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
}
0040
0041
0042
0043
0044
0045
0046
/*
 * Turn on Full-Speed Bandwidth Reclamation (FSBR): make the last QH on
 * the async list point back to the terminating skeleton QH, closing the
 * full-speed portion of the schedule into a loop so the controller keeps
 * polling it for the remainder of each frame.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
        struct uhci_qh *lqh;

        /* lqh is the last entry on the async skeleton's list */
        uhci->fsbr_is_on = 1;
        lqh = list_entry(uhci->skel_async_qh->node.prev,
                        struct uhci_qh, node);
        lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
}
0059
/*
 * Turn off FSBR: terminate the last async QH's horizontal link so the
 * controller stops looping over the full-speed schedule.
 */
static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
        struct uhci_qh *lqh;

        /* lqh is the last entry on the async skeleton's list */
        uhci->fsbr_is_on = 0;
        lqh = list_entry(uhci->skel_async_qh->node.prev,
                        struct uhci_qh, node);
        lqh->link = UHCI_PTR_TERM(uhci);
}
0071
/* Mark an URB as wanting FSBR; acted upon later by uhci_urbp_wants_fsbr(). */
static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;

        urbp->fsbr = 1;
}
0078
/*
 * If this URB wants FSBR, either switch FSBR on or, if it was already on
 * but about to expire, cancel the pending expiration timer.
 */
static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
        if (urbp->fsbr) {
                uhci->fsbr_is_wanted = 1;
                if (!uhci->fsbr_is_on)
                        uhci_fsbr_on(uhci);
                else if (uhci->fsbr_expiring) {
                        uhci->fsbr_expiring = 0;
                        del_timer(&uhci->fsbr_timer);
                }
        }
}
0091
/*
 * FSBR expiration timer callback: under the HCD lock, turn FSBR off if
 * the expiration is still pending (it may have been cancelled by
 * uhci_urbp_wants_fsbr() in the meantime).
 */
static void uhci_fsbr_timeout(struct timer_list *t)
{
        struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
        unsigned long flags;

        spin_lock_irqsave(&uhci->lock, flags);
        if (uhci->fsbr_expiring) {
                uhci->fsbr_expiring = 0;
                uhci_fsbr_off(uhci);
        }
        spin_unlock_irqrestore(&uhci->lock, flags);
}
0104
0105
/*
 * Allocate a Transfer Descriptor from the DMA pool and initialize its
 * software bookkeeping fields.  frame == -1 means "not in any frame list".
 * Returns NULL on allocation failure.
 */
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
        dma_addr_t dma_handle;
        struct uhci_td *td;

        td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
        if (!td)
                return NULL;

        td->dma_handle = dma_handle;
        td->frame = -1;

        INIT_LIST_HEAD(&td->list);
        INIT_LIST_HEAD(&td->fl_list);

        return td;
}
0123
/*
 * Return a TD to the DMA pool.  Warns if the TD is still linked on an
 * URB's TD list or a frame list — that would be a use-after-free in
 * the making.
 */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        if (!list_empty(&td->list))
                dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
        if (!list_empty(&td->fl_list))
                dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

        dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
0133
/*
 * Fill in the three hardware-visible words of a TD, converting each to
 * the controller's (little-endian) byte order.
 */
static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
                u32 status, u32 token, u32 buffer)
{
        td->status = cpu_to_hc32(uhci, status);
        td->token = cpu_to_hc32(uhci, token);
        td->buffer = cpu_to_hc32(uhci, buffer);
}
0141
/* Append a TD to the end of an URB's TD list. */
static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
        list_add_tail(&td->list, &urbp->td_list);
}
0146
/* Detach a TD from its URB's TD list (leaves the TD's list head empty). */
static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
        list_del_init(&td->list);
}
0151
0152
0153
0154
/*
 * Insert an isochronous TD into the frame list for frame number
 * 'framenum' (wrapped modulo UHCI_NUMFRAMES).  New TDs go at the end of
 * the frame's chain; the wmb() guarantees the TD's own link pointer is
 * in memory before the hardware-visible pointer to it is published.
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td, unsigned framenum)
{
        framenum &= (UHCI_NUMFRAMES - 1);

        td->frame = framenum;

        /* Is there a TD already mapped there? */
        if (uhci->frame_cpu[framenum]) {
                struct uhci_td *ftd, *ltd;

                ftd = uhci->frame_cpu[framenum];        /* first TD in frame */
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

                list_add_tail(&td->fl_list, &ftd->fl_list);

                td->link = ltd->link;
                wmb();          /* make td->link visible before linking td */
                ltd->link = LINK_TO_TD(uhci, td);
        } else {
                td->link = uhci->frame[framenum];
                wmb();          /* make td->link visible before linking td */
                uhci->frame[framenum] = LINK_TO_TD(uhci, td);
                uhci->frame_cpu[framenum] = td;
        }
}
0181
/*
 * Remove an isochronous TD from its frame list, repairing the hardware
 * link chain around it.  Handles three cases: TD not in any frame
 * (frame == -1), TD is the first one in the frame, or TD is in the
 * middle/end of the chain.
 */
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td)
{
        /* If it's not inserted, don't remove it */
        if (td->frame == -1) {
                WARN_ON(!list_empty(&td->fl_list));
                return;
        }

        if (uhci->frame_cpu[td->frame] == td) {
                /* td is the first TD in this frame */
                if (list_empty(&td->fl_list)) {
                        /* ... and the only one: frame inherits td's link */
                        uhci->frame[td->frame] = td->link;
                        uhci->frame_cpu[td->frame] = NULL;
                } else {
                        struct uhci_td *ntd;

                        /* Promote the next TD to first-in-frame */
                        ntd = list_entry(td->fl_list.next,
                                        struct uhci_td,
                                        fl_list);
                        uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
                        uhci->frame_cpu[td->frame] = ntd;
                }
        } else {
                struct uhci_td *ptd;

                /* Bypass td: previous TD inherits td's link pointer */
                ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
                ptd->link = td->link;
        }

        list_del_init(&td->fl_list);
        td->frame = -1;
}
0214
/*
 * Detach all the TDs queued in a single frame at once: splice the last
 * TD's link into the frame slot, then empty the software chain.  The
 * TDs themselves are not freed here.
 */
static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
                unsigned int framenum)
{
        struct uhci_td *ftd, *ltd;

        framenum &= (UHCI_NUMFRAMES - 1);

        ftd = uhci->frame_cpu[framenum];
        if (ftd) {
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
                uhci->frame[framenum] = ltd->link;
                uhci->frame_cpu[framenum] = NULL;

                while (!list_empty(&ftd->fl_list))
                        list_del_init(ftd->fl_list.prev);
        }
}
0232
0233
0234
0235
/*
 * Remove all of an isochronous URB's TDs from the frame lists.
 * (This can only be done for an URB whose TDs are no longer in use.)
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
        struct uhci_td *td;

        list_for_each_entry(td, &urbp->td_list, list)
                uhci_remove_td_from_frame_list(uhci, td);
}
0244
/*
 * Allocate and initialize a Queue Header.
 *
 * With a non-NULL udev this creates a normal endpoint QH: non-isochronous
 * endpoints also get a dummy TD (used as the always-present tail of the
 * TD chain), and periodic endpoints get a bus-time load estimate for
 * bandwidth accounting.  With udev == NULL this creates a skeleton QH,
 * which starts out ACTIVE and has no endpoint type.
 *
 * Returns NULL on allocation failure.
 */
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
                struct usb_device *udev, struct usb_host_endpoint *hep)
{
        dma_addr_t dma_handle;
        struct uhci_qh *qh;

        qh = dma_pool_zalloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
        if (!qh)
                return NULL;

        qh->dma_handle = dma_handle;

        qh->element = UHCI_PTR_TERM(uhci);
        qh->link = UHCI_PTR_TERM(uhci);

        INIT_LIST_HEAD(&qh->queue);
        INIT_LIST_HEAD(&qh->node);

        if (udev) {             /* Normal QH */
                qh->type = usb_endpoint_type(&hep->desc);
                if (qh->type != USB_ENDPOINT_XFER_ISOC) {
                        qh->dummy_td = uhci_alloc_td(uhci);
                        if (!qh->dummy_td) {
                                dma_pool_free(uhci->qh_pool, qh, dma_handle);
                                return NULL;
                        }
                }
                qh->state = QH_STATE_IDLE;
                qh->hep = hep;
                qh->udev = udev;
                hep->hcpriv = qh;

                if (qh->type == USB_ENDPOINT_XFER_INT ||
                                qh->type == USB_ENDPOINT_XFER_ISOC)
                        /* load in microseconds, rounded up */
                        qh->load = usb_calc_bus_time(udev->speed,
                                        usb_endpoint_dir_in(&hep->desc),
                                        qh->type == USB_ENDPOINT_XFER_ISOC,
                                        usb_endpoint_maxp(&hep->desc))
                                / 1000 + 1;

        } else {                /* Skeleton QH */
                qh->state = QH_STATE_ACTIVE;
                qh->type = -1;
        }
        return qh;
}
0291
/*
 * Free a QH and, for normal (endpoint) QHs, its dummy TD.  The QH must
 * be idle (unless it's a skeleton QH) and its URB queue must be empty.
 */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
        if (!list_empty(&qh->queue))
                dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

        list_del(&qh->node);
        if (qh->udev) {
                qh->hep->hcpriv = NULL;
                if (qh->dummy_td)
                        uhci_free_td(uhci, qh->dummy_td);
        }
        dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
0306
0307
0308
0309
0310
0311
0312
0313
0314 static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
0315 struct urb *urb)
0316 {
0317 struct urb_priv *urbp = urb->hcpriv;
0318 struct uhci_td *td;
0319 int ret = 1;
0320
0321
0322
0323
0324
0325 if (qh->type == USB_ENDPOINT_XFER_ISOC) {
0326 ret = (uhci->frame_number + uhci->is_stopped !=
0327 qh->unlink_frame);
0328 goto done;
0329 }
0330
0331
0332
0333
0334 if (qh->queue.next != &urbp->node) {
0335 struct urb_priv *purbp;
0336 struct uhci_td *ptd;
0337
0338 purbp = list_entry(urbp->node.prev, struct urb_priv, node);
0339 WARN_ON(list_empty(&purbp->td_list));
0340 ptd = list_entry(purbp->td_list.prev, struct uhci_td,
0341 list);
0342 td = list_entry(urbp->td_list.prev, struct uhci_td,
0343 list);
0344 ptd->link = td->link;
0345 goto done;
0346 }
0347
0348
0349
0350 if (qh_element(qh) == UHCI_PTR_TERM(uhci))
0351 goto done;
0352 qh->element = UHCI_PTR_TERM(uhci);
0353
0354
0355 if (qh->type == USB_ENDPOINT_XFER_CONTROL)
0356 goto done;
0357
0358
0359 WARN_ON(list_empty(&urbp->td_list));
0360 td = list_entry(urbp->td_list.next, struct uhci_td, list);
0361 qh->needs_fixup = 1;
0362 qh->initial_toggle = uhci_toggle(td_token(uhci, td));
0363
0364 done:
0365 return ret;
0366 }
0367
0368
0369
0370
0371
/*
 * Fix up the data toggles for URBs in a queue, when one of them must
 * have been toggled out of sequence (e.g. after a short transfer or an
 * unlink).  Walks each remaining URB and either accepts its existing
 * toggle sequence or flips every TD's toggle bit.
 *
 * @skip_first: don't touch the first URB in the queue (its TDs may be
 *              in use by the hardware).
 */
static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
                int skip_first)
{
        struct urb_priv *urbp = NULL;
        struct uhci_td *td;
        unsigned int toggle = qh->initial_toggle;
        unsigned int pipe;

        /* Fixups for a short transfer start with the second URB in the
         * queue (the short URB is the first). */
        if (skip_first)
                urbp = list_entry(qh->queue.next, struct urb_priv, node);

        /* When starting with the first URB, if the QH element pointer is
         * still valid then we know the URB's toggles are okay.
         * (toggle == 2 means "accept whatever sequence we find".) */
        else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
                toggle = 2;

        /* Fix up the toggle for the URBs in the queue.  Normally this
         * loop won't run more than once: When an error or short transfer
         * occurs, the queue usually gets emptied. */
        urbp = list_prepare_entry(urbp, &qh->queue, node);
        list_for_each_entry_continue(urbp, &qh->queue, node) {

                /* If the first TD has the right toggle value, we don't
                 * need to change any toggles in this URB */
                td = list_entry(urbp->td_list.next, struct uhci_td, list);
                if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
                        td = list_entry(urbp->td_list.prev, struct uhci_td,
                                        list);
                        toggle = uhci_toggle(td_token(uhci, td)) ^ 1;

                /* Otherwise all the toggles in the URB have to be switched */
                } else {
                        list_for_each_entry(td, &urbp->td_list, list) {
                                td->token ^= cpu_to_hc32(uhci,
                                                TD_TOKEN_TOGGLE);
                                toggle ^= 1;
                        }
                }
        }

        wmb();          /* TD updates must reach memory before reuse */
        pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
        usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
                        usb_pipeout(pipe), toggle);
        qh->needs_fixup = 0;
}
0420
0421
0422
0423
/*
 * Link an isochronous QH into its skeleton's list.  Isochronous QHs
 * never take part in the hardware QH chain — their TDs live directly in
 * the frame lists — so only the software list is updated here.
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

        /* Isochronous QHs aren't linked by the hardware */
}
0430
0431
0432
0433
0434
0435 static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
0436 {
0437 struct uhci_qh *pqh;
0438
0439 list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
0440
0441 pqh = list_entry(qh->node.prev, struct uhci_qh, node);
0442 qh->link = pqh->link;
0443 wmb();
0444 pqh->link = LINK_TO_QH(uhci, qh);
0445 }
0446
0447
0448
0449
0450
0451 static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
0452 {
0453 struct uhci_qh *pqh;
0454 __hc32 link_to_new_qh;
0455
0456
0457
0458
0459 list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
0460 if (pqh->skel <= qh->skel)
0461 break;
0462 }
0463 list_add(&qh->node, &pqh->node);
0464
0465
0466 qh->link = pqh->link;
0467 wmb();
0468 link_to_new_qh = LINK_TO_QH(uhci, qh);
0469 pqh->link = link_to_new_qh;
0470
0471
0472
0473 if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
0474 uhci->skel_term_qh->link = link_to_new_qh;
0475 }
0476
0477
0478
0479
/*
 * Put a QH on the schedule, in both software and hardware.
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(list_empty(&qh->queue));

        /* Set the element pointer if it isn't set already.
         * This isn't needed for Isochronous queues, but it doesn't hurt. */
        if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
                struct urb_priv *urbp = list_entry(qh->queue.next,
                                struct urb_priv, node);
                struct uhci_td *td = list_entry(urbp->td_list.next,
                                struct uhci_td, list);

                qh->element = LINK_TO_TD(uhci, td);
        }

        /* Treat the queue as if it has just advanced */
        qh->wait_expired = 0;
        qh->advance_jiffies = jiffies;

        if (qh->state == QH_STATE_ACTIVE)
                return;
        qh->state = QH_STATE_ACTIVE;

        /* Move the QH from its old list to the correct skeleton's list.
         * If uhci->next_qh was pointing at it, advance that first so the
         * scan in progress isn't broken. */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_del(&qh->node);

        if (qh->skel == SKEL_ISO)
                link_iso(uhci, qh);
        else if (qh->skel < SKEL_ASYNC)
                link_interrupt(uhci, qh);
        else
                link_async(uhci, qh);
}
0517
0518
0519
0520
0521 static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
0522 {
0523 struct uhci_qh *pqh;
0524
0525 pqh = list_entry(qh->node.prev, struct uhci_qh, node);
0526 pqh->link = qh->link;
0527 mb();
0528 }
0529
0530
0531
0532
0533 static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
0534 {
0535 struct uhci_qh *pqh;
0536 __hc32 link_to_next_qh = qh->link;
0537
0538 pqh = list_entry(qh->node.prev, struct uhci_qh, node);
0539 pqh->link = link_to_next_qh;
0540
0541
0542
0543 if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
0544 uhci->skel_term_qh->link = link_to_next_qh;
0545 mb();
0546 }
0547
0548
0549
0550
/*
 * Take a QH off the hardware schedule and move it onto the unlinking
 * list.  The QH is not truly gone until the frame in which it was
 * unlinked has passed; qh->unlink_frame records that frame, and an
 * end-of-frame interrupt is requested so we learn when it's safe.
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        if (qh->state == QH_STATE_UNLINKING)
                return;
        WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
        qh->state = QH_STATE_UNLINKING;

        /* Unlink the QH from the schedule (isochronous QHs are never
         * linked in hardware, so there's nothing to do for them). */
        if (qh->skel == SKEL_ISO)
                ;
        else if (qh->skel < SKEL_ASYNC)
                unlink_interrupt(uhci, qh);
        else
                unlink_async(uhci, qh);

        uhci_get_current_frame_number(uhci);
        qh->unlink_frame = uhci->frame_number;

        /* Force an interrupt so we know when the QH is fully unlinked */
        if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
                uhci_set_next_interrupt(uhci);

        /* Move the QH from its old list to the end of the unlinking list,
         * keeping uhci->next_qh valid if it pointed here. */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}
0579
0580
0581
0582
0583
0584
0585
/*
 * When both we and the controller are through with a QH, it becomes IDLE
 * and is moved to the idle list.  The QH can be "reanimated" later or
 * eventually freed.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state == QH_STATE_ACTIVE);

        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move(&qh->node, &uhci->idle_qh_list);
        qh->state = QH_STATE_IDLE;

        /* Now that the QH is idle, its post_td isn't needed */
        if (qh->post_td) {
                uhci_free_td(uhci, qh->post_td);
                qh->post_td = NULL;
        }

        /* If anyone is waiting for a QH to become idle, wake them up */
        if (uhci->num_waiting)
                wake_up_all(&uhci->waitqh);
}
0606
0607
0608
0609
0610 static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
0611 {
0612 int highest_load = uhci->load[phase];
0613
0614 for (phase += period; phase < MAX_PHASE; phase += period)
0615 highest_load = max_t(int, highest_load, uhci->load[phase]);
0616 return highest_load;
0617 }
0618
0619
0620
0621
0622
/*
 * Check whether a new periodic QH will fit in the schedule.  If
 * qh->phase is negative, choose the phase whose worst-case slot is the
 * least loaded.  Returns 0 on success or -ENOSPC if the addition would
 * exceed 900 us (of the 1000 us frame) in some slot.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int minimax_load;

        /* Find the optimal phase (unless it is already set) and get
         * its load value. */
        if (qh->phase >= 0)
                minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
        else {
                int phase, load;
                int max_phase = min_t(int, MAX_PHASE, qh->period);

                qh->phase = 0;
                minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
                for (phase = 1; phase < max_phase; ++phase) {
                        load = uhci_highest_load(uhci, phase, qh->period);
                        if (load < minimax_load) {
                                minimax_load = load;
                                qh->phase = phase;
                        }
                }
        }

        /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
        if (minimax_load + qh->load > 900) {
                dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
                                "period %d, phase %d, %d + %d us\n",
                                qh->period, qh->phase, minimax_load, qh->load);
                return -ENOSPC;
        }
        return 0;
}
0655
0656
0657
0658
/*
 * Reserve periodic bandwidth for a QH: add its load to every frame-list
 * slot it occupies and update the bus-wide accounting counters.
 * Counterpart of uhci_release_bandwidth().
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int i;
        int load = qh->load;
        char *p = "??";

        for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
                uhci->load[i] += load;
                uhci->total_load += load;
        }
        uhci_to_hcd(uhci)->self.bandwidth_allocated =
                        uhci->total_load / MAX_PHASE;
        switch (qh->type) {
        case USB_ENDPOINT_XFER_INT:
                ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
                p = "INT";
                break;
        case USB_ENDPOINT_XFER_ISOC:
                ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
                p = "ISO";
                break;
        }
        qh->bandwidth_reserved = 1;
        dev_dbg(uhci_dev(uhci),
                        "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
                        "reserve", qh->udev->devnum,
                        qh->hep->desc.bEndpointAddress, p,
                        qh->period, qh->phase, load);
}
0688
0689
0690
0691
/*
 * Release periodic bandwidth previously reserved for a QH: subtract its
 * load from every slot it occupies and update the accounting counters.
 * Counterpart of uhci_reserve_bandwidth().
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int i;
        int load = qh->load;
        char *p = "??";

        for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
                uhci->load[i] -= load;
                uhci->total_load -= load;
        }
        uhci_to_hcd(uhci)->self.bandwidth_allocated =
                        uhci->total_load / MAX_PHASE;
        switch (qh->type) {
        case USB_ENDPOINT_XFER_INT:
                --uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
                p = "INT";
                break;
        case USB_ENDPOINT_XFER_ISOC:
                --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
                p = "ISO";
                break;
        }
        qh->bandwidth_reserved = 0;
        dev_dbg(uhci_dev(uhci),
                        "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
                        "release", qh->udev->devnum,
                        qh->hep->desc.bEndpointAddress, p,
                        qh->period, qh->phase, load);
}
0721
/*
 * Allocate the driver-private data for an URB and hook it up via
 * urb->hcpriv.  Returns NULL on allocation failure.
 */
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
                struct urb *urb)
{
        struct urb_priv *urbp;

        urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
        if (!urbp)
                return NULL;

        urbp->urb = urb;
        urb->hcpriv = urbp;

        INIT_LIST_HEAD(&urbp->node);
        INIT_LIST_HEAD(&urbp->td_list);

        return urbp;
}
0739
/*
 * Free an URB's private data along with any TDs still attached to it.
 * Warns if the URB is still linked on a QH's queue.
 */
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
                struct urb_priv *urbp)
{
        struct uhci_td *td, *tmp;

        if (!list_empty(&urbp->node))
                dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
                                urbp->urb);

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
        }

        kmem_cache_free(uhci_up_cachep, urbp);
}
0756
0757
0758
0759
0760
0761
0762
0763
0764
0765 static int uhci_map_status(int status, int dir_out)
0766 {
0767 if (!status)
0768 return 0;
0769 if (status & TD_CTRL_BITSTUFF)
0770 return -EPROTO;
0771 if (status & TD_CTRL_CRCTIMEO) {
0772 if (dir_out)
0773 return -EPROTO;
0774 else
0775 return -EILSEQ;
0776 }
0777 if (status & TD_CTRL_BABBLE)
0778 return -EOVERFLOW;
0779 if (status & TD_CTRL_DBUFERR)
0780 return -ENOSR;
0781 if (status & TD_CTRL_STALLED)
0782 return -EPIPE;
0783 return 0;
0784 }
0785
0786
0787
0788
/*
 * Build the TD chain for a control transfer: SETUP stage, optional DATA
 * stage packets, STATUS stage, and a fresh inactive dummy TD at the
 * tail.  The existing dummy TD becomes the SETUP TD and is activated
 * last (after a wmb()), so the hardware never sees a half-built chain.
 *
 * Returns 0 on success or -ENOMEM if a TD allocation fails.
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = usb_endpoint_maxp(&qh->hep->desc);
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;
        __hc32 *plink;
        struct urb_priv *urbp = urb->hcpriv;
        int skel;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;

        /*
         * Build the TD for the control request setup packet.
         * Reuse the QH's dummy TD; it is activated only at the very end.
         */
        td = qh->dummy_td;
        uhci_add_td_to_urbp(td, urbp);
        uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
                        urb->setup_dma);
        plink = &td->link;
        status |= TD_CTRL_ACTIVE;

        /*
         * If direction is "send", change the packet ID from SETUP (0x2D)
         * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
         * set Short Packet Detect (SPD) for all data packets.
         *
         * 0-length transfers always get treated as "send".
         */
        if (usb_pipeout(urb->pipe) || len == 0)
                destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
        else {
                destination ^= (USB_PID_SETUP ^ USB_PID_IN);
                status |= TD_CTRL_SPD;
        }

        /*
         * Build the DATA TDs
         */
        while (len > 0) {
                int pktsze = maxsze;

                if (len <= pktsze) {            /* The last data packet */
                        pktsze = len;
                        status &= ~TD_CTRL_SPD;
                }

                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = LINK_TO_TD(uhci, td);

                /* Alternate Data0/1 (start with Data1) */
                destination ^= TD_TOKEN_TOGGLE;

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(uhci, td, status,
                                destination | uhci_explen(pktsze), data);
                plink = &td->link;

                data += pktsze;
                len -= pktsze;
        }

        /*
         * Build the final TD for control status
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(uhci, td);

        /* Change direction for the status transaction */
        destination ^= (USB_PID_IN ^ USB_PID_OUT);
        destination |= TD_TOKEN_TOGGLE;         /* End in Data1 */

        uhci_add_td_to_urbp(td, urbp);
        uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
                        destination | uhci_explen(0), 0);
        plink = &td->link;

        /*
         * Build the new dummy TD and activate the old one
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(uhci, td);

        uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();          /* whole chain must be in memory before activation */
        qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        /* Low-speed transfers get a different queue, and won't hog the bus.
         * Also, some devices enumerate better without FSBR; the easiest way
         * to do that is to put URBs on the low-speed queue while the device
         * isn't in the CONFIGURED state. */
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->state != USB_STATE_CONFIGURED)
                skel = SKEL_LS_CONTROL;
        else {
                skel = SKEL_FS_CONTROL;
                uhci_add_fsbr(uhci, urb);
        }
        if (qh->state != QH_STATE_ACTIVE)
                qh->skel = skel;
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urbp(qh->dummy_td);
        return -ENOMEM;
}
0911
0912
0913
0914
/*
 * Common TD-chain builder for bulk and interrupt transfers.  Walks the
 * transfer buffer (or its scatter-gather list) a maxpacket at a time,
 * reusing the QH's dummy TD as the first TD and appending a new inactive
 * dummy at the tail; the old dummy is activated last, after a wmb().
 *
 * Returns 0 on success, -EINVAL for a negative length, or -ENOMEM if a
 * TD allocation fails.
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = usb_endpoint_maxp(&qh->hep->desc);
        int len = urb->transfer_buffer_length;
        int this_sg_len;
        dma_addr_t data;
        __hc32 *plink;
        struct urb_priv *urbp = urb->hcpriv;
        unsigned int toggle;
        struct scatterlist *sg;
        int i;

        if (len < 0)
                return -EINVAL;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
        toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                         usb_pipeout(urb->pipe));

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;
        if (usb_pipein(urb->pipe))
                status |= TD_CTRL_SPD;

        i = urb->num_mapped_sgs;
        if (len > 0 && i > 0) {
                sg = urb->sg;
                data = sg_dma_address(sg);

                /* urb->transfer_buffer_length may be smaller than the
                 * size of the scatterlist (or vice versa) */
                this_sg_len = min_t(int, sg_dma_len(sg), len);
        } else {
                sg = NULL;
                data = urb->transfer_dma;
                this_sg_len = len;
        }

        /*
         * Build the DATA TDs
         */
        plink = NULL;
        td = qh->dummy_td;              /* reused as the first TD */
        for (;;) {      /* Allow zero length packets */
                int pktsze = maxsze;

                if (len <= pktsze) {            /* The last packet */
                        pktsze = len;
                        if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                                status &= ~TD_CTRL_SPD;
                }

                if (plink) {
                        td = uhci_alloc_td(uhci);
                        if (!td)
                                goto nomem;
                        *plink = LINK_TO_TD(uhci, td);
                }
                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(uhci, td, status,
                                destination | uhci_explen(pktsze) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;
                status |= TD_CTRL_ACTIVE;       /* all TDs after the first */

                toggle ^= 1;
                data += pktsze;
                this_sg_len -= pktsze;
                len -= maxsze;
                if (this_sg_len <= 0) {
                        if (--i <= 0 || len <= 0)
                                break;
                        sg = sg_next(sg);
                        data = sg_dma_address(sg);
                        this_sg_len = min_t(int, sg_dma_len(sg), len);
                }
        }

        /*
         * URB_ZERO_PACKET means adding a 0-length packet, if direction
         * is OUT and the transfer_length was an exact multiple of maxsze,
         * hence (len = transfer_length - N * maxsze) == 0
         * however, if transfer_length == 0, the zero packet was already
         * prepared above.
         */
        if ((urb->transfer_flags & URB_ZERO_PACKET) &&
                        usb_pipeout(urb->pipe) && len == 0 &&
                        urb->transfer_buffer_length > 0) {
                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = LINK_TO_TD(uhci, td);

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(uhci, td, status,
                                destination | uhci_explen(0) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;

                toggle ^= 1;
        }

        /* Set the interrupt-on-completion flag on the last packet.
         * A more-or-less typical 4 KB URB (= size of one memory page)
         * will require about 3 ms to transfer; that's a little on the
         * fast side but not enough to justify delaying an interrupt
         * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
         * flag setting. */
        td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);

        /*
         * Build the new dummy TD and activate the old one
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(uhci, td);

        uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();          /* whole chain must be in memory before activation */
        qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe), toggle);
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urbp(qh->dummy_td);
        return -ENOMEM;
}
1055
/*
 * Submit a bulk URB: rejects low-speed devices (bulk can't be low speed),
 * selects the bulk skeleton, and enables FSBR on success.
 */
static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        int ret;

        /* Can't have low-speed bulk transfers */
        if (urb->dev->speed == USB_SPEED_LOW)
                return -EINVAL;

        if (qh->state != QH_STATE_ACTIVE)
                qh->skel = SKEL_BULK;
        ret = uhci_submit_common(uhci, urb, qh);
        if (ret == 0)
                uhci_add_fsbr(uhci, urb);
        return ret;
}
1072
/*
 * Submit an interrupt URB.  On the first submission for this QH, round
 * the requested interval down to a power of two, pick a skeleton and
 * phase, and reserve bandwidth — retrying at progressively shorter
 * periods if the schedule is too full.  Later submissions must not ask
 * for a shorter period than was reserved.
 */
static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        int ret;

        /* USB 1.1 interrupt transfers only involve one packet per interval.
         * Drivers can submit URBs of any length, but longer ones will need
         * multiple intervals to complete.
         */

        if (!qh->bandwidth_reserved) {
                int exponent;

                /* Figure out which power-of-two queue to use */
                for (exponent = 7; exponent >= 0; --exponent) {
                        if ((1 << exponent) <= urb->interval)
                                break;
                }
                if (exponent < 0)
                        return -EINVAL;

                /* If the slot is full, try a lower period */
                do {
                        qh->period = 1 << exponent;
                        qh->skel = SKEL_INDEX(exponent);

                        /* For now, interrupt phase is fixed by the layout
                         * of the QH lists.
                         */
                        qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
                        ret = uhci_check_bandwidth(uhci, qh);
                } while (ret != 0 && --exponent >= 0);
                if (ret)
                        return ret;
        } else if (qh->period > urb->interval)
                return -EINVAL;         /* Can't decrease the period */

        ret = uhci_submit_common(uhci, urb, qh);
        if (ret == 0) {
                urb->interval = qh->period;
                if (!qh->bandwidth_reserved)
                        uhci_reserve_bandwidth(uhci, qh);
        }
        return ret;
}
1118
1119
1120
1121
/*
 * Fix up the data structures following a short transfer: for control
 * transfers, jump straight to the status TD (the URB stays in progress);
 * for bulk/interrupt, skip the rest of the URB's TDs, fix the queue's
 * toggles, and give the URB back.  In either case the now-unneeded TDs
 * are freed.
 *
 * Returns -EINPROGRESS (control case) or 0 (URB complete).
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
                struct uhci_qh *qh, struct urb_priv *urbp)
{
        struct uhci_td *td;
        struct list_head *tmp;
        int ret;

        td = list_entry(urbp->td_list.prev, struct uhci_td, list);
        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

                /* When a control transfer is short, we have to restart
                 * the queue at the status stage transaction, which is
                 * the last TD. */
                WARN_ON(list_empty(&urbp->td_list));
                qh->element = LINK_TO_TD(uhci, td);
                tmp = td->list.prev;            /* free everything before it */
                ret = -EINPROGRESS;

        } else {

                /* When a bulk/interrupt transfer is short, we have to
                 * fix up the toggles of the following URBs on the queue
                 * before restarting the queue at the next URB. */
                qh->initial_toggle =
                        uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
                uhci_fixup_toggles(uhci, qh, 1);

                if (list_empty(&urbp->td_list))
                        td = qh->post_td;
                qh->element = td->link;
                tmp = urbp->td_list.prev;       /* free all the URB's TDs */
                ret = 0;
        }

        /* Remove all the TDs we skipped over, from tmp back to the start */
        while (tmp != &urbp->td_list) {
                td = list_entry(tmp, struct uhci_td, list);
                tmp = tmp->prev;

                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
        }
        return ret;
}
1166
1167
1168
1169
/*
 * Common result routine for control, bulk, and interrupt transfers.
 * Walks the URB's completed TDs, accumulating actual_length and mapping
 * any error status.  Returns -EINPROGRESS while TDs are still active,
 * 0 when the URB finished cleanly, or a negative error code; short
 * transfers are handed off to uhci_fixup_short_transfer().
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_qh *qh = urbp->qh;
        struct uhci_td *td, *tmp;
        unsigned status;
        int ret = 0;

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                unsigned int ctrlstat;
                int len;

                ctrlstat = td_status(uhci, td);
                status = uhci_status_bits(ctrlstat);
                if (status & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;    /* hardware not done yet */

                len = uhci_actual_length(ctrlstat);
                urb->actual_length += len;

                if (status) {
                        ret = uhci_map_status(status,
                                        uhci_packetout(td_token(uhci, td)));
                        if ((debug == 1 && ret != -EPIPE) || debug > 1) {
                                /* Some debugging code */
                                dev_dbg(&urb->dev->dev,
                                                "%s: failed with status %x\n",
                                                __func__, status);

                                if (debug > 1 && errbuf) {
                                        /* Print the chain for debugging */
                                        uhci_show_qh(uhci, urbp->qh, errbuf,
                                                ERRBUF_LEN - EXTRA_SPACE, 0);
                                        lprintk(errbuf);
                                }
                        }

                /* Did we receive a short packet? */
                } else if (len < uhci_expected_length(td_token(uhci, td))) {

                        /* For control transfers, go to the status TD if
                         * this isn't already the last data TD */
                        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
                                if (td->list.next != urbp->td_list.prev)
                                        ret = 1;
                        }

                        /* For bulk and interrupt, this may be an error */
                        else if (urb->transfer_flags & URB_SHORT_NOT_OK)
                                ret = -EREMOTEIO;

                        /* Fixup needed only if this isn't the URB's last TD */
                        else if (&td->list != urbp->td_list.prev)
                                ret = 1;
                }

                /* td becomes the queue's new post_td (last completed TD) */
                uhci_remove_td_from_urbp(td);
                if (qh->post_td)
                        uhci_free_td(uhci, qh->post_td);
                qh->post_td = td;

                if (ret != 0)
                        goto err;
        }
        return ret;

err:
        if (ret < 0) {
                /* Note that the queue has stopped and save the next
                 * toggle value */
                qh->element = UHCI_PTR_TERM(uhci);
                qh->is_stopped = 1;
                qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
                qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
                                (ret == -EREMOTEIO);

        } else          /* Short packet received */
                ret = uhci_fixup_short_transfer(uhci, qh, urbp);
        return ret;
}
1250
1251
1252
1253
/*
 * Submit an isochronous URB: choose a starting frame (reserving
 * bandwidth and phase on the first submission, or continuing after the
 * previous URB otherwise), build one TD per packet, and insert them in
 * the frame lists at qh->period intervals.
 *
 * Returns 0 on success, -EFBIG if the request doesn't fit in the frame
 * window, -EINVAL on an interval mismatch, or -ENOMEM/-ENOSPC from the
 * allocators.
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td = NULL;      /* Since urb->number_of_packets > 0 */
        int i;
        unsigned frame, next;
        unsigned long destination, status;
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

        /* Values must not be too big (could overflow below) */
        if (urb->interval >= UHCI_NUMFRAMES ||
                        urb->number_of_packets >= UHCI_NUMFRAMES)
                return -EFBIG;

        uhci_get_current_frame_number(uhci);

        /* Check the period and figure out the starting frame number */
        if (!qh->bandwidth_reserved) {
                qh->period = urb->interval;
                qh->phase = -1;         /* Find the best phase */
                i = uhci_check_bandwidth(uhci, qh);
                if (i)
                        return i;

                /* Allow a little time to allocate the TDs */
                next = uhci->frame_number + 10;
                frame = qh->phase;

                /* Round up to the first available slot */
                frame += (next - frame + qh->period - 1) & -qh->period;

        } else if (qh->period != urb->interval) {
                return -EINVAL;         /* Can't change the period */

        } else {
                next = uhci->frame_number + 1;

                /* Find the next unused frame */
                if (list_empty(&qh->queue)) {
                        frame = qh->iso_frame;
                } else {
                        struct urb *lurb;

                        /* Continue right after the last queued URB */
                        lurb = list_entry(qh->queue.prev,
                                        struct urb_priv, node)->urb;
                        frame = lurb->start_frame +
                                        lurb->number_of_packets *
                                        lurb->interval;
                }

                /* Fell behind? */
                if (!uhci_frame_before_eq(next, frame)) {

                        /* USB_ISO_ASAP: Round up to the first available slot */
                        if (urb->transfer_flags & URB_ISO_ASAP)
                                frame += (next - frame + qh->period - 1) &
                                                -qh->period;

                        /*
                         * Not ASAP: Use the next slot in the stream,
                         * no matter what.
                         */
                        else if (!uhci_frame_before_eq(next,
                                        frame + (urb->number_of_packets - 1) *
                                                qh->period))
                                dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
                                                urb, frame,
                                                (urb->number_of_packets - 1) *
                                                        qh->period,
                                                next);
                }
        }

        /* Make sure we won't have to go too far into the future */
        if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
                        frame + urb->number_of_packets * urb->interval))
                return -EFBIG;
        urb->start_frame = frame;

        status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

        for (i = 0; i < urb->number_of_packets; i++) {
                td = uhci_alloc_td(uhci);
                if (!td)
                        return -ENOMEM;

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(uhci, td, status, destination |
                                uhci_explen(urb->iso_frame_desc[i].length),
                                urb->transfer_dma +
                                        urb->iso_frame_desc[i].offset);
        }

        /* Set the interrupt-on-completion flag on the last packet. */
        td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);

        /* Add the TDs to the frame list */
        frame = urb->start_frame;
        list_for_each_entry(td, &urbp->td_list, list) {
                uhci_insert_td_in_frame_list(uhci, td, frame);
                frame += qh->period;
        }

        if (list_empty(&qh->queue)) {
                qh->iso_packet_desc = &urb->iso_frame_desc[0];
                qh->iso_frame = urb->start_frame;
        }

        qh->skel = SKEL_ISO;
        if (!qh->bandwidth_reserved)
                uhci_reserve_bandwidth(uhci, qh);
        return 0;
}
1368
/*
 * Harvest the results of a completed isochronous URB.  Each TD's frame
 * must already have passed; packets whose frame hasn't gone by yet leave
 * the URB in progress.  TDs still marked active were missed (-EXDEV).
 */
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
        struct uhci_td *td, *tmp;
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_qh *qh = urbp->qh;

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                unsigned int ctrlstat;
                int status;
                int actlength;

                if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
                        return -EINPROGRESS;    /* frame hasn't passed yet */

                uhci_remove_tds_from_frame(uhci, qh->iso_frame);

                ctrlstat = td_status(uhci, td);
                if (ctrlstat & TD_CTRL_ACTIVE) {
                        status = -EXDEV;        /* TD was added too late? */
                } else {
                        status = uhci_map_status(uhci_status_bits(ctrlstat),
                                        usb_pipeout(urb->pipe));
                        actlength = uhci_actual_length(ctrlstat);

                        urb->actual_length += actlength;
                        qh->iso_packet_desc->actual_length = actlength;
                        qh->iso_packet_desc->status = status;
                }
                if (status)
                        urb->error_count++;

                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
                qh->iso_frame += qh->period;
                ++qh->iso_packet_desc;
        }
        return 0;
}
1407
/*
 * HCD entry point: enqueue an URB.  Links the URB to its endpoint,
 * allocates the private data and (if needed) a QH, dispatches to the
 * per-type submit routine, and activates the QH if this URB is now
 * first on the queue.  Error paths unwind in reverse order.
 */
static int uhci_urb_enqueue(struct usb_hcd *hcd,
                struct urb *urb, gfp_t mem_flags)
{
        int ret;
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        unsigned long flags;
        struct urb_priv *urbp;
        struct uhci_qh *qh;

        spin_lock_irqsave(&uhci->lock, flags);

        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        if (ret)
                goto done_not_linked;

        ret = -ENOMEM;
        urbp = uhci_alloc_urb_priv(uhci, urb);
        if (!urbp)
                goto done;

        if (urb->ep->hcpriv)
                qh = urb->ep->hcpriv;           /* endpoint already has a QH */
        else {
                qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
                if (!qh)
                        goto err_no_qh;
        }
        urbp->qh = qh;

        switch (qh->type) {
        case USB_ENDPOINT_XFER_CONTROL:
                ret = uhci_submit_control(uhci, urb, qh);
                break;
        case USB_ENDPOINT_XFER_BULK:
                ret = uhci_submit_bulk(uhci, urb, qh);
                break;
        case USB_ENDPOINT_XFER_INT:
                ret = uhci_submit_interrupt(uhci, urb, qh);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                urb->error_count = 0;
                ret = uhci_submit_isochronous(uhci, urb, qh);
                break;
        }
        if (ret != 0)
                goto err_submit_failed;

        /* Add this URB to the QH */
        list_add_tail(&urbp->node, &qh->queue);

        /* If the new URB is the first and only one on this QH then either
         * the QH is new and idle or else it's unlinked and waiting to
         * become idle, so we can activate it right away.  But only if the
         * queue isn't stopped. */
        if (qh->queue.next == &urbp->node && !qh->is_stopped) {
                uhci_activate_qh(uhci, qh);
                uhci_urbp_wants_fsbr(uhci, urbp);
        }
        goto done;

err_submit_failed:
        if (qh->state == QH_STATE_IDLE)
                uhci_make_qh_idle(uhci, qh);    /* reclaim unused QH */
err_no_qh:
        uhci_free_urb_priv(uhci, urbp);
done:
        if (ret)
                usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
        spin_unlock_irqrestore(&uhci->lock, flags);
        return ret;
}
1480
/*
 * hc_driver->urb_dequeue() entry point: begin unlinking @urb.
 *
 * This only starts the unlink; the URB is actually given back later,
 * from the schedule scan, once the QH is known to be off the hardware
 * schedule.  Returns 0 or the error from usb_hcd_check_unlink_urb().
 */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct uhci_qh *qh;
	int rc;

	spin_lock_irqsave(&uhci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	qh = ((struct urb_priv *) urb->hcpriv)->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();	/* make the TD removals visible before unlinking */

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return rc;
}
1512
1513
1514
1515
/*
 * Remove a finished (or dequeued) URB from its QH's queue and hand it
 * back to the USB core with @status.
 *
 * NOTE: uhci->lock is dropped and re-acquired around the giveback
 * call, so QH and schedule state may change across this function.
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* Subtract off the length of the SETUP packet (8 bytes)
		 * from urb->actual_length; the clamp guards against a
		 * transfer that completed fewer bytes than that. */
		urb->actual_length -= min_t(u32, 8, urb->actual_length);
	}

	/* When giving back the first URB in an Isochronous queue that
	 * has a successor, reinitialize the QH's iso-related members
	 * to point at the next URB's packets. */
	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);
	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);

	/* The completion callback may resubmit or dequeue other URBs,
	 * so it must run without our lock held */
	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up
	 * its reserved bandwidth */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);
		if (qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}
}
1567
1568
1569
1570
/*
 * True once an unlinking QH is safely off the hardware schedule: the
 * frame counter has moved past the frame recorded at unlink time.
 * While the controller is stopped the frame counter doesn't advance,
 * so the is_stopped term makes the comparison succeed immediately in
 * that case.
 */
#define QH_FINISHED_UNLINKING(qh) \
 (qh->state == QH_STATE_UNLINKING && \
 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
1574
/*
 * Scan the URBs in a QH's queue: give back the ones that have
 * finished, handle any that were dequeued while the QH was stopped,
 * and then either restart the QH or retire it.
 * Called with uhci->lock held.
 */
static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking */
		if (urb->unlinked) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, status);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal
	 * case), our work here is done */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->unlinked) {

			/* Fix up the TD links and save the toggles for
			 * URBs with partially-completed transfers.
			 * If the cleanup can't finish yet, leave the QH
			 * running and try again on the next scan. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, 0);
			goto restart;	/* giveback may have changed the list */
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on
	 * the queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(uhci, qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion so we'll notice as soon as the queue starts
		 * moving again */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
/*
 * Check whether a QH's queue has made forward progress, and apply the
 * stall-timeout handling if it hasn't.
 *
 * Returns nonzero if the caller should go on and scan the QH's URBs
 * (the queue advanced, the QH isn't active, the controller is stopped,
 * or a stuck head TD was skipped); returns 0 for an active queue that
 * simply hasn't moved yet.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	/* Isochronous QHs never stall; always scan them */
	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Otherwise the queue has
	 * advanced if its first TD is no longer Active. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(uhci, td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		/* A stopped controller won't make progress; don't wait */
		ret = uhci->is_stopped;
	}

	/* The queue hasn't advanced; check for a timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* The queue is stuck with an inactive TD at its head:
		 * qh->element didn't move past the last-processed TD.
		 * (Apparently a controller erratum; skip past it by
		 * hand and treat the queue as having advanced.) */
		if (qh->post_td && qh_element(qh) ==
				LINK_TO_TD(uhci, qh->post_td)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}
1737
1738
1739
1740
/*
 * Process events in the frame list: walk every skeleton QH's list of
 * real QHs, harvesting completed URBs and restarting stalled queues.
 * Re-entrant invocations are coalesced via the scan_in_progress and
 * need_rescan flags rather than nesting.
 * Called with uhci->lock held.
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;	/* recomputed during the scan */

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one.
	 * (The final skeleton QH is deliberately not scanned.) */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			/* Save the successor now: scanning may remove
			 * this QH from the list */
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
	list_entry(qh->queue.next, struct urb_priv, node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;	/* someone asked for another pass meanwhile */
	uhci->scan_in_progress = 0;

	/* If FSBR is on but nothing wants it anymore, start the timer
	 * that will eventually switch it off */
	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

	/* Request an IOC interrupt only while QHs are waiting to finish
	 * unlinking */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}