// SPDX-License-Identifier: GPL-1.0+
/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licenced under the GPL.
 */

#include <linux/irq.h>
#include <linux/slab.h>
static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
    int     last = urb_priv->length - 1;

    if (last >= 0) {
        int     i;
        struct td   *td;

        for (i = 0; i <= last; i++) {
            td = urb_priv->td [i];
            if (td)
                td_free (hc, td);
        }
    }

    list_del (&urb_priv->pending);
    kfree (urb_priv);
}

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  ohci lock held, irqs blocked.
 */
static void
finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
    struct device *dev = ohci_to_hcd(ohci)->self.controller;
    struct usb_host_endpoint *ep = urb->ep;
    struct urb_priv *urb_priv;

    // ASSERT (urb->hcpriv != 0);

 restart:
    urb_free_priv (ohci, urb->hcpriv);
    urb->hcpriv = NULL;
    if (likely(status == -EINPROGRESS))
        status = 0;

    switch (usb_pipetype (urb->pipe)) {
    case PIPE_ISOCHRONOUS:
        ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
        if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
            if (quirk_amdiso(ohci))
                usb_amd_quirk_pll_enable();
            if (quirk_amdprefetch(ohci))
                sb800_prefetch(dev, 0);
        }
        break;
    case PIPE_INTERRUPT:
        ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
        break;
    }

    /* urb->complete() can reenter this HCD */
    usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
    spin_unlock (&ohci->lock);
    usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
    spin_lock (&ohci->lock);

    /* stop periodic dma if it's not needed */
    if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
            && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
        ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
        ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
    }

    /*
     * An isochronous URB that is submitted too late won't have any TDs
     * (marked by the fact that the td_cnt value is larger than the
     * actual number of TDs).  If the next URB on this endpoint is like
     * that, give it back now.
     */
    if (!list_empty(&ep->urb_list)) {
        urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
        urb_priv = urb->hcpriv;
        if (urb_priv->td_cnt > urb_priv->length) {
            status = 0;
            goto restart;
        }
    }
}


/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
    int i, branch = -ENOSPC;

    /* iso periods can be huge; iso tds specify frame numbers */
    if (interval > NUM_INTS)
        interval = NUM_INTS;

    /* search for the least loaded schedule branch of that period
     * that has enough bandwidth left unreserved.
     */
    for (i = 0; i < interval ; i++) {
        if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
            int j;

            /* usb 1.1 says 90% of one frame */
            for (j = i; j < NUM_INTS; j += interval) {
                if ((ohci->load [j] + load) > 900)
                    break;
            }
            if (j < NUM_INTS)
                continue;
            branch = i;
        }
    }
    return branch;
}
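
/* A worked example of the check above (a minimal sketch, not built into
 * the driver): an ED with interval 8 competes for branches 0..7, and
 * branch i is usable only if every slot i, i+8, i+16, ... stays within
 * the 900 usec budget once this ED's load is added.  The helper name
 * is made up for illustration.
 */
#if 0
static int branch_fits(const u16 *loads, int branch, int interval, int load)
{
    int j;

    /* every slot served by this branch must keep 10% headroom */
    for (j = branch; j < NUM_INTS; j += interval)
        if (loads[j] + load > 900)  /* usb 1.1: 90% of one frame */
            return 0;
    return 1;
}
#endif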

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
    unsigned    i;

    ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
        (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
        ed, ed->branch, ed->load, ed->interval);

    for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
        struct ed   **prev = &ohci->periodic [i];
        __hc32      *prev_p = &ohci->hcca->int_table [i];
        struct ed   *here = *prev;

        /* sorting each branch by period (slow before fast)
         * lets us share the faster parts of the tree.
         * (plus maybe: put interrupt eds before iso)
         */
        while (here && ed != here) {
            if (ed->interval > here->interval)
                break;
            prev = &here->ed_next;
            prev_p = &here->hwNextED;
            here = *prev;
        }
        if (ed != here) {
            ed->ed_next = here;
            if (here)
                ed->hwNextED = *prev_p;
            wmb ();
            *prev = ed;
            *prev_p = cpu_to_hc32(ohci, ed->dma);
            wmb();
        }
        ohci->load [i] += ed->load;
    }
    ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
}
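
/* The sort above keeps each branch list ordered by decreasing interval
 * (slow before fast), so the fast EDs near the tail are shared by many
 * branches.  A minimal sketch of that invariant (illustration only,
 * not built; the helper name is made up):
 */
#if 0
static int branch_is_sorted(struct ed *head)
{
    struct ed *ed;

    for (ed = head; ed && ed->ed_next; ed = ed->ed_next)
        if (ed->interval < ed->ed_next->interval)
            return 0;   /* a faster ED precedes a slower one */
    return 1;
}
#endif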

/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
    int branch;

    ed->ed_prev = NULL;
    ed->ed_next = NULL;
    ed->hwNextED = 0;
    wmb ();

    /* we care about rm_list when setting CLE/BLE in case the HC was at
     * work on some TD when CLE/BLE was turned off, and isn't quiesced
     * yet.  finish_unlinks() restarts as needed, at some upcoming
     * SOF (INTR_SF) interrupt.
     *
     * control and bulk EDs are doubly linked (ed_next, ed_prev), but
     * periodic ones are singly linked (ed_next). that's because the
     * periodic schedule encodes a tree like figure 3-5 in the ohci
     * spec:  each qh can have several "previous" nodes, and the tree
     * doesn't have unused/idle descriptors.
     */
    switch (ed->type) {
    case PIPE_CONTROL:
        if (ohci->ed_controltail == NULL) {
            WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
            ohci_writel (ohci, ed->dma,
                    &ohci->regs->ed_controlhead);
        } else {
            ohci->ed_controltail->ed_next = ed;
            ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
                                ed->dma);
        }
        ed->ed_prev = ohci->ed_controltail;
        if (!ohci->ed_controltail && !ohci->ed_rm_list) {
            wmb();
            ohci->hc_control |= OHCI_CTRL_CLE;
            ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
            ohci_writel (ohci, ohci->hc_control,
                    &ohci->regs->control);
        }
        ohci->ed_controltail = ed;
        break;

    case PIPE_BULK:
        if (ohci->ed_bulktail == NULL) {
            WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
            ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
        } else {
            ohci->ed_bulktail->ed_next = ed;
            ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
                                ed->dma);
        }
        ed->ed_prev = ohci->ed_bulktail;
        if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
            wmb();
            ohci->hc_control |= OHCI_CTRL_BLE;
            ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
            ohci_writel (ohci, ohci->hc_control,
                    &ohci->regs->control);
        }
        ohci->ed_bulktail = ed;
        break;

    // case PIPE_INTERRUPT:
    // case PIPE_ISOCHRONOUS:
    default:
        branch = balance (ohci, ed->interval, ed->load);
        if (branch < 0) {
            ohci_dbg (ohci,
                "ERR %d, interval %d msecs, load %d\n",
                branch, ed->interval, ed->load);
            // FIXME if there are TDs queued, fail them!
            return branch;
        }
        ed->branch = branch;
        periodic_link (ohci, ed);
    }

    /* the HC may not see the schedule updates yet, but if it does
     * then they'll be properly ordered.
     */

    ed->state = ED_OPER;
    return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
    int i;

    for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
        struct ed   *temp;
        struct ed   **prev = &ohci->periodic [i];
        __hc32      *prev_p = &ohci->hcca->int_table [i];

        while (*prev && (temp = *prev) != ed) {
            prev_p = &temp->hwNextED;
            prev = &temp->ed_next;
        }
        if (*prev) {
            *prev_p = ed->hwNextED;
            *prev = ed->ed_next;
        }
        ohci->load [i] -= ed->load;
    }
    ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;

    ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
        (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
        ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue or the HC isn't running.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
    ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
    wmb ();
    ed->state = ED_UNLINK;

    /* To deschedule something from the control or bulk list, just
     * clear CLE/BLE and wait.  There's no safe way to scrub out list
     * head/current registers until later, and "later" isn't very
     * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
     * the HC is reading the ED queues (while we modify them).
     *
     * For now, ed_schedule() is "later".  It might be good paranoia
     * to scrub those registers in finish_unlinks(), in case of bugs
     * that make the HC try to use them.
     */
    switch (ed->type) {
    case PIPE_CONTROL:
        /* remove ED from the HC's list: */
        if (ed->ed_prev == NULL) {
            if (!ed->hwNextED) {
                ohci->hc_control &= ~OHCI_CTRL_CLE;
                ohci_writel (ohci, ohci->hc_control,
                        &ohci->regs->control);
                // an ohci_readl() later syncs CLE with the HC
            } else
                ohci_writel (ohci,
                    hc32_to_cpup (ohci, &ed->hwNextED),
                    &ohci->regs->ed_controlhead);
        } else {
            ed->ed_prev->ed_next = ed->ed_next;
            ed->ed_prev->hwNextED = ed->hwNextED;
        }
        /* remove ED from the HCD's list: */
        if (ohci->ed_controltail == ed) {
            ohci->ed_controltail = ed->ed_prev;
            if (ohci->ed_controltail)
                ohci->ed_controltail->ed_next = NULL;
        } else if (ed->ed_next) {
            ed->ed_next->ed_prev = ed->ed_prev;
        }
        break;

    case PIPE_BULK:
        /* remove ED from the HC's list: */
        if (ed->ed_prev == NULL) {
            if (!ed->hwNextED) {
                ohci->hc_control &= ~OHCI_CTRL_BLE;
                ohci_writel (ohci, ohci->hc_control,
                        &ohci->regs->control);
                // an ohci_readl() later syncs BLE with the HC
            } else
                ohci_writel (ohci,
                    hc32_to_cpup (ohci, &ed->hwNextED),
                    &ohci->regs->ed_bulkhead);
        } else {
            ed->ed_prev->ed_next = ed->ed_next;
            ed->ed_prev->hwNextED = ed->hwNextED;
        }
        /* remove ED from the HCD's list: */
        if (ohci->ed_bulktail == ed) {
            ohci->ed_bulktail = ed->ed_prev;
            if (ohci->ed_bulktail)
                ohci->ed_bulktail->ed_next = NULL;
        } else if (ed->ed_next) {
            ed->ed_next->ed_prev = ed->ed_prev;
        }
        break;

    // case PIPE_INTERRUPT:
    // case PIPE_ISOCHRONOUS:
    default:
        periodic_unlink (ohci, ed);
        break;
    }
}


/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get (
    struct ohci_hcd     *ohci,
    struct usb_host_endpoint *ep,
    struct usb_device   *udev,
    unsigned int        pipe,
    int         interval
) {
    struct ed       *ed;
    unsigned long       flags;

    spin_lock_irqsave (&ohci->lock, flags);

    ed = ep->hcpriv;
    if (!ed) {
        struct td   *td;
        int     is_out;
        u32     info;

        ed = ed_alloc (ohci, GFP_ATOMIC);
        if (!ed) {
            /* out of memory */
            goto done;
        }

        /* dummy td; end of td list for ed */
        td = td_alloc (ohci, GFP_ATOMIC);
        if (!td) {
            /* out of memory */
            ed_free (ohci, ed);
            ed = NULL;
            goto done;
        }
        ed->dummy = td;
        ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
        ed->hwHeadP = ed->hwTailP;  /* ED_C, ED_H zeroed */
        ed->state = ED_IDLE;

        is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);

        /* FIXME usbcore changes dev->devnum before SET_ADDRESS
         * succeeds ... otherwise we wouldn't need "pipe".
         */
        info = usb_pipedevice (pipe);
        ed->type = usb_pipetype(pipe);

        info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
        info |= usb_endpoint_maxp(&ep->desc) << 16;
        if (udev->speed == USB_SPEED_LOW)
            info |= ED_LOWSPEED;
        /* only control transfers store pids in tds */
        if (ed->type != PIPE_CONTROL) {
            info |= is_out ? ED_OUT : ED_IN;
            if (ed->type != PIPE_BULK) {
                /* periodic transfers... */
                if (ed->type == PIPE_ISOCHRONOUS)
                    info |= ED_ISO;
                else if (interval > 32) /* iso can be bigger */
                    interval = 32;
                ed->interval = interval;
                ed->load = usb_calc_bus_time (
                    udev->speed, !is_out,
                    ed->type == PIPE_ISOCHRONOUS,
                    usb_endpoint_maxp(&ep->desc))
                        / 1000;
            }
        }
        ed->hwINFO = cpu_to_hc32(ohci, info);

        ep->hcpriv = ed;
    }

done:
    spin_unlock_irqrestore (&ohci->lock, flags);
    return ed;
}
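
/* The info word built above follows the OHCI ED dword 0 layout:
 * function address in bits 0..6, endpoint number starting at bit 7,
 * then the direction/speed/format flags, with max packet size at
 * bit 16.  A minimal sketch of that packing (illustration only, not
 * built; the helper name is made up):
 */
#if 0
static u32 ed_info_pack(u32 devnum, u32 epnum, u32 maxp, u32 flags)
{
    return devnum | (epnum << 7) | (maxp << 16) | flags;
}
#endif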

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
    ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
    ed_deschedule (ohci, ed);

    /* rm_list is just singly linked, for simplicity */
    ed->ed_next = ohci->ed_rm_list;
    ed->ed_prev = NULL;
    ohci->ed_rm_list = ed;

    /* enable SOF interrupt */
    ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
    ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
    // flush those writes, and get latest HCCA contents
    (void) ohci_readl (ohci, &ohci->regs->control);

    /* SF interrupt might get delayed; record the frame counter value that
     * indicates when the HC isn't looking at it, so concurrent unlinks
     * behave.  frame_no wraps every 2^16 msec, and changes right before
     * SF is triggered.
     */
    ed->tick = ohci_frame_no(ohci) + 1;
}
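
/* finish_unlinks() compares ed->tick against the current frame number
 * with the 2^16 msec wrap in mind.  A minimal sketch of a wrap-safe
 * comparison (illustration only, not built; the driver's tick_before()
 * macro in ohci.h is equivalent in spirit):
 */
#if 0
static int frame_is_before(u16 t1, u16 t2)
{
    /* a signed 16-bit difference handles the counter wrap */
    return (s16)(t1 - t2) < 0;
}
#endif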

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
    dma_addr_t data, int len,
    struct urb *urb, int index)
{
    struct td       *td, *td_pt;
    struct urb_priv     *urb_priv = urb->hcpriv;
    int         is_iso = info & TD_ISO;
    int         hash;

    // ASSERT (index < urb_priv->length);

    /* aim for only one interrupt per urb.  mostly applies to control
     * and iso; other urbs rarely need more than one TD per urb.
     * this way, only final tds (or ones with an error) cause IRQs.
     * at least immediately; use DI=6 in case any control request is
     * tempted to die part way through.  (and to force the hc to flush
     * its donelist soonish, even on unlink paths.)
     *
     * NOTE: could delay interrupts even for the last TD, and get fewer
     * interrupts ... increasing per-urb latency by sharing interrupts.
     * Drivers that queue bulk urbs may request that behavior.
     */
    if (index != (urb_priv->length - 1)
            || (urb->transfer_flags & URB_NO_INTERRUPT))
        info |= TD_DI_SET (6);

    /* use this td as the next dummy */
    td_pt = urb_priv->td [index];

    /* fill the old dummy TD */
    td = urb_priv->td [index] = urb_priv->ed->dummy;
    urb_priv->ed->dummy = td_pt;

    td->ed = urb_priv->ed;
    td->next_dl_td = NULL;
    td->index = index;
    td->urb = urb;
    td->data_dma = data;
    if (!len)
        data = 0;

    td->hwINFO = cpu_to_hc32 (ohci, info);
    if (is_iso) {
        td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
        *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
                        (data & 0x0FFF) | 0xE000);
    } else {
        td->hwCBP = cpu_to_hc32 (ohci, data);
    }
    if (data)
        td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
    else
        td->hwBE = 0;
    td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);

    /* append to queue */
    list_add_tail (&td->td_list, &td->ed->td_list);

    /* hash it for later reverse mapping */
    hash = TD_HASH_FUNC (td->td_dma);
    td->td_hash = ohci->td_hash [hash];
    ohci->td_hash [hash] = td;

    /* HC might read the TD (or cachelines) right away ... */
    wmb ();
    td->ed->hwTailP = td->hwNextTD;
}
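
/* The DelayInterrupt field set above occupies bits 21..23 of hwINFO;
 * DI=6 lets the HC coalesce done-queue interrupts for up to six
 * frames, and the all-ones value asks for no interrupt at all.  A
 * minimal sketch of the encoding (illustration only, not built; the
 * helper name is made up):
 */
#if 0
static u32 td_info_with_delay(u32 info, unsigned frames)
{
    /* TD_DI_SET() masks to 3 bits, so 7 means "don't interrupt" */
    return (info & ~TD_DI_SET(7)) | TD_DI_SET(frames);
}
#endif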

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
    struct ohci_hcd *ohci,
    struct urb  *urb
) {
    struct urb_priv *urb_priv = urb->hcpriv;
    struct device *dev = ohci_to_hcd(ohci)->self.controller;
    dma_addr_t  data;
    int     data_len = urb->transfer_buffer_length;
    int     cnt = 0;
    u32     info = 0;
    int     is_out = usb_pipeout (urb->pipe);
    int     periodic = 0;
    int     i, this_sg_len, n;
    struct scatterlist  *sg;

    /* OHCI handles the bulk/interrupt data toggles itself.  We just
     * use the device toggle bits for resetting, and rely on the fact
     * that resetting toggle is meaningless if the endpoint is active.
     */
    if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
        usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
            is_out, 1);
        urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
    }

    list_add (&urb_priv->pending, &ohci->pending);

    i = urb->num_mapped_sgs;
    if (data_len > 0 && i > 0) {
        sg = urb->sg;
        data = sg_dma_address(sg);

        /*
         * urb->transfer_buffer_length may be smaller than the
         * size of the scatterlist (or vice versa)
         */
        this_sg_len = min_t(int, sg_dma_len(sg), data_len);
    } else {
        sg = NULL;
        if (data_len)
            data = urb->transfer_dma;
        else
            data = 0;
        this_sg_len = data_len;
    }

    /* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
     * using TD_CC_GET, as well as by seeing them on the done list.
     * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
     */
    switch (urb_priv->ed->type) {

    /* Bulk and interrupt are identical except for where in the schedule
     * their EDs live.
     */
    case PIPE_INTERRUPT:
        /* ... and periodic urbs have extra accounting */
        periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
            && ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
        fallthrough;
    case PIPE_BULK:
        info = is_out
            ? TD_T_TOGGLE | TD_CC | TD_DP_OUT
            : TD_T_TOGGLE | TD_CC | TD_DP_IN;
        /* TDs _could_ transfer up to 8K each */
        for (;;) {
            n = min(this_sg_len, 4096);

            /* maybe avoid ED halt on final TD short read */
            if (n >= data_len || (i == 1 && n >= this_sg_len)) {
                if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                    info |= TD_R;
            }
            td_fill(ohci, info, data, n, urb, cnt);
            this_sg_len -= n;
            data_len -= n;
            data += n;
            cnt++;

            if (this_sg_len <= 0) {
                if (--i <= 0 || data_len <= 0)
                    break;
                sg = sg_next(sg);
                data = sg_dma_address(sg);
                this_sg_len = min_t(int, sg_dma_len(sg),
                        data_len);
            }
        }
        if ((urb->transfer_flags & URB_ZERO_PACKET)
                && cnt < urb_priv->length) {
            td_fill (ohci, info, 0, 0, urb, cnt);
            cnt++;
        }
        /* maybe kickstart bulk list */
        if (urb_priv->ed->type == PIPE_BULK) {
            wmb ();
            ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
        }
        break;

    /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
     * any DATA phase works normally, and the STATUS ack is special.
     */
    case PIPE_CONTROL:
        info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
        td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
        if (data_len > 0) {
            info = TD_CC | TD_R | TD_T_DATA1;
            info |= is_out ? TD_DP_OUT : TD_DP_IN;
            /* NOTE:  mishandles transfers >8K, some >4K */
            td_fill (ohci, info, data, data_len, urb, cnt++);
        }
        info = (is_out || data_len == 0)
            ? TD_CC | TD_DP_IN | TD_T_DATA1
            : TD_CC | TD_DP_OUT | TD_T_DATA1;
        td_fill (ohci, info, data, 0, urb, cnt++);
        /* maybe kickstart control list */
        wmb ();
        ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
        break;

    /* ISO has no retransmit, so no toggle; and it uses special TDs.
     * Each TD could handle multiple consecutive frames (interval 1);
     * we could often reduce the number of TDs here.
     */
    case PIPE_ISOCHRONOUS:
        for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
                cnt++) {
            int frame = urb->start_frame;

            // FIXME scheduling should handle frame counter
            // roll-around ... exotic case (and OHCI has
            // a 2^16 iso range, vs other HCs max of 2^10)
            frame += cnt * urb->interval;
            frame &= 0xffff;
            td_fill (ohci, TD_CC | TD_ISO | frame,
                data + urb->iso_frame_desc [cnt].offset,
                urb->iso_frame_desc [cnt].length, urb, cnt);
        }
        if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
            if (quirk_amdiso(ohci))
                usb_amd_quirk_pll_disable();
            if (quirk_amdprefetch(ohci))
                sb800_prefetch(dev, 1);
        }
        periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
            && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
        break;
    }

    /* start periodic dma if needed */
    if (periodic) {
        wmb ();
        ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
        ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
    }

    // ASSERT (urb_priv->length == cnt);
}
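
/* Since the loop above caps each TD at 4096 bytes, a flat (non-
 * scatterlist) bulk buffer needs roughly len/4096 TDs, plus one more
 * when URB_ZERO_PACKET appends a zero-length trailer.  A minimal
 * sketch of that count (illustration only, not built; the helper name
 * is made up, and the real count is sized at enqueue time):
 */
#if 0
static int bulk_td_count(int len)
{
    /* a zero-length transfer still needs one (empty) TD */
    return len ? DIV_ROUND_UP(len, 4096) : 1;
}
#endif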

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb */
static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
    u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
    int cc = 0;
    int status = -EINPROGRESS;

    list_del (&td->td_list);

    /* ISO ... drivers see per-TD length/status */
    if (tdINFO & TD_ISO) {
        u16 tdPSW = ohci_hwPSW(ohci, td, 0);
        int dlen = 0;

        /* NOTE:  assumes FC in tdINFO == 0, and that
         * only the first of 0..MAXPSW psws is used.
         */

        cc = (tdPSW >> 12) & 0xF;
        if (tdINFO & TD_CC) /* hc didn't touch? */
            return status;

        if (usb_pipeout (urb->pipe))
            dlen = urb->iso_frame_desc [td->index].length;
        else {
            /* short reads are always OK for ISO */
            if (cc == TD_DATAUNDERRUN)
                cc = TD_CC_NOERROR;
            dlen = tdPSW & 0x3ff;
        }
        urb->actual_length += dlen;
        urb->iso_frame_desc [td->index].actual_length = dlen;
        urb->iso_frame_desc [td->index].status = cc_to_error [cc];

        if (cc != TD_CC_NOERROR)
            ohci_dbg(ohci,
                "urb %p iso td %p (%d) len %d cc %d\n",
                urb, td, 1 + td->index, dlen, cc);

    /* BULK, INT, CONTROL ... drivers see aggregate length/status,
     * except that "setup" bytes aren't counted and "short" transfers
     * might not be reported as errors.
     */
    } else {
        int type = usb_pipetype (urb->pipe);
        u32 tdBE = hc32_to_cpup (ohci, &td->hwBE);

        cc = TD_CC_GET (tdINFO);

        /* update packet status if needed (short is normally ok) */
        if (cc == TD_DATAUNDERRUN
                && !(urb->transfer_flags & URB_SHORT_NOT_OK))
            cc = TD_CC_NOERROR;
        if (cc != TD_CC_NOERROR && cc < 0x0E)
            status = cc_to_error[cc];

        /* count all non-empty packets except control SETUP packet */
        if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
            if (td->hwCBP == 0)
                urb->actual_length += tdBE - td->data_dma + 1;
            else
                urb->actual_length +=
                      hc32_to_cpup (ohci, &td->hwCBP)
                    - td->data_dma;
        }

        if (cc != TD_CC_NOERROR && cc < 0x0E)
            ohci_dbg(ohci,
                "urb %p td %p (%d) cc %d, len=%d/%d\n",
                urb, td, 1 + td->index, cc,
                urb->actual_length,
                urb->transfer_buffer_length);
    }
    return status;
}
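
/* Each ISO PSW decoded above packs a 4-bit condition code over the
 * size field.  A minimal sketch of the unpacking (illustration only,
 * not built; the helper name is made up):
 */
#if 0
static void psw_decode(u16 psw, int *cc, int *dlen)
{
    *cc = (psw >> 12) & 0xF;    /* ConditionCode nibble */
    *dlen = psw & 0x3ff;        /* bytes transferred, as masked above */
}
#endif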

/*-------------------------------------------------------------------------*/

static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
{
    struct urb      *urb = td->urb;
    urb_priv_t      *urb_priv = urb->hcpriv;
    struct ed       *ed = td->ed;
    struct list_head    *tmp = td->td_list.next;
    __hc32          toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);

    /* clear ed halt; this is the td that caused it, but keep it inactive
     * until its urb->complete() has a chance to clean up.
     */
    ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
    wmb ();
    ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);

    /* Get rid of all later tds from this urb.  We don't have
     * to be careful: no errors and nothing was transferred.
     * Also patch the ed so it looks as if those tds completed normally.
     */
    while (tmp != &ed->td_list) {
        struct td   *next;

        next = list_entry (tmp, struct td, td_list);
        tmp = next->td_list.next;

        if (next->urb != urb)
            break;

        /* NOTE: if multi-td control DATA segments get supported,
         * this urb had one of them, this td wasn't the last td
         * in that segment (TD_R clear), this ed halted because
         * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
         * then we need to leave the control STATUS packet queued
         * and clear ED_SKIP.
         */

        list_del(&next->td_list);
        urb_priv->td_cnt++;
        ed->hwHeadP = next->hwNextTD | toggle;
    }

    /* help for troubleshooting:  report anything that
     * looks odd ... that doesn't include protocol stalls
     * (or maybe some other things)
     */
    switch (cc) {
    case TD_DATAUNDERRUN:
        if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
            break;
        fallthrough;
    case TD_CC_STALL:
        if (usb_pipecontrol (urb->pipe))
            break;
        fallthrough;
    default:
        ohci_dbg (ohci,
            "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
            urb, urb->dev->devpath,
            usb_pipeendpoint (urb->pipe),
            usb_pipein (urb->pipe) ? "in" : "out",
            hc32_to_cpu (ohci, td->hwINFO),
            cc, cc_to_error [cc]);
    }
}

/* Add a TD to the done list */
static void add_to_done_list(struct ohci_hcd *ohci, struct td *td)
{
    struct td   *td2, *td_prev;
    struct ed   *ed;

    if (td->next_dl_td)
        return;     /* Already on the list */

    /* Add all the TDs going back until we reach one that's on the list */
    ed = td->ed;
    td2 = td_prev = td;
    list_for_each_entry_continue_reverse(td2, &ed->td_list, td_list) {
        if (td2->next_dl_td)
            break;
        td2->next_dl_td = td_prev;
        td_prev = td2;
    }

    if (ohci->dl_end)
        ohci->dl_end->next_dl_td = td_prev;
    else
        ohci->dl_start = td_prev;

    /*
     * Make td->next_dl_td point to td itself, to mark the fact
     * that td is on the done list.
     */
    ohci->dl_end = td->next_dl_td = td;

    /* Did we just add the latest pending TD? */
    td2 = ed->pending_td;
    if (td2 && td2->next_dl_td)
        ed->pending_td = NULL;
}
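
/* The self-pointing next_dl_td assignment above doubles as a
 * membership flag: a TD is on the HCD's done list exactly when
 * next_dl_td is non-NULL, with the list tail pointing at itself.  A
 * minimal sketch of that test (illustration only, not built; the
 * helper name is made up):
 */
#if 0
static int td_on_done_list(struct td *td)
{
    return td->next_dl_td != NULL;
}
#endif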

/* Get the entries on the hardware done queue and put them on our list */
static void update_done_list(struct ohci_hcd *ohci)
{
    u32     td_dma;
    struct td   *td = NULL;

    td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
    ohci->hcca->done_head = 0;
    wmb();

    /* get TD from hc's singly linked list, and
     * add to ours.  ed->td_list changes later.
     */
    while (td_dma) {
        int     cc;

        td = dma_to_td (ohci, td_dma);
        if (!td) {
            ohci_err (ohci, "bad entry %8x\n", td_dma);
            break;
        }

        td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
        cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));

        /* Non-iso endpoints can halt on error; un-halt,
         * and dequeue any other TDs from this urb.
         * No other TD could have caused the halt.
         */
        if (cc != TD_CC_NOERROR
                && (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
            ed_halted(ohci, td, cc);

        td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
        add_to_done_list(ohci, td);
    }
}

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void finish_unlinks(struct ohci_hcd *ohci)
{
    unsigned    tick = ohci_frame_no(ohci);
    struct ed   *ed, **last;

rescan_all:
    for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
        struct list_head    *entry, *tmp;
        int         completed, modified;
        __hc32          *prev;

        /* only take off EDs that the HC isn't using, accounting for
         * frame counter wraps and EDs with partially retired TDs
         */
        if (likely(ohci->rh_state == OHCI_RH_RUNNING) &&
                tick_before(tick, ed->tick)) {
skip_ed:
            last = &ed->ed_next;
            continue;
        }
        if (!list_empty(&ed->td_list)) {
            struct td   *td;
            u32     head;

            td = list_first_entry(&ed->td_list, struct td, td_list);

            /* INTR_WDH may need to clean up first */
            head = hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK;
            if (td->td_dma != head &&
                    ohci->rh_state == OHCI_RH_RUNNING)
                goto skip_ed;

            /* Don't mess up anything already on the done list */
            if (td->next_dl_td)
                goto skip_ed;
        }

        /* ED's now officially unlinked, hc doesn't see */
        ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
        ed->hwNextED = 0;
        wmb();
        ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);

        /* reentrancy:  if we drop the schedule lock, someone might
         * have modified this list.  normally it's just prepending
         * entries (which we'd ignore), but paranoia won't hurt.
         */
        *last = ed->ed_next;
        ed->ed_next = NULL;
        modified = 0;

        /* unlink urbs as requested, but rescan the list after
         * we call a completion since it might have unlinked
         * another (earlier) urb
         *
         * When we get here, the HC doesn't see this ed.  But it
         * must not be rescheduled until all completed URBs have
         * been given back to the driver.
         */
rescan_this:
        completed = 0;
        prev = &ed->hwHeadP;
        list_for_each_safe (entry, tmp, &ed->td_list) {
            struct td   *td;
            struct urb  *urb;
            urb_priv_t  *urb_priv;
            __hc32      savebits;
            u32     tdINFO;

            td = list_entry (entry, struct td, td_list);
            urb = td->urb;
            urb_priv = td->urb->hcpriv;

            if (!urb->unlinked) {
                prev = &td->hwNextTD;
                continue;
            }

            /* patch pointer hc uses */
            savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
            *prev = td->hwNextTD | savebits;

            /* If this was unlinked, the TD may not have been
             * retired ... so manually save the data toggle.
             * The controller ignores the value we save for
             * control and ISO endpoints.
             */
            tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
            if ((tdINFO & TD_T) == TD_T_DATA0)
                ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
            else if ((tdINFO & TD_T) == TD_T_DATA1)
                ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);

            /* HC may have partly processed this TD */
            td_done (ohci, urb, td);
            urb_priv->td_cnt++;

            /* if URB is done, clean up */
            if (urb_priv->td_cnt >= urb_priv->length) {
                modified = completed = 1;
                finish_urb(ohci, urb, 0);
            }
        }
        if (completed && !list_empty (&ed->td_list))
            goto rescan_this;

        /*
         * If no TDs are queued, ED is now idle.
         * Otherwise, if the HC is running, reschedule.
         * If the HC isn't running, add ED back to the
         * start of the list for later processing.
         */
        if (list_empty(&ed->td_list)) {
            ed->state = ED_IDLE;
            list_del(&ed->in_use_list);
        } else if (ohci->rh_state == OHCI_RH_RUNNING) {
            ed_schedule(ohci, ed);
        } else {
            ed->ed_next = ohci->ed_rm_list;
            ohci->ed_rm_list = ed;
            /* Don't loop on the same ED */
            if (last == &ohci->ed_rm_list)
                last = &ed->ed_next;
        }

        if (modified)
            goto rescan_all;
    }

    /* maybe reenable control and bulk lists */
    if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
        u32 command = 0, control = 0;

        if (ohci->ed_controltail) {
            command |= OHCI_CLF;
            if (quirk_zfmicro(ohci))
                mdelay(1);
            if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
                control |= OHCI_CTRL_CLE;
                ohci_writel (ohci, 0,
                    &ohci->regs->ed_controlcurrent);
            }
        }
        if (ohci->ed_bulktail) {
            command |= OHCI_BLF;
            if (quirk_zfmicro(ohci))
                mdelay(1);
            if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
                control |= OHCI_CTRL_BLE;
                ohci_writel (ohci, 0,
                    &ohci->regs->ed_bulkcurrent);
            }
        }

        /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
        if (control) {
            ohci->hc_control |= control;
            if (quirk_zfmicro(ohci))
                mdelay(1);
            ohci_writel (ohci, ohci->hc_control,
                    &ohci->regs->control);
        }
        if (command) {
            if (quirk_zfmicro(ohci))
                mdelay(1);
            ohci_writel (ohci, command, &ohci->regs->cmdstatus);
        }
    }
}


/*-------------------------------------------------------------------------*/

/* Take back a TD from the host controller */
static void takeback_td(struct ohci_hcd *ohci, struct td *td)
{
    struct urb  *urb = td->urb;
    urb_priv_t  *urb_priv = urb->hcpriv;
    struct ed   *ed = td->ed;
    int     status;

    /* update URB's length and status from TD */
    status = td_done(ohci, urb, td);
    urb_priv->td_cnt++;

    /* If all this urb's TDs are done, call complete() */
    if (urb_priv->td_cnt >= urb_priv->length)
        finish_urb(ohci, urb, status);

    /* clean schedule:  unlink EDs that are no longer busy */
    if (list_empty(&ed->td_list)) {
        if (ed->state == ED_OPER)
            start_ed_unlink(ohci, ed);

    /* ... reenabling halted EDs only after fault cleanup */
    } else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
            == cpu_to_hc32(ohci, ED_SKIP)) {
        td = list_entry(ed->td_list.next, struct td, td_list);
        if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
            ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
            /* ... hc may need waking-up */
            switch (ed->type) {
            case PIPE_CONTROL:
                ohci_writel(ohci, OHCI_CLF,
                        &ohci->regs->cmdstatus);
                break;
            case PIPE_BULK:
                ohci_writel(ohci, OHCI_BLF,
                        &ohci->regs->cmdstatus);
                break;
            }
        }
    }
}

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
 * instead of scanning the (re-reversed) donelist as this does.
 */
static void process_done_list(struct ohci_hcd *ohci)
{
    struct td   *td;

    while (ohci->dl_start) {
        td = ohci->dl_start;
        if (td == ohci->dl_end)
            ohci->dl_start = ohci->dl_end = NULL;
        else
            ohci->dl_start = td->next_dl_td;

        takeback_td(ohci, td);
    }
}

/*
 * TD takeback and URB giveback must be single-threaded.
 * This routine takes care of it all.
 */
static void ohci_work(struct ohci_hcd *ohci)
{
    if (ohci->working) {
        ohci->restart_work = 1;
        return;
    }
    ohci->working = 1;

 restart:
    process_done_list(ohci);
    if (ohci->ed_rm_list)
        finish_unlinks(ohci);

    if (ohci->restart_work) {
        ohci->restart_work = 0;
        goto restart;
    }
    ohci->working = 0;
}
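
/* The working/restart_work pair above is a lock-held reentrancy guard:
 * a caller that arrives while work is in progress just sets a flag,
 * and the first invocation loops until no rerun is pending.  A generic
 * sketch of the pattern (illustration only, not built; struct state
 * and do_one_pass() are made up):
 */
#if 0
static void guarded_work(struct state *s)   /* caller holds s's lock */
{
    if (s->busy) {
        s->rerun = 1;       /* fold into the active pass */
        return;
    }
    s->busy = 1;
    do {
        s->rerun = 0;
        do_one_pass(s);     /* may indirectly recurse into here */
    } while (s->rerun);
    s->busy = 0;
}
#endif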