// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2012 by Alan Stern
 */

/* This file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/* Set a bit in the USBCMD register */
static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
{
    ehci->command |= bit;
    ehci_writel(ehci, ehci->command, &ehci->regs->command);

    /* unblock posted write */
    ehci_readl(ehci, &ehci->regs->command);
}

/* Clear a bit in the USBCMD register */
static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
{
    ehci->command &= ~bit;
    ehci_writel(ehci, ehci->command, &ehci->regs->command);

    /* unblock posted write */
    ehci_readl(ehci, &ehci->regs->command);
}

/*-------------------------------------------------------------------------*/

/*
 * EHCI timer support...  Now using hrtimers.
 *
 * Lots of different events are triggered from ehci->hrtimer.  Whenever
 * the timer routine runs, it checks each possible event; events that are
 * currently enabled and whose expiration time has passed get handled.
 * The set of enabled events is stored as a collection of bitflags in
 * ehci->enabled_hrtimer_events, and they are numbered in order of
 * increasing delay values (ranging between 1 ms and 100 ms).
 *
 * Rather than implementing a sorted list or tree of all pending events,
 * we keep track only of the lowest-numbered pending event, in
 * ehci->next_hrtimer_event.  Whenever ehci->hrtimer gets restarted, its
 * expiration time is set to the timeout value for this event.
 *
 * As a result, events might not get handled right away; the actual delay
 * could be anywhere up to twice the requested delay.  This doesn't
 * matter, because none of the events are especially time-critical.  The
 * ones that matter most all have a delay of 1 ms, so they will be
 * handled after 2 ms at most, which is okay.  In addition to this, we
 * allow for an expiration range of 1 ms.
 */
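
/*
 * Illustrative worked example (not part of the driver's logic): suppose
 * EHCI_HRTIMER_FREE_ITDS (2 ms delay) is enabled at t = 0, so its deadline
 * is t = 2 ms.  If EHCI_HRTIMER_POLL_ASS (1 ms delay, a lower-numbered
 * event) is then enabled at t = 1.9 ms, it becomes the new
 * next_hrtimer_event and the hrtimer is reprogrammed for about t = 2.9 ms
 * (plus up to 1 ms of slack).  FREE_ITDS is therefore handled nearly 1 ms
 * after its own deadline -- close to twice its requested delay, which is
 * the worst case described above.
 */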

/*
 * Delay lengths for the hrtimer event types.
 * Keep this list sorted by delay length, in the same order as
 * the event types indexed by enum ehci_hrtimer_event in ehci.h.
 */
static unsigned event_delays_ns[] = {
    1 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_POLL_ASS */
    1 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_POLL_PSS */
    1 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_POLL_DEAD */
    1125 * NSEC_PER_USEC,   /* EHCI_HRTIMER_UNLINK_INTR */
    2 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_FREE_ITDS */
    2 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_ACTIVE_UNLINK */
    5 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_START_UNLINK_INTR */
    6 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_ASYNC_UNLINKS */
    10 * NSEC_PER_MSEC,     /* EHCI_HRTIMER_IAA_WATCHDOG */
    10 * NSEC_PER_MSEC,     /* EHCI_HRTIMER_DISABLE_PERIODIC */
    15 * NSEC_PER_MSEC,     /* EHCI_HRTIMER_DISABLE_ASYNC */
    100 * NSEC_PER_MSEC,    /* EHCI_HRTIMER_IO_WATCHDOG */
};

/* Enable a pending hrtimer event */
static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
        bool resched)
{
    ktime_t     *timeout = &ehci->hr_timeouts[event];

    if (resched)
        *timeout = ktime_add(ktime_get(), event_delays_ns[event]);
    ehci->enabled_hrtimer_events |= (1 << event);

    /* Track only the lowest-numbered pending event */
    if (event < ehci->next_hrtimer_event) {
        ehci->next_hrtimer_event = event;
        hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
                NSEC_PER_MSEC, HRTIMER_MODE_ABS);
    }
}
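
/*
 * Illustrative sketch, not used by the driver: with the bookkeeping above,
 * the true earliest deadline among all pending events could be found by
 * scanning the bitmask, roughly as below.  The driver deliberately skips
 * this and tracks only the lowest-numbered pending event, accepting that
 * any event may be handled up to about twice its requested delay late.
 * The helper name is made up for illustration.
 */
static ktime_t __maybe_unused ehci_earliest_pending_deadline(
        struct ehci_hcd *ehci)
{
    unsigned long   events = ehci->enabled_hrtimer_events;
    ktime_t         earliest = KTIME_MAX;
    unsigned        e;

    /* Walk every enabled event and keep the smallest absolute timeout */
    for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
        if (ktime_before(ehci->hr_timeouts[e], earliest))
            earliest = ehci->hr_timeouts[e];
    }
    return earliest;
}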


/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
static void ehci_poll_ASS(struct ehci_hcd *ehci)
{
    unsigned    actual, want;

    /* Don't enable anything if the controller isn't running (e.g., died) */
    if (ehci->rh_state != EHCI_RH_RUNNING)
        return;

    want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
    actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;

    if (want != actual) {

        /* Poll again later, but give up after about 2-4 ms */
        if (ehci->ASS_poll_count++ < 2) {
            ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
            return;
        }
        ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
                want, actual);
    }
    ehci->ASS_poll_count = 0;

    /* The status is up-to-date; restart or stop the schedule as needed */
    if (want == 0) {    /* Stopped */
        if (ehci->async_count > 0)
            ehci_set_command_bit(ehci, CMD_ASE);

    } else {        /* Running */
        if (ehci->async_count == 0) {

            /* Turn off the schedule after a while */
            ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
                    true);
        }
    }
}

/* Turn off the async schedule after a brief delay */
static void ehci_disable_ASE(struct ehci_hcd *ehci)
{
    ehci_clear_command_bit(ehci, CMD_ASE);
}


/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
static void ehci_poll_PSS(struct ehci_hcd *ehci)
{
    unsigned    actual, want;

    /* Don't do anything if the controller isn't running (e.g., died) */
    if (ehci->rh_state != EHCI_RH_RUNNING)
        return;

    want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
    actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;

    if (want != actual) {

        /* Poll again later, but give up after about 2-4 ms */
        if (ehci->PSS_poll_count++ < 2) {
            ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
            return;
        }
        ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
                want, actual);
    }
    ehci->PSS_poll_count = 0;

    /* The status is up-to-date; restart or stop the schedule as needed */
    if (want == 0) {    /* Stopped */
        if (ehci->periodic_count > 0)
            ehci_set_command_bit(ehci, CMD_PSE);

    } else {        /* Running */
        if (ehci->periodic_count == 0) {

            /* Turn off the schedule after a while */
            ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
                    true);
        }
    }
}

/* Turn off the periodic schedule after a brief delay */
static void ehci_disable_PSE(struct ehci_hcd *ehci)
{
    ehci_clear_command_bit(ehci, CMD_PSE);
}


/* Poll the STS_HALT status bit; see when a dead controller stops */
static void ehci_handle_controller_death(struct ehci_hcd *ehci)
{
    if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {

        /* Give up after a few milliseconds */
        if (ehci->died_poll_count++ < 5) {
            /* Try again later */
            ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
            return;
        }
        ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
    }

    /* Clean up the mess */
    ehci->rh_state = EHCI_RH_HALTED;
    ehci_writel(ehci, 0, &ehci->regs->configured_flag);
    ehci_writel(ehci, 0, &ehci->regs->intr_enable);
    ehci_work(ehci);
    end_unlink_async(ehci);

    /* Not in process context, so don't try to reset the controller */
}

/* Start unlinking interrupt QHs whose waiting period has elapsed */
static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
{
    bool        stopped = (ehci->rh_state < EHCI_RH_RUNNING);

    /*
     * Process all the QHs on the intr_unlink_wait list that were added
     * before the current wait cycle began.  The list is in temporal
     * order, so stop when we reach the first entry added in the current
     * cycle.  But if the root hub isn't running then process all the
     * QHs on the list.
     */
    while (!list_empty(&ehci->intr_unlink_wait)) {
        struct ehci_qh  *qh;

        qh = list_first_entry(&ehci->intr_unlink_wait,
                struct ehci_qh, unlink_node);
        if (!stopped && (qh->unlink_cycle ==
                ehci->intr_unlink_wait_cycle))
            break;
        list_del_init(&qh->unlink_node);
        qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
        start_unlink_intr(ehci, qh);
    }

    /* Handle remaining entries later */
    if (!list_empty(&ehci->intr_unlink_wait)) {
        ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
        ++ehci->intr_unlink_wait_cycle;
    }
}
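
/*
 * Hypothetical producer-side sketch, not part of this file: the real
 * producer lives in the scheduling code of ehci-hcd.c.  To make the
 * wait-cycle bookkeeping concrete, a caller deferring an interrupt-QH
 * unlink would tag the QH with the current wait cycle, queue it, and arm
 * the 5 ms event roughly as below.  The helper name is made up for
 * illustration.
 */
static void __maybe_unused ehci_defer_intr_unlink_sketch(struct ehci_hcd *ehci,
        struct ehci_qh *qh)
{
    /* Entries added now belong to the current wait cycle */
    qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
    list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);

    /*
     * If this is the first waiter, arm the timer and start a new wait
     * cycle; the handler above processes entries from earlier cycles.
     */
    if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
        ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
        ++ehci->intr_unlink_wait_cycle;
    }
}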

/* Handle unlinked interrupt QHs once they are gone from the hardware */
static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
{
    bool        stopped = (ehci->rh_state < EHCI_RH_RUNNING);

    /*
     * Process all the QHs on the intr_unlink list that were added
     * before the current unlink cycle began.  The list is in
     * temporal order, so stop when we reach the first entry in the
     * current cycle.  But if the root hub isn't running then
     * process all the QHs on the list.
     */
    ehci->intr_unlinking = true;
    while (!list_empty(&ehci->intr_unlink)) {
        struct ehci_qh  *qh;

        qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
                unlink_node);
        if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
            break;
        list_del_init(&qh->unlink_node);
        end_unlink_intr(ehci, qh);
    }

    /* Handle remaining entries later */
    if (!list_empty(&ehci->intr_unlink)) {
        ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
        ++ehci->intr_unlink_cycle;
    }
    ehci->intr_unlinking = false;
}


/* Start another free-iTDs/siTDs cycle */
static void start_free_itds(struct ehci_hcd *ehci)
{
    if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
        ehci->last_itd_to_free = list_entry(
                ehci->cached_itd_list.prev,
                struct ehci_itd, itd_list);
        ehci->last_sitd_to_free = list_entry(
                ehci->cached_sitd_list.prev,
                struct ehci_sitd, sitd_list);
        ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
    }
}

/* Wait for controller to stop using old iTDs and siTDs */
static void end_free_itds(struct ehci_hcd *ehci)
{
    struct ehci_itd     *itd, *n;
    struct ehci_sitd    *sitd, *sn;

    if (ehci->rh_state < EHCI_RH_RUNNING) {
        ehci->last_itd_to_free = NULL;
        ehci->last_sitd_to_free = NULL;
    }

    list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
        list_del(&itd->itd_list);
        dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
        if (itd == ehci->last_itd_to_free)
            break;
    }
    list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
        list_del(&sitd->sitd_list);
        dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
        if (sitd == ehci->last_sitd_to_free)
            break;
    }

    if (!list_empty(&ehci->cached_itd_list) ||
            !list_empty(&ehci->cached_sitd_list))
        start_free_itds(ehci);
}


/* Handle lost (or very late) IAA interrupts */
static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
{
    u32 cmd, status;

    /*
     * Lost IAA irqs wedge things badly; seen first with a vt8235.
     * So we need this watchdog, but must protect it against both
     * (a) SMP races against real IAA firing and retriggering, and
     * (b) clean HC shutdown, when IAA watchdog was pending.
     */
    if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
        return;

    /* If we get here, IAA is *REALLY* late.  It's barely
     * conceivable that the system is so busy that CMD_IAAD
     * is still legitimately set, so let's be sure it's
     * clear before we read STS_IAA.  (The HC should clear
     * CMD_IAAD when it sets STS_IAA.)
     */
    cmd = ehci_readl(ehci, &ehci->regs->command);

    /*
     * If IAA is set here it either legitimately triggered
     * after the watchdog timer expired (_way_ late, so we'll
     * still count it as lost) ... or a silicon erratum:
     * - VIA seems to set IAA without triggering the IRQ;
     * - IAAD potentially cleared without setting IAA.
     */
    status = ehci_readl(ehci, &ehci->regs->status);
    if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
        INCR(ehci->stats.lost_iaa);
        ehci_writel(ehci, STS_IAA, &ehci->regs->status);
    }

    ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
    end_iaa_cycle(ehci);
}


/* Enable the I/O watchdog, if appropriate */
static void turn_on_io_watchdog(struct ehci_hcd *ehci)
{
    /* Not needed if the controller isn't running or it's already enabled */
    if (ehci->rh_state != EHCI_RH_RUNNING ||
            (ehci->enabled_hrtimer_events &
                BIT(EHCI_HRTIMER_IO_WATCHDOG)))
        return;

    /*
     * Isochronous transfers always need the watchdog.
     * For other sorts we use it only if the flag is set.
     */
    if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
            ehci->async_count + ehci->intr_count > 0))
        ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
}


/*
 * Handler functions for the hrtimer event types.
 * Keep this array in the same order as the event types indexed by
 * enum ehci_hrtimer_event in ehci.h.
 */
static void (*event_handlers[])(struct ehci_hcd *) = {
    ehci_poll_ASS,                  /* EHCI_HRTIMER_POLL_ASS */
    ehci_poll_PSS,                  /* EHCI_HRTIMER_POLL_PSS */
    ehci_handle_controller_death,   /* EHCI_HRTIMER_POLL_DEAD */
    ehci_handle_intr_unlinks,       /* EHCI_HRTIMER_UNLINK_INTR */
    end_free_itds,                  /* EHCI_HRTIMER_FREE_ITDS */
    end_unlink_async,               /* EHCI_HRTIMER_ACTIVE_UNLINK */
    ehci_handle_start_intr_unlinks, /* EHCI_HRTIMER_START_UNLINK_INTR */
    unlink_empty_async,             /* EHCI_HRTIMER_ASYNC_UNLINKS */
    ehci_iaa_watchdog,              /* EHCI_HRTIMER_IAA_WATCHDOG */
    ehci_disable_PSE,               /* EHCI_HRTIMER_DISABLE_PERIODIC */
    ehci_disable_ASE,               /* EHCI_HRTIMER_DISABLE_ASYNC */
    ehci_work,                      /* EHCI_HRTIMER_IO_WATCHDOG */
};

static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
{
    struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer);
    ktime_t         now;
    unsigned long   events;
    unsigned long   flags;
    unsigned        e;

    spin_lock_irqsave(&ehci->lock, flags);

    events = ehci->enabled_hrtimer_events;
    ehci->enabled_hrtimer_events = 0;
    ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;

    /*
     * Check each pending event.  If its time has expired, handle
     * the event; otherwise re-enable it.
     */
    now = ktime_get();
    for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
        if (ktime_compare(now, ehci->hr_timeouts[e]) >= 0)
            event_handlers[e](ehci);
        else
            ehci_enable_event(ehci, e, false);
    }

    spin_unlock_irqrestore(&ehci->lock, flags);
    return HRTIMER_NORESTART;
}
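
/*
 * Illustrative sketch, not part of this file: ehci-hcd.c, which #includes
 * this file, wires the timer up during controller initialization roughly
 * as below (exact details vary by kernel version).  The helper name is
 * made up for illustration.
 */
static void __maybe_unused ehci_hrtimer_setup_sketch(struct ehci_hcd *ehci)
{
    /* Use absolute expirations on the monotonic clock, as above */
    hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    ehci->hrtimer.function = ehci_hrtimer_func;

    /* No events are pending until ehci_enable_event() arms one */
    ehci->enabled_hrtimer_events = 0;
    ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
}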