0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
/*
 * Set a bit in the USBCMD register, keeping the cached copy in
 * ehci->command in sync with the hardware.
 */
static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
{
	ehci->command |= bit;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* unblock posted write: read back (result intentionally discarded) */
	ehci_readl(ehci, &ehci->regs->command);
}
0019
0020
/*
 * Clear a bit in the USBCMD register, keeping the cached copy in
 * ehci->command in sync with the hardware.
 */
static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
{
	ehci->command &= ~bit;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* unblock posted write: read back (result intentionally discarded) */
	ehci_readl(ehci, &ehci->regs->command);
}
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
/*
 * Delay lengths (in nanoseconds) for the hrtimer events, indexed by
 * event number.  Each entry corresponds, by index, to a handler in the
 * event_handlers[] table.  Delays are nondecreasing, so a lower event
 * number always expires no later than a higher one.
 */
static unsigned event_delays_ns[] = {
	1 * NSEC_PER_MSEC,	/* 0: ehci_poll_ASS */
	1 * NSEC_PER_MSEC,	/* 1: ehci_poll_PSS */
	1 * NSEC_PER_MSEC,	/* 2: ehci_handle_controller_death */
	1125 * NSEC_PER_USEC,	/* 3: ehci_handle_intr_unlinks */
	2 * NSEC_PER_MSEC,	/* 4: end_free_itds */
	2 * NSEC_PER_MSEC,	/* 5: end_unlink_async */
	5 * NSEC_PER_MSEC,	/* 6: ehci_handle_start_intr_unlinks */
	6 * NSEC_PER_MSEC,	/* 7: unlink_empty_async */
	10 * NSEC_PER_MSEC,	/* 8: ehci_iaa_watchdog */
	10 * NSEC_PER_MSEC,	/* 9: ehci_disable_PSE */
	15 * NSEC_PER_MSEC,	/* 10: ehci_disable_ASE */
	100 * NSEC_PER_MSEC,	/* 11: ehci_work (I/O watchdog) */
};
0074
0075
/*
 * Mark an hrtimer event as pending and, if it becomes the earliest
 * pending event, (re)program the timer for it.
 *
 * @event:   event index into event_delays_ns[] / event_handlers[]
 * @resched: true to compute a fresh timeout from event_delays_ns[];
 *	     false to reuse the timeout already stored in hr_timeouts[]
 *	     (used by ehci_hrtimer_func() to re-enable unexpired events)
 */
static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
		bool resched)
{
	ktime_t *timeout = &ehci->hr_timeouts[event];

	if (resched)
		*timeout = ktime_add(ktime_get(), event_delays_ns[event]);
	ehci->enabled_hrtimer_events |= (1 << event);

	/*
	 * Lower event numbers have shorter (or equal) delays, so an event
	 * below the currently programmed one always needs the timer
	 * sooner.  Allow up to 1 ms of slack when programming it.
	 */
	if (event < ehci->next_hrtimer_event) {
		ehci->next_hrtimer_event = event;
		hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
				NSEC_PER_MSEC, HRTIMER_MODE_ABS);
	}
}
0092
0093
0094
0095 static void ehci_poll_ASS(struct ehci_hcd *ehci)
0096 {
0097 unsigned actual, want;
0098
0099
0100 if (ehci->rh_state != EHCI_RH_RUNNING)
0101 return;
0102
0103 want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
0104 actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;
0105
0106 if (want != actual) {
0107
0108
0109 if (ehci->ASS_poll_count++ < 2) {
0110 ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
0111 return;
0112 }
0113 ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
0114 want, actual);
0115 }
0116 ehci->ASS_poll_count = 0;
0117
0118
0119 if (want == 0) {
0120 if (ehci->async_count > 0)
0121 ehci_set_command_bit(ehci, CMD_ASE);
0122
0123 } else {
0124 if (ehci->async_count == 0) {
0125
0126
0127 ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
0128 true);
0129 }
0130 }
0131 }
0132
0133
/* Turn off the async schedule after a brief delay (hrtimer handler) */
static void ehci_disable_ASE(struct ehci_hcd *ehci)
{
	ehci_clear_command_bit(ehci, CMD_ASE);
}
0138
0139
0140
0141 static void ehci_poll_PSS(struct ehci_hcd *ehci)
0142 {
0143 unsigned actual, want;
0144
0145
0146 if (ehci->rh_state != EHCI_RH_RUNNING)
0147 return;
0148
0149 want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
0150 actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;
0151
0152 if (want != actual) {
0153
0154
0155 if (ehci->PSS_poll_count++ < 2) {
0156 ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
0157 return;
0158 }
0159 ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
0160 want, actual);
0161 }
0162 ehci->PSS_poll_count = 0;
0163
0164
0165 if (want == 0) {
0166 if (ehci->periodic_count > 0)
0167 ehci_set_command_bit(ehci, CMD_PSE);
0168
0169 } else {
0170 if (ehci->periodic_count == 0) {
0171
0172
0173 ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
0174 true);
0175 }
0176 }
0177 }
0178
0179
/* Turn off the periodic schedule after a brief delay (hrtimer handler) */
static void ehci_disable_PSE(struct ehci_hcd *ehci)
{
	ehci_clear_command_bit(ehci, CMD_PSE);
}
0184
0185
0186
/* Shut down a controller that has died (hrtimer handler) */
static void ehci_handle_controller_death(struct ehci_hcd *ehci)
{
	/* Wait for the controller to report halted, polling a few times */
	if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {

		/* Give up after a handful of attempts */
		if (ehci->died_poll_count++ < 5) {
			/* Try again later */
			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
			return;
		}
		ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
	}

	/*
	 * Mark the root hub halted, release port ownership, mask all
	 * interrupts, and clean up outstanding work and async unlinks.
	 */
	ehci->rh_state = EHCI_RH_HALTED;
	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	ehci_work(ehci);
	end_unlink_async(ehci);

	/* NOTE(review): final death reporting presumably happens elsewhere */
}
0209
0210
/*
 * Begin unlinking interrupt QHs that have waited long enough on the
 * intr_unlink_wait list (hrtimer handler).
 */
static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
{
	bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);

	/*
	 * Process, in FIFO order, every QH added before the current wait
	 * cycle (unlink_cycle != intr_unlink_wait_cycle).  QHs added
	 * during this cycle stay queued until the next expiration -- except
	 * when the controller is stopped, in which case all of them are
	 * processed immediately.
	 */
	while (!list_empty(&ehci->intr_unlink_wait)) {
		struct ehci_qh *qh;

		qh = list_first_entry(&ehci->intr_unlink_wait,
				struct ehci_qh, unlink_node);
		if (!stopped && (qh->unlink_cycle ==
				ehci->intr_unlink_wait_cycle))
			break;
		list_del_init(&qh->unlink_node);
		qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
		start_unlink_intr(ehci, qh);
	}

	/* Anything left over waits for the next wait cycle */
	if (!list_empty(&ehci->intr_unlink_wait)) {
		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
		++ehci->intr_unlink_wait_cycle;
	}
}
0241
0242
/*
 * Finish unlinking interrupt QHs that have been on the intr_unlink list
 * long enough (hrtimer handler).
 */
static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
{
	bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);

	/*
	 * Complete, in FIFO order, every unlink that started before the
	 * current unlink cycle (unlink_cycle != intr_unlink_cycle).  QHs
	 * added during this cycle stay queued until the next expiration --
	 * except when the controller is stopped, in which case all of
	 * them are finished immediately.
	 *
	 * NOTE(review): the intr_unlinking flag presumably signals to
	 * other code that this walk is in progress -- confirm against the
	 * start_unlink_intr()/end_unlink_intr() definitions.
	 */
	ehci->intr_unlinking = true;
	while (!list_empty(&ehci->intr_unlink)) {
		struct ehci_qh *qh;

		qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
				unlink_node);
		if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
			break;
		list_del_init(&qh->unlink_node);
		end_unlink_intr(ehci, qh);
	}

	/* Anything left over waits for the next unlink cycle */
	if (!list_empty(&ehci->intr_unlink)) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
	ehci->intr_unlinking = false;
}
0273
0274
0275
0276 static void start_free_itds(struct ehci_hcd *ehci)
0277 {
0278 if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
0279 ehci->last_itd_to_free = list_entry(
0280 ehci->cached_itd_list.prev,
0281 struct ehci_itd, itd_list);
0282 ehci->last_sitd_to_free = list_entry(
0283 ehci->cached_sitd_list.prev,
0284 struct ehci_sitd, sitd_list);
0285 ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
0286 }
0287 }
0288
0289
/* Free the cached iTDs and siTDs (hrtimer handler) */
static void end_free_itds(struct ehci_hcd *ehci)
{
	struct ehci_itd *itd, *n;
	struct ehci_sitd *sitd, *sn;

	/*
	 * If the controller isn't running, clear the end markers so the
	 * loops below free every cached entry instead of stopping at the
	 * snapshot taken by start_free_itds().
	 */
	if (ehci->rh_state < EHCI_RH_RUNNING) {
		ehci->last_itd_to_free = NULL;
		ehci->last_sitd_to_free = NULL;
	}

	/* Free entries up to and including the snapshot markers */
	list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
		list_del(&itd->itd_list);
		dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
		if (itd == ehci->last_itd_to_free)
			break;
	}
	list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
		list_del(&sitd->sitd_list);
		dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
		if (sitd == ehci->last_sitd_to_free)
			break;
	}

	/* Entries added after the snapshot get a fresh deferral cycle */
	if (!list_empty(&ehci->cached_itd_list) ||
			!list_empty(&ehci->cached_sitd_list))
		start_free_itds(ehci);
}
0317
0318
0319
/*
 * Watchdog for the IAA (Interrupt on Async Advance) cycle: runs when
 * the expected IAA interrupt hasn't been handled in time, and finishes
 * the cycle by hand (hrtimer handler).
 */
static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
{
	u32 cmd, status;

	/*
	 * Nothing to do if no IAA cycle is in progress (it may have been
	 * completed elsewhere in the meantime) or if the controller isn't
	 * running.
	 */
	if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
		return;

	/* Current state of the IAA doorbell (CMD_IAAD) bit */
	cmd = ehci_readl(ehci, &ehci->regs->command);

	/*
	 * If the controller has raised STS_IAA, or has already cleared
	 * the CMD_IAAD doorbell, the cycle finished in hardware but its
	 * interrupt was never serviced: record a lost IAA and acknowledge
	 * the status bit ourselves.
	 */
	status = ehci_readl(ehci, &ehci->regs->status);
	if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
		INCR(ehci->stats.lost_iaa);
		ehci_writel(ehci, STS_IAA, &ehci->regs->status);
	}

	ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
	end_iaa_cycle(ehci);
}
0357
0358
0359
0360 static void turn_on_io_watchdog(struct ehci_hcd *ehci)
0361 {
0362
0363 if (ehci->rh_state != EHCI_RH_RUNNING ||
0364 (ehci->enabled_hrtimer_events &
0365 BIT(EHCI_HRTIMER_IO_WATCHDOG)))
0366 return;
0367
0368
0369
0370
0371
0372 if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
0373 ehci->async_count + ehci->intr_count > 0))
0374 ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
0375 }
0376
0377
0378
0379
0380
0381
0382
/*
 * Handler functions for the hrtimer events, indexed by event number.
 * Each index corresponds to the matching delay in event_delays_ns[].
 * (Enum names below are taken from the ehci_enable_event() call sites
 * visible in this file; indices 5, 7, and 8 have no visible name here.)
 */
static void (*event_handlers[])(struct ehci_hcd *) = {
	ehci_poll_ASS,			/* 0: EHCI_HRTIMER_POLL_ASS */
	ehci_poll_PSS,			/* 1: EHCI_HRTIMER_POLL_PSS */
	ehci_handle_controller_death,	/* 2: EHCI_HRTIMER_POLL_DEAD */
	ehci_handle_intr_unlinks,	/* 3: EHCI_HRTIMER_UNLINK_INTR */
	end_free_itds,			/* 4: EHCI_HRTIMER_FREE_ITDS */
	end_unlink_async,		/* 5 */
	ehci_handle_start_intr_unlinks,	/* 6: EHCI_HRTIMER_START_UNLINK_INTR */
	unlink_empty_async,		/* 7 */
	ehci_iaa_watchdog,		/* 8 */
	ehci_disable_PSE,		/* 9: EHCI_HRTIMER_DISABLE_PERIODIC */
	ehci_disable_ASE,		/* 10: EHCI_HRTIMER_DISABLE_ASYNC */
	ehci_work,			/* 11: EHCI_HRTIMER_IO_WATCHDOG */
};
0397
/*
 * Shared hrtimer callback: run the handlers for all pending events
 * whose timeouts have expired, and re-arm the rest.
 */
static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
{
	struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer);
	ktime_t now;
	unsigned long events;
	unsigned long flags;
	unsigned e;

	spin_lock_irqsave(&ehci->lock, flags);

	/* Snapshot and clear the pending-event state before dispatching */
	events = ehci->enabled_hrtimer_events;
	ehci->enabled_hrtimer_events = 0;
	ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;

	/*
	 * Dispatch expired events in order of increasing event number
	 * (i.e. increasing delay length).  Events whose stored timeout
	 * hasn't been reached yet are simply re-enabled; resched=false
	 * keeps their original timeout so they aren't pushed back.
	 */
	now = ktime_get();
	for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
		if (ktime_compare(now, ehci->hr_timeouts[e]) >= 0)
			event_handlers[e](ehci);
		else
			ehci_enable_event(ehci, e, false);
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
	return HRTIMER_NORESTART;
}