// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring and event handling: the driver produces TRBs on the command and
 * transfer rings and consumes events from the event ring.  Ownership of each
 * TRB is tracked with its cycle bit, which producer and consumer toggle every
 * time their enqueue or dequeue pointer wraps around a ring, and ring
 * segments are chained together with link TRBs.
 */
0055 #include <linux/scatterlist.h>
0056 #include <linux/slab.h>
0057 #include <linux/dma-mapping.h>
0058 #include "xhci.h"
0059 #include "xhci-trace.h"
0060
0061 static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
0062 u32 field1, u32 field2,
0063 u32 field3, u32 field4, bool command_must_succeed);
0064
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
0069 dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
0070 union xhci_trb *trb)
0071 {
0072 unsigned long segment_offset;
0073
0074 if (!seg || !trb || trb < seg->trbs)
0075 return 0;
0076
0077 segment_offset = trb - seg->trbs;
0078 if (segment_offset >= TRBS_PER_SEGMENT)
0079 return 0;
0080 return seg->dma + (segment_offset * sizeof(*trb));
0081 }
0082
0083 static bool trb_is_noop(union xhci_trb *trb)
0084 {
0085 return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
0086 }
0087
0088 static bool trb_is_link(union xhci_trb *trb)
0089 {
0090 return TRB_TYPE_LINK_LE32(trb->link.control);
0091 }
0092
0093 static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
0094 {
0095 return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
0096 }
0097
0098 static bool last_trb_on_ring(struct xhci_ring *ring,
0099 struct xhci_segment *seg, union xhci_trb *trb)
0100 {
0101 return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
0102 }
0103
0104 static bool link_trb_toggles_cycle(union xhci_trb *trb)
0105 {
0106 return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
0107 }
0108
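/* Has every TD of this URB been handled?  (An URB may be split into several TDs.) */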
0109 static bool last_td_in_urb(struct xhci_td *td)
0110 {
0111 struct urb_priv *urb_priv = td->urb->hcpriv;
0112
0113 return urb_priv->num_tds_done == urb_priv->num_tds;
0114 }
0115
0116 static void inc_td_cnt(struct urb *urb)
0117 {
0118 struct urb_priv *urb_priv = urb->hcpriv;
0119
0120 urb_priv->num_tds_done++;
0121 }
0122
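/*
 * Overwrite a TRB with a no-op of the given type, preserving only its cycle
 * bit; chained link TRBs only get their chain bit cleared.
 */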
0123 static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
0124 {
0125 if (trb_is_link(trb)) {
/* unchain chained link TRBs */
0127 trb->link.control &= cpu_to_le32(~TRB_CHAIN);
0128 } else {
0129 trb->generic.field[0] = 0;
0130 trb->generic.field[1] = 0;
0131 trb->generic.field[2] = 0;
/* Preserve only the cycle bit of this TRB */
0133 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
0134 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
0135 }
0136 }
0137
/*
 * Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
0142 static void next_trb(struct xhci_hcd *xhci,
0143 struct xhci_ring *ring,
0144 struct xhci_segment **seg,
0145 union xhci_trb **trb)
0146 {
0147 if (trb_is_link(*trb)) {
0148 *seg = (*seg)->next;
0149 *trb = ((*seg)->trbs);
0150 } else {
0151 (*trb)++;
0152 }
0153 }

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 */
0158 void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
0159 {
0160 unsigned int link_trb_count = 0;

/* event ring doesn't have link trbs, check for last trb */
0163 if (ring->type == TYPE_EVENT) {
0164 if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
0165 ring->dequeue++;
0166 goto out;
0167 }
0168 if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
0169 ring->cycle_state ^= 1;
0170 ring->deq_seg = ring->deq_seg->next;
0171 ring->dequeue = ring->deq_seg->trbs;
0172 goto out;
0173 }

/* All other rings have link trbs */
0176 if (!trb_is_link(ring->dequeue)) {
0177 if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
0178 xhci_warn(xhci, "Missing link TRB at end of segment\n");
0179 } else {
0180 ring->dequeue++;
0181 ring->num_trbs_free++;
0182 }
0183 }
0184
0185 while (trb_is_link(ring->dequeue)) {
0186 ring->deq_seg = ring->deq_seg->next;
0187 ring->dequeue = ring->deq_seg->trbs;
0188
0189 if (link_trb_count++ > ring->num_segs) {
0190 xhci_warn(xhci, "Ring is an endless link TRB loop\n");
0191 break;
0192 }
0193 }
0194 out:
0195 trace_xhci_inc_deq(ring);
0196
0197 return;
0198 }

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
0216 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
0217 bool more_trbs_coming)
0218 {
0219 u32 chain;
0220 union xhci_trb *next;
0221 unsigned int link_trb_count = 0;
0222
0223 chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
0224
0225 if (!trb_is_link(ring->enqueue))
0226 ring->num_trbs_free--;
0227
0228 if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
0229 xhci_err(xhci, "Tried to move enqueue past ring segment\n");
0230 return;
0231 }
0232
0233 next = ++(ring->enqueue);

/* Update the dequeue pointer further if that was a link TRB */
0236 while (trb_is_link(next)) {

/*
 * If the caller doesn't plan on enqueueing more TDs before
 * ringing the doorbell, then we don't want to give the link TRB
 * to the hardware just yet. We'll give the link TRB back in
 * prepare_ring() just before we enqueue the TD at the top of
 * the ring.
 */
0245 if (!chain && !more_trbs_coming)
0246 break;
0247
/* If we're not dealing with 0.95 hardware or isoc rings on
 * AMD 0.96 host, carry over the chain bit of the previous TRB
 * (which may mean the chain bit is cleared).
 */
0252 if (!(ring->type == TYPE_ISOC &&
0253 (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
0254 !xhci_link_trb_quirk(xhci)) {
0255 next->link.control &= cpu_to_le32(~TRB_CHAIN);
0256 next->link.control |= cpu_to_le32(chain);
0257 }
/* Give this link TRB to the hardware */
0259 wmb();
0260 next->link.control ^= cpu_to_le32(TRB_CYCLE);

/* Toggle the cycle bit after the last ring segment. */
0263 if (link_trb_toggles_cycle(next))
0264 ring->cycle_state ^= 1;
0265
0266 ring->enq_seg = ring->enq_seg->next;
0267 ring->enqueue = ring->enq_seg->trbs;
0268 next = ring->enqueue;
0269
0270 if (link_trb_count++ > ring->num_segs) {
0271 xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
0272 break;
0273 }
0274 }
0275
0276 trace_xhci_inc_enq(ring);
0277 }
0278
/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment.
 */
0283 static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
0284 unsigned int num_trbs)
0285 {
0286 int num_trbs_in_deq_seg;
0287
0288 if (ring->num_trbs_free < num_trbs)
0289 return 0;
0290
0291 if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
0292 num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
0293 if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
0294 return 0;
0295 }
0296
0297 return 1;
0298 }
0299
/* Ring the host controller doorbell after placing a command on the ring */
0301 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
0302 {
0303 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
0304 return;
0305
0306 xhci_dbg(xhci, "// Ding dong!\n");
0307
0308 trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
0309
0310 writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
0312 readl(&xhci->dba->doorbell[0]);
0313 }
0314
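/* Arm or re-arm the command timeout watchdog work. */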
0315 static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
0316 {
0317 return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
0318 }
0319
0320 static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
0321 {
0322 return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
0323 cmd_list);
0324 }
0325
/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
0331 static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
0332 struct xhci_command *cur_cmd)
0333 {
0334 struct xhci_command *i_cmd;
0335
/* Turn all aborted commands in list to no-ops, then restart */
0337 list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
0338
0339 if (i_cmd->status != COMP_COMMAND_ABORTED)
0340 continue;
0341
0342 i_cmd->status = COMP_COMMAND_RING_STOPPED;
0343
0344 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
0345 i_cmd->command_trb);
0346
0347 trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);
0348
/*
 * The caller waiting for completion is called when a command
 * completion event is received for these no-op commands.
 */
0353 }
0354
0355 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
0356
/* ring command ring doorbell to restart the command ring */
0358 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
0359 !(xhci->xhc_state & XHCI_STATE_DYING)) {
0360 xhci->current_cmd = cur_cmd;
0361 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
0362 xhci_ring_cmd_db(xhci);
0363 }
0364 }
0365
/* Must be called with xhci->lock held, releases and acquires lock back */
0367 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
0368 {
0369 struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
0370 union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
0371 u64 crcr;
0372 int ret;
0373
0374 xhci_dbg(xhci, "Abort command ring\n");
0375
0376 reinit_completion(&xhci->cmd_ring_stop_completion);
0377
/*
 * The control bits like command stop and abort are located in the lower
 * dword of the command ring control register.
 * Some controllers require all 64 bits to be written to abort the ring.
 * Make sure the upper dword is valid, pointing to the next command,
 * avoiding corrupting the command ring pointer in case the command ring
 * is stopped by the time the upper dword is written.
 */
0386 next_trb(xhci, NULL, &new_seg, &new_deq);
0387 if (trb_is_link(new_deq))
0388 next_trb(xhci, NULL, &new_seg, &new_deq);
0389
0390 crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
0391 xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
0392
/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
 * completion of the Command Abort operation. If CRR is not negated in 5
 * seconds then driver handles it as if host died (-ENODEV).
 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
 * and try to recover a -ETIMEDOUT with a host controller reset.
 */
0399 ret = xhci_handshake(&xhci->op_regs->cmd_ring,
0400 CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
0401 if (ret < 0) {
0402 xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
0403 xhci_halt(xhci);
0404 xhci_hc_died(xhci);
0405 return ret;
0406 }
/*
 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
 * but the completion event is never sent. Wait 2 secs (arbitrary
 * number) to handle those cases after negation of CMD_RING_RUNNING.
 */
0413 spin_unlock_irqrestore(&xhci->lock, flags);
0414 ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
0415 msecs_to_jiffies(2000));
0416 spin_lock_irqsave(&xhci->lock, flags);
0417 if (!ret) {
0418 xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
0419 xhci_cleanup_command_queue(xhci);
0420 } else {
0421 xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
0422 }
0423 return 0;
0424 }
0425
0426 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
0427 unsigned int slot_id,
0428 unsigned int ep_index,
0429 unsigned int stream_id)
0430 {
0431 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
0432 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
0433 unsigned int ep_state = ep->ep_state;
0434
/* Don't ring the doorbell for this endpoint if there are pending
 * cancellations because we don't want to interrupt processing.
 * We don't want to restart any stream rings if there's a set dequeue
 * pointer command pending because the device can choose to start any
 * stream once the endpoint is on the HW schedule.
 */
0441 if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
0442 (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
0443 return;
0444
0445 trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
0446
0447 writel(DB_VALUE(ep_index, stream_id), db_addr);
/* flush the write */
0449 readl(db_addr);
0450 }
0451
/* Ring the doorbell for any rings with pending URBs */
0453 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
0454 unsigned int slot_id,
0455 unsigned int ep_index)
0456 {
0457 unsigned int stream_id;
0458 struct xhci_virt_ep *ep;
0459
0460 ep = &xhci->devs[slot_id]->eps[ep_index];
0461
/* A ring has pending URBs if its TD list is not empty */
0463 if (!(ep->ep_state & EP_HAS_STREAMS)) {
0464 if (ep->ring && !(list_empty(&ep->ring->td_list)))
0465 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
0466 return;
0467 }
0468
0469 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
0470 stream_id++) {
0471 struct xhci_stream_info *stream_info = ep->stream_info;
0472 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
0473 xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
0474 stream_id);
0475 }
0476 }
0477
0478 void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
0479 unsigned int slot_id,
0480 unsigned int ep_index)
0481 {
0482 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
0483 }
0484
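/* Look up a virt endpoint, bounds-checking slot_id and ep_index and the slot's existence. */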
0485 static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
0486 unsigned int slot_id,
0487 unsigned int ep_index)
0488 {
0489 if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
0490 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
0491 return NULL;
0492 }
0493 if (ep_index >= EP_CTX_PER_DEV) {
0494 xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
0495 return NULL;
0496 }
0497 if (!xhci->devs[slot_id]) {
0498 xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
0499 return NULL;
0500 }
0501
0502 return &xhci->devs[slot_id]->eps[ep_index];
0503 }
0504
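/* Return the transfer ring for this endpoint and stream ID, or NULL if the stream ID is invalid. */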
0505 static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
0506 struct xhci_virt_ep *ep,
0507 unsigned int stream_id)
0508 {
/* common case, no streams */
0510 if (!(ep->ep_state & EP_HAS_STREAMS))
0511 return ep->ring;
0512
0513 if (!ep->stream_info)
0514 return NULL;
0515
0516 if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
0517 xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
0518 stream_id, ep->vdev->slot_id, ep->ep_index);
0519 return NULL;
0520 }
0521
0522 return ep->stream_info->stream_rings[stream_id];
0523 }
0524
/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
0529 struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
0530 unsigned int slot_id, unsigned int ep_index,
0531 unsigned int stream_id)
0532 {
0533 struct xhci_virt_ep *ep;
0534
0535 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
0536 if (!ep)
0537 return NULL;
0538
0539 return xhci_virt_ep_to_ring(xhci, ep, stream_id);
0540 }
0541

/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The returned hw_dequeue contains the lowest four bits with cycle state
 * and possible stream context type.
 */
0549 static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
0550 unsigned int ep_index, unsigned int stream_id)
0551 {
0552 struct xhci_ep_ctx *ep_ctx;
0553 struct xhci_stream_ctx *st_ctx;
0554 struct xhci_virt_ep *ep;
0555
0556 ep = &vdev->eps[ep_index];
0557
0558 if (ep->ep_state & EP_HAS_STREAMS) {
0559 st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
0560 return le64_to_cpu(st_ctx->stream_ring);
0561 }
0562 ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
0563 return le64_to_cpu(ep_ctx->deq);
0564 }
0565
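/*
 * Queue a Set TR Dequeue Pointer command to move the xHC's dequeue pointer
 * past the given TD, toggling our view of the cycle state whenever a link TRB
 * with the toggle bit set is crossed.
 */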
0566 static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
0567 unsigned int slot_id, unsigned int ep_index,
0568 unsigned int stream_id, struct xhci_td *td)
0569 {
0570 struct xhci_virt_device *dev = xhci->devs[slot_id];
0571 struct xhci_virt_ep *ep = &dev->eps[ep_index];
0572 struct xhci_ring *ep_ring;
0573 struct xhci_command *cmd;
0574 struct xhci_segment *new_seg;
0575 struct xhci_segment *halted_seg = NULL;
0576 union xhci_trb *new_deq;
0577 int new_cycle;
0578 union xhci_trb *halted_trb;
0579 int index = 0;
0580 dma_addr_t addr;
0581 u64 hw_dequeue;
0582 bool cycle_found = false;
0583 bool td_last_trb_found = false;
0584 u32 trb_sct = 0;
0585 int ret;
0586
0587 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
0588 ep_index, stream_id);
0589 if (!ep_ring) {
0590 xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
0591 stream_id);
0592 return -ENODEV;
0593 }
0594
/*
 * A cancelled TD can complete with a stall if HW cached the last TRB.
 * In this case driver can't find td, but if the ring is empty we
 * can move the dequeue pointer to the current enqueue position.
 * We shouldn't hit this anymore as cached cancelled TRBs are turned into no-ops.
 */
0601 if (!td) {
0602 if (list_empty(&ep_ring->td_list)) {
0603 new_seg = ep_ring->enq_seg;
0604 new_deq = ep_ring->enqueue;
0605 new_cycle = ep_ring->cycle_state;
0606 xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
0607 goto deq_found;
0608 } else {
0609 xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
0610 return -EINVAL;
0611 }
0612 }
0613
0614 hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
0615 new_seg = ep_ring->deq_seg;
0616 new_deq = ep_ring->dequeue;

/*
 * On hosts with the XHCI_EP_CTX_BROKEN_DCS quirk the DCS (cycle) bit written
 * back into the endpoint context may be stale, so read the cycle bit from the
 * TRB the controller actually halted on instead (non-stream case only).
 */
0623 if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
0624 !(ep->ep_state & EP_HAS_STREAMS))
0625 halted_seg = trb_in_td(xhci, td->start_seg,
0626 td->first_trb, td->last_trb,
0627 hw_dequeue & ~0xf, false);
0628 if (halted_seg) {
0629 index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
0630 sizeof(*halted_trb);
0631 halted_trb = &halted_seg->trbs[index];
0632 new_cycle = halted_trb->generic.field[3] & 0x1;
0633 xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
0634 (u8)(hw_dequeue & 0x1), index, new_cycle);
0635 } else {
0636 new_cycle = hw_dequeue & 0x1;
0637 }
0638
/*
 * We want to find the pointer, segment and cycle state of the new trb
 * (the one after current TD's last_trb). We know the cycle state at
 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
 * found.
 */
0645 do {
0646 if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
0647 == (dma_addr_t)(hw_dequeue & ~0xf)) {
0648 cycle_found = true;
0649 if (td_last_trb_found)
0650 break;
0651 }
0652 if (new_deq == td->last_trb)
0653 td_last_trb_found = true;
0654
0655 if (cycle_found && trb_is_link(new_deq) &&
0656 link_trb_toggles_cycle(new_deq))
0657 new_cycle ^= 0x1;
0658
0659 next_trb(xhci, ep_ring, &new_seg, &new_deq);
0660
/* Search wrapped around, bail out */
0662 if (new_deq == ep->ring->dequeue) {
0663 xhci_err(xhci, "Error: Failed finding new dequeue state\n");
0664 return -EINVAL;
0665 }
0666
0667 } while (!cycle_found || !td_last_trb_found);
0668
0669 deq_found:

/* Don't update the ring cycle state for the producer (us). */
0672 addr = xhci_trb_virt_to_dma(new_seg, new_deq);
0673 if (addr == 0) {
0674 xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
0675 xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
0676 return -EINVAL;
0677 }
0678
0679 if ((ep->ep_state & SET_DEQ_PENDING)) {
0680 xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
0681 &addr);
0682 return -EBUSY;
0683 }
0684
/* This function gets called from contexts where it cannot sleep */
0686 cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
0687 if (!cmd) {
0688 xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
0689 return -ENOMEM;
0690 }
0691
0692 if (stream_id)
0693 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
0694 ret = queue_command(xhci, cmd,
0695 lower_32_bits(addr) | trb_sct | new_cycle,
0696 upper_32_bits(addr),
0697 STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
0698 EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
0699 if (ret < 0) {
0700 xhci_free_command(xhci, cmd);
0701 return ret;
0702 }
0703 ep->queued_deq_seg = new_seg;
0704 ep->queued_deq_ptr = new_deq;
0705
0706 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
0707 "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
0708
/* Stop the TD queueing code from ringing the doorbell until
 * this command completes.  The HC won't set the dequeue pointer
 * if the ring is running, and ringing the doorbell starts the
 * ring running.
 */
0714 ep->ep_state |= SET_DEQ_PENDING;
0715 xhci_ring_cmd_db(xhci);
0716 return 0;
0717 }
0718
/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
0723 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
0724 struct xhci_td *td, bool flip_cycle)
0725 {
0726 struct xhci_segment *seg = td->start_seg;
0727 union xhci_trb *trb = td->first_trb;
0728
0729 while (1) {
0730 trb_to_noop(trb, TRB_TR_NOOP);
0731
/* flip cycle if asked to */
0733 if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
0734 trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
0735
0736 if (trb == td->last_trb)
0737 break;
0738
0739 next_trb(xhci, ep_ring, &seg, &trb);
0740 }
0741 }
0742
/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
0747 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
0748 struct xhci_td *cur_td, int status)
0749 {
0750 struct urb *urb = cur_td->urb;
0751 struct urb_priv *urb_priv = urb->hcpriv;
0752 struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
0753
0754 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
0755 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
0756 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
0757 if (xhci->quirks & XHCI_AMD_PLL_FIX)
0758 usb_amd_quirk_pll_enable();
0759 }
0760 }
0761 xhci_urb_free_priv(urb_priv);
0762 usb_hcd_unlink_urb_from_ep(hcd, urb);
0763 trace_xhci_urb_giveback(urb);
0764 usb_hcd_giveback_urb(hcd, urb, status);
0765 }
0766
0767 static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
0768 struct xhci_ring *ring, struct xhci_td *td)
0769 {
0770 struct device *dev = xhci_to_hcd(xhci)->self.controller;
0771 struct xhci_segment *seg = td->bounce_seg;
0772 struct urb *urb = td->urb;
0773 size_t len;
0774
0775 if (!ring || !seg || !urb)
0776 return;
0777
0778 if (usb_urb_dir_out(urb)) {
0779 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
0780 DMA_TO_DEVICE);
0781 return;
0782 }
0783
0784 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
0785 DMA_FROM_DEVICE);
0786
0787 if (urb->num_sgs) {
0788 len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
0789 seg->bounce_len, seg->bounce_offs);
0790 if (len != seg->bounce_len)
0791 xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
0792 len, seg->bounce_len);
0793 } else {
0794 memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
0795 seg->bounce_len);
0796 }
0797 seg->bounce_len = 0;
0798 seg->bounce_offs = 0;
0799 }
0800
0801 static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
0802 struct xhci_ring *ep_ring, int status)
0803 {
0804 struct urb *urb = NULL;
0805
0806
0807 urb = td->urb;
0808
/* if a bounce buffer was used to align this td then unmap it */
0810 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
0811
/* Do one last check of the actual transfer length.
 * If the host controller said we transferred more data than the buffer
 * length, urb->actual_length will be a very big number (since it's
 * unsigned).  Play it safe and say we didn't transfer anything.
 */
0817 if (urb->actual_length > urb->transfer_buffer_length) {
0818 xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
0819 urb->transfer_buffer_length, urb->actual_length);
0820 urb->actual_length = 0;
0821 status = 0;
0822 }
0823
0824 if (!list_empty(&td->td_list))
0825 list_del_init(&td->td_list);
0826
0827 if (!list_empty(&td->cancelled_td_list))
0828 list_del_init(&td->cancelled_td_list);
0829
0830 inc_td_cnt(urb);
0831
0832 if (last_td_in_urb(td)) {
0833 if ((urb->actual_length != urb->transfer_buffer_length &&
0834 (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
0835 (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
0836 xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
0837 urb, urb->actual_length,
0838 urb->transfer_buffer_length, status);
0839
/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
0841 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
0842 status = 0;
0843 xhci_giveback_urb_in_irq(xhci, td, status);
0844 }
0845
0846 return 0;
0847 }
0848

/* Complete the cancelled URBs we unlinked from the ring. */
0851 static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
0852 {
0853 struct xhci_ring *ring;
0854 struct xhci_td *td, *tmp_td;
0855
0856 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
0857 cancelled_td_list) {
0858
0859 ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
0860
0861 if (td->cancel_status == TD_CLEARED) {
0862 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
0863 __func__, td->urb);
0864 xhci_td_cleanup(ep->xhci, td, ring, td->status);
0865 } else {
0866 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
0867 __func__, td->urb, td->cancel_status);
0868 }
0869 if (ep->xhci->xhc_state & XHCI_STATE_DYING)
0870 return;
0871 }
0872 }
0873
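/* Allocate and queue a Reset Endpoint command for a halted endpoint. */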
0874 static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
0875 unsigned int ep_index, enum xhci_ep_reset_type reset_type)
0876 {
0877 struct xhci_command *command;
0878 int ret = 0;
0879
0880 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
0881 if (!command) {
0882 ret = -ENOMEM;
0883 goto done;
0884 }
0885
0886 xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
0887 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
0888 ep_index, slot_id);
0889
0890 ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
0891 done:
0892 if (ret)
0893 xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
0894 slot_id, ep_index, ret);
0895 return ret;
0896 }
0897
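/*
 * Recover a halted endpoint: for a hard reset, put the TD on the cancelled
 * list, then issue a Reset Endpoint command unless one is already pending.
 */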
0898 static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
0899 struct xhci_virt_ep *ep, unsigned int stream_id,
0900 struct xhci_td *td,
0901 enum xhci_ep_reset_type reset_type)
0902 {
0903 unsigned int slot_id = ep->vdev->slot_id;
0904 int err;
0905
/*
 * Avoid resetting endpoint if link is inactive. Can cause host hang.
 * Device will be reset soon to recover the link so don't do anything
 */
0910 if (ep->vdev->flags & VDEV_PORT_ERROR)
0911 return -ENODEV;
0912
/* add td to cancelled list and let reset ep handler take care of it */
0914 if (reset_type == EP_HARD_RESET) {
0915 ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
0916 if (td && list_empty(&td->cancelled_td_list)) {
0917 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
0918 td->cancel_status = TD_HALTED;
0919 }
0920 }
0921
0922 if (ep->ep_state & EP_HALTED) {
0923 xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
0924 ep->ep_index);
0925 return 0;
0926 }
0927
0928 err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
0929 if (err)
0930 return err;
0931
0932 ep->ep_state |= EP_HALTED;
0933
0934 xhci_ring_cmd_db(xhci);
0935
0936 return 0;
0937 }
0938
/*
 * Fix up the ep ring first, so HW stops executing cancelled TDs.
 * We have the xHCI lock, so nothing can modify this list until we drop it.
 * We're also in the event handler, so we can't get re-interrupted if another
 * Stop Endpoint command completes.
 *
 * Only call this when the ring is not in a running state.
 */
0948 static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
0949 {
0950 struct xhci_hcd *xhci;
0951 struct xhci_td *td = NULL;
0952 struct xhci_td *tmp_td = NULL;
0953 struct xhci_td *cached_td = NULL;
0954 struct xhci_ring *ring;
0955 u64 hw_deq;
0956 unsigned int slot_id = ep->vdev->slot_id;
0957 int err;
0958
0959 xhci = ep->xhci;
0960
0961 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
0962 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
0963 "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
0964 (unsigned long long)xhci_trb_virt_to_dma(
0965 td->start_seg, td->first_trb),
0966 td->urb->stream_id, td->urb);
0967 list_del_init(&td->td_list);
0968 ring = xhci_urb_to_transfer_ring(xhci, td->urb);
0969 if (!ring) {
0970 xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
0971 td->urb, td->urb->stream_id);
0972 continue;
0973 }
/*
 * If a ring stopped on the TD we need to cancel then we have to
 * move the xHC endpoint ring dequeue pointer past this TD.
 * Rings halted due to STALL may show hw_deq is past the stalled
 * TD, but still require a set TR Deq command to flush xHC cache.
 */
0980 hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
0981 td->urb->stream_id);
0982 hw_deq &= ~0xf;
0983
0984 if (td->cancel_status == TD_HALTED ||
0985 trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
0986 switch (td->cancel_status) {
0987 case TD_CLEARED:
0988 case TD_CLEARING_CACHE:
0989 break;
0990 case TD_DIRTY:
0991 case TD_HALTED:
0992 td->cancel_status = TD_CLEARING_CACHE;
0993 if (cached_td)
0994
0995 xhci_dbg(xhci,
0996 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
0997 td->urb->stream_id, td->urb,
0998 cached_td->urb->stream_id, cached_td->urb);
0999 cached_td = td;
1000 break;
1001 }
1002 } else {
1003 td_to_noop(xhci, ring, td, false);
1004 td->cancel_status = TD_CLEARED;
1005 }
1006 }

/* If there's no need to move the dequeue pointer then we're done */
1009 if (!cached_td)
1010 return 0;
1011
1012 err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
1013 cached_td->urb->stream_id,
1014 cached_td);
1015 if (err) {
/* Failed to move past cached td, just set cached TDs to no-op */
1017 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
1018 if (td->cancel_status != TD_CLEARING_CACHE)
1019 continue;
1020 xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
1021 td->urb);
1022 td_to_noop(xhci, ring, td, false);
1023 td->cancel_status = TD_CLEARED;
1024 }
1025 }
1026 return 0;
1027 }
1028
/*
 * Returns the TD the endpoint ring halted on.
 * Only call for non-running rings without streams.
 */
1033 static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
1034 {
1035 struct xhci_td *td;
1036 u64 hw_deq;
1037
1038 if (!list_empty(&ep->ring->td_list)) {
1039 hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
1040 hw_deq &= ~0xf;
1041 td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
1042 if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
1043 td->last_trb, hw_deq, false))
1044 return td;
1045 }
1046 return NULL;
1047 }
1048
/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past those TDs
 *     using the Set Dequeue Pointer command, then process the remaining TDs.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
1059 static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
1060 union xhci_trb *trb, u32 comp_code)
1061 {
1062 unsigned int ep_index;
1063 struct xhci_virt_ep *ep;
1064 struct xhci_ep_ctx *ep_ctx;
1065 struct xhci_td *td = NULL;
1066 enum xhci_ep_reset_type reset_type;
1067 struct xhci_command *command;
1068 int err;
1069
1070 if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
1071 if (!xhci->devs[slot_id])
1072 xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
1073 slot_id);
1074 return;
1075 }
1076
1077 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1078 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1079 if (!ep)
1080 return;
1081
1082 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1083
1084 trace_xhci_handle_cmd_stop_ep(ep_ctx);
1085
1086 if (comp_code == COMP_CONTEXT_STATE_ERROR) {
/*
 * If the stop endpoint command raced with a halting endpoint we need to
 * reset the host side endpoint first.
 * If the TD we halted on isn't cancelled the TD should be given back
 * with a proper error code, and the ring dequeue moved past the TD.
 * In the streams case we can't find hw_deq, or the TD we halted on, so do
 * a soft reset.
 *
 * The proper error code is unknown here; it would be -EPIPE if the device
 * side of the endpoint halted (aka STALL), and -EPROTO if not (transaction
 * error).  We use -EPROTO: if the device is stalled it should return a stall
 * error on the next transfer, which then returns -EPIPE, and the device side
 * stall is noted and cleared by the class driver.
 */
1101 switch (GET_EP_CTX_STATE(ep_ctx)) {
1102 case EP_STATE_HALTED:
1103 xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
1104 if (ep->ep_state & EP_HAS_STREAMS) {
1105 reset_type = EP_SOFT_RESET;
1106 } else {
1107 reset_type = EP_HARD_RESET;
1108 td = find_halted_td(ep);
1109 if (td)
1110 td->status = -EPROTO;
1111 }
1112
1113 err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
1114 reset_type);
1115 if (err)
1116 break;
1117 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1118 return;
1119 case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
1121 xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
1122
1123 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1124 if (!command) {
1125 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1126 return;
1127 }
1128 xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
1129 xhci_ring_cmd_db(xhci);
1130
1131 return;
1132 default:
1133 break;
1134 }
1135 }

/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
1138 xhci_invalidate_cancelled_tds(ep);
1139 ep->ep_state &= ~EP_STOP_CMD_PENDING;

/* Otherwise ring the doorbell(s) to restart queued transfers */
1142 xhci_giveback_invalidated_tds(ep);
1143 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1144 }
1145
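/* Give back every URB queued on this ring with -ESHUTDOWN; used when the host is dying. */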
1146 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
1147 {
1148 struct xhci_td *cur_td;
1149 struct xhci_td *tmp;
1150
1151 list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
1152 list_del_init(&cur_td->td_list);
1153
1154 if (!list_empty(&cur_td->cancelled_td_list))
1155 list_del_init(&cur_td->cancelled_td_list);
1156
1157 xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
1158
1159 inc_td_cnt(cur_td->urb);
1160 if (last_td_in_urb(cur_td))
1161 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1162 }
1163 }
1164
1165 static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
1166 int slot_id, int ep_index)
1167 {
1168 struct xhci_td *cur_td;
1169 struct xhci_td *tmp;
1170 struct xhci_virt_ep *ep;
1171 struct xhci_ring *ring;
1172
1173 ep = &xhci->devs[slot_id]->eps[ep_index];
1174 if ((ep->ep_state & EP_HAS_STREAMS) ||
1175 (ep->ep_state & EP_GETTING_NO_STREAMS)) {
1176 int stream_id;
1177
1178 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
1179 stream_id++) {
1180 ring = ep->stream_info->stream_rings[stream_id];
1181 if (!ring)
1182 continue;
1183
1184 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1185 "Killing URBs for slot ID %u, ep index %u, stream %u",
1186 slot_id, ep_index, stream_id);
1187 xhci_kill_ring_urbs(xhci, ring);
1188 }
1189 } else {
1190 ring = ep->ring;
1191 if (!ring)
1192 return;
1193 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1194 "Killing URBs for slot ID %u, ep index %u",
1195 slot_id, ep_index);
1196 xhci_kill_ring_urbs(xhci, ring);
1197 }
1198
1199 list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
1200 cancelled_td_list) {
1201 list_del_init(&cur_td->cancelled_td_list);
1202 inc_td_cnt(cur_td->urb);
1203
1204 if (last_td_in_urb(cur_td))
1205 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1206 }
1207 }
1208
/*
 * host controller died, register read returns 0xffffffff
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held, or a remove/stop may still be pending, waiting for the last URB.
 */
1218 void xhci_hc_died(struct xhci_hcd *xhci)
1219 {
1220 int i, j;
1221
1222 if (xhci->xhc_state & XHCI_STATE_DYING)
1223 return;
1224
1225 xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
1226 xhci->xhc_state |= XHCI_STATE_DYING;
1227
1228 xhci_cleanup_command_queue(xhci);
1229
/* return any pending urbs, remove may be waiting for them */
1231 for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
1232 if (!xhci->devs[i])
1233 continue;
1234 for (j = 0; j < 31; j++)
1235 xhci_kill_endpoint_urbs(xhci, i, j);
1236 }
1237
/* inform usb core hc died if PCI remove isn't already handling it */
1239 if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
1240 usb_hc_died(xhci_to_hcd(xhci));
1241 }
1242
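/*
 * Walk our copy of the ring forward until it matches the dequeue pointer the
 * xHC accepted in the Set TR Dequeue Pointer command, keeping num_trbs_free
 * in sync; revert if the queued pointer can't be found.
 */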
1243 static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
1244 struct xhci_virt_device *dev,
1245 struct xhci_ring *ep_ring,
1246 unsigned int ep_index)
1247 {
1248 union xhci_trb *dequeue_temp;
1249 int num_trbs_free_temp;
1250 bool revert = false;
1251
1252 num_trbs_free_temp = ep_ring->num_trbs_free;
1253 dequeue_temp = ep_ring->dequeue;
1254
/* If we get two back-to-back stalls, and the first stalled transfer
 * ends just before a link TRB, the dequeue pointer will be left on
 * the link TRB by the code in the while loop.  So we have to update
 * the dequeue pointer one segment further, or we'll jump off
 * the segment into la-la-land.
 */
1261 if (trb_is_link(ep_ring->dequeue)) {
1262 ep_ring->deq_seg = ep_ring->deq_seg->next;
1263 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1264 }
1265
1266 while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
1268 ep_ring->num_trbs_free++;
1269 ep_ring->dequeue++;
1270 if (trb_is_link(ep_ring->dequeue)) {
1271 if (ep_ring->dequeue ==
1272 dev->eps[ep_index].queued_deq_ptr)
1273 break;
1274 ep_ring->deq_seg = ep_ring->deq_seg->next;
1275 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1276 }
1277 if (ep_ring->dequeue == dequeue_temp) {
1278 revert = true;
1279 break;
1280 }
1281 }
1282
1283 if (revert) {
1284 xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1285 ep_ring->num_trbs_free = num_trbs_free_temp;
1286 }
1287 }
1288
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
1296 static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
1297 union xhci_trb *trb, u32 cmd_comp_code)
1298 {
1299 unsigned int ep_index;
1300 unsigned int stream_id;
1301 struct xhci_ring *ep_ring;
1302 struct xhci_virt_ep *ep;
1303 struct xhci_ep_ctx *ep_ctx;
1304 struct xhci_slot_ctx *slot_ctx;
1305 struct xhci_td *td, *tmp_td;
1306
1307 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1308 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1309 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1310 if (!ep)
1311 return;
1312
1313 ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
1314 if (!ep_ring) {
1315 xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
1316 stream_id);
1317
1318 goto cleanup;
1319 }
1320
1321 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1322 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
1323 trace_xhci_handle_cmd_set_deq(slot_ctx);
1324 trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
1325
1326 if (cmd_comp_code != COMP_SUCCESS) {
1327 unsigned int ep_state;
1328 unsigned int slot_state;
1329
1330 switch (cmd_comp_code) {
1331 case COMP_TRB_ERROR:
1332 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
1333 break;
1334 case COMP_CONTEXT_STATE_ERROR:
1335 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
1336 ep_state = GET_EP_CTX_STATE(ep_ctx);
1337 slot_state = le32_to_cpu(slot_ctx->dev_state);
1338 slot_state = GET_SLOT_STATE(slot_state);
1339 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1340 "Slot state = %u, EP state = %u",
1341 slot_state, ep_state);
1342 break;
1343 case COMP_SLOT_NOT_ENABLED_ERROR:
1344 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
1345 slot_id);
1346 break;
1347 default:
1348 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
1349 cmd_comp_code);
1350 break;
1351 }
/* OK what do we do now?  The endpoint state is hosed, and we
 * should never get to this point if the synchronization between
 * queueing, and endpoint state are correct.  This might happen
 * if the device gets disconnected after we've finished
 * cancelling URBs, which might not be an error...
 */
1358 } else {
1359 u64 deq;
1360
1361 if (ep->ep_state & EP_HAS_STREAMS) {
1362 struct xhci_stream_ctx *ctx =
1363 &ep->stream_info->stream_ctx_array[stream_id];
1364 deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
1365 } else {
1366 deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
1367 }
1368 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1369 "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
1370 if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
1371 ep->queued_deq_ptr) == deq) {
/* Update the ring's dequeue segment and dequeue pointer
 * to reflect the new position.
 */
1375 update_ring_for_set_deq_completion(xhci, ep->vdev,
1376 ep_ring, ep_index);
1377 } else {
1378 xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
1379 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1380 ep->queued_deq_seg, ep->queued_deq_ptr);
1381 }
1382 }
1383
1384 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
1385 cancelled_td_list) {
1386 ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
1387 if (td->cancel_status == TD_CLEARING_CACHE) {
1388 td->cancel_status = TD_CLEARED;
1389 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
1390 __func__, td->urb);
1391 xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
1392 } else {
1393 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
1394 __func__, td->urb, td->cancel_status);
1395 }
1396 }
1397 cleanup:
1398 ep->ep_state &= ~SET_DEQ_PENDING;
1399 ep->queued_deq_seg = NULL;
1400 ep->queued_deq_ptr = NULL;
/* Restart any rings with pending URBs */
1402 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1403 }
1404
1405 static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1406 union xhci_trb *trb, u32 cmd_comp_code)
1407 {
1408 struct xhci_virt_ep *ep;
1409 struct xhci_ep_ctx *ep_ctx;
1410 unsigned int ep_index;
1411
1412 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1413 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1414 if (!ep)
1415 return;
1416
1417 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1418 trace_xhci_handle_cmd_reset_ep(ep_ctx);
1419
/* This command will only fail if the endpoint wasn't halted,
 * but we don't care.
 */
1423 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1424 "Ignoring reset ep completion code of %u", cmd_comp_code);
1425
/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
1427 xhci_invalidate_cancelled_tds(ep);
1428
/* Clear our internal halted state */
1430 ep->ep_state &= ~EP_HALTED;
1431
1432 xhci_giveback_invalidated_tds(ep);
1433
/* if this was a soft reset, then restart */
1435 if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
1436 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1437 }
1438
1439 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1440 struct xhci_command *command, u32 cmd_comp_code)
1441 {
1442 if (cmd_comp_code == COMP_SUCCESS)
1443 command->slot_id = slot_id;
1444 else
1445 command->slot_id = 0;
1446 }
1447
1448 static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1449 {
1450 struct xhci_virt_device *virt_dev;
1451 struct xhci_slot_ctx *slot_ctx;
1452
1453 virt_dev = xhci->devs[slot_id];
1454 if (!virt_dev)
1455 return;
1456
1457 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
1458 trace_xhci_handle_cmd_disable_slot(slot_ctx);
1459
1460 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
/* Delete default control endpoint resources */
1462 xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1463 }
1464
1465 static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1466 u32 cmd_comp_code)
1467 {
1468 struct xhci_virt_device *virt_dev;
1469 struct xhci_input_control_ctx *ctrl_ctx;
1470 struct xhci_ep_ctx *ep_ctx;
1471 unsigned int ep_index;
1472 u32 add_flags;
1473

/*
 * Configure endpoint commands can come from the USB core configuration or
 * alt setting changes, or when streams were being configured.
 */
1479 virt_dev = xhci->devs[slot_id];
1480 if (!virt_dev)
1481 return;
1482 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1483 if (!ctrl_ctx) {
1484 xhci_warn(xhci, "Could not get input context, bad type.\n");
1485 return;
1486 }
1487
1488 add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1489
/* Input ctx add_flags are the endpoint index plus one */
1491 ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1492
1493 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
1494 trace_xhci_handle_cmd_config_ep(ep_ctx);
1495
1496 return;
1497 }
1498
1499 static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
1500 {
1501 struct xhci_virt_device *vdev;
1502 struct xhci_slot_ctx *slot_ctx;
1503
1504 vdev = xhci->devs[slot_id];
1505 if (!vdev)
1506 return;
1507 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1508 trace_xhci_handle_cmd_addr_dev(slot_ctx);
1509 }
1510
1511 static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
1512 {
1513 struct xhci_virt_device *vdev;
1514 struct xhci_slot_ctx *slot_ctx;
1515
1516 vdev = xhci->devs[slot_id];
1517 if (!vdev) {
1518 xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
1519 slot_id);
1520 return;
1521 }
1522 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1523 trace_xhci_handle_cmd_reset_dev(slot_ctx);
1524
1525 xhci_dbg(xhci, "Completed reset device command.\n");
1526 }
1527
1528 static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1529 struct xhci_event_cmd *event)
1530 {
1531 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1532 xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
1533 return;
1534 }
1535 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1536 "NEC firmware version %2x.%02x",
1537 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1538 NEC_FW_MINOR(le32_to_cpu(event->status)));
1539 }
1540
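/* Remove the command from the list, then either complete its waiter or free it. */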
1541 static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1542 {
1543 list_del(&cmd->cmd_list);
1544
1545 if (cmd->completion) {
1546 cmd->status = status;
1547 complete(cmd->completion);
1548 } else {
1549 kfree(cmd);
1550 }
1551 }
1552
1553 void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1554 {
1555 struct xhci_command *cur_cmd, *tmp_cmd;
1556 xhci->current_cmd = NULL;
1557 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1558 xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
1559 }
1560
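/*
 * Command timeout work: abort the command ring if it is still running,
 * otherwise clean up the stopped ring or declare the host dead.
 */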
1561 void xhci_handle_command_timeout(struct work_struct *work)
1562 {
1563 struct xhci_hcd *xhci;
1564 unsigned long flags;
1565 char str[XHCI_MSG_MAX];
1566 u64 hw_ring_state;
1567 u32 cmd_field3;
1568 u32 usbsts;
1569
1570 xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
1571
1572 spin_lock_irqsave(&xhci->lock, flags);
1573
/*
 * If timeout work is pending, or current_cmd is NULL, it means we
 * raced with command completion. Command is handled so just return.
 */
1578 if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
1579 spin_unlock_irqrestore(&xhci->lock, flags);
1580 return;
1581 }
1582
1583 cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
1584 usbsts = readl(&xhci->op_regs->status);
1585 xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
1586
/* Bail out and tear down xhci if a stop endpoint command failed */
1588 if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
1589 struct xhci_virt_ep *ep;
1590
1591 xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");
1592
1593 ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
1594 TRB_TO_EP_INDEX(cmd_field3));
1595 if (ep)
1596 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1597
1598 xhci_halt(xhci);
1599 xhci_hc_died(xhci);
1600 goto time_out_completed;
1601 }
1602
/* mark this command to be cancelled */
1604 xhci->current_cmd->status = COMP_COMMAND_ABORTED;
1605
/* Make sure command ring is running before aborting it */
1607 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1608 if (hw_ring_state == ~(u64)0) {
1609 xhci_hc_died(xhci);
1610 goto time_out_completed;
1611 }
1612
1613 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1614 (hw_ring_state & CMD_RING_RUNNING)) {
/* Prevent new doorbell, and start command abort */
1616 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
1617 xhci_dbg(xhci, "Command timeout\n");
1618 xhci_abort_cmd_ring(xhci, flags);
1619 goto time_out_completed;
1620 }
1621
/* host removed. Bail out */
1623 if (xhci->xhc_state & XHCI_STATE_REMOVING) {
1624 xhci_dbg(xhci, "host removed, ring start fail?\n");
1625 xhci_cleanup_command_queue(xhci);
1626
1627 goto time_out_completed;
1628 }
1629
/* command timeout on stopped ring, ring can't be aborted */
1631 xhci_dbg(xhci, "Command timeout on stopped ring\n");
1632 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1633
1634 time_out_completed:
1635 spin_unlock_irqrestore(&xhci->lock, flags);
1636 return;
1637 }
1638
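/*
 * Handle a command completion event: match it against the head of the command
 * list, dispatch to the per-command handler, then restart the timer if more
 * commands are queued.
 */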
1639 static void handle_cmd_completion(struct xhci_hcd *xhci,
1640 struct xhci_event_cmd *event)
1641 {
1642 unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1643 u64 cmd_dma;
1644 dma_addr_t cmd_dequeue_dma;
1645 u32 cmd_comp_code;
1646 union xhci_trb *cmd_trb;
1647 struct xhci_command *cmd;
1648 u32 cmd_type;
1649
1650 if (slot_id >= MAX_HC_SLOTS) {
1651 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
1652 return;
1653 }
1654
1655 cmd_dma = le64_to_cpu(event->cmd_trb);
1656 cmd_trb = xhci->cmd_ring->dequeue;
1657
1658 trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
1659
1660 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1661 cmd_trb);
/*
 * Check whether the completion event is for our internal kept
 * command.
 */
1666 if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
1667 xhci_warn(xhci,
1668 "ERROR mismatched command completion event\n");
1669 return;
1670 }
1671
1672 cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);
1673
1674 cancel_delayed_work(&xhci->cmd_timer);
1675
1676 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1677
/* If CMD ring stopped we own the trbs between enqueue and dequeue */
1679 if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
1680 complete_all(&xhci->cmd_ring_stop_completion);
1681 return;
1682 }
1683
1684 if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1685 xhci_err(xhci,
1686 "Command completion event does not match command\n");
1687 return;
1688 }
1689
/*
 * Host aborted the command ring, check if the current command was
 * supposed to be aborted, otherwise continue normally.
 * The command ring is stopped now, but the xHC will issue a Command
 * Ring Stopped event which will cause us to restart it.
 */
1696 if (cmd_comp_code == COMP_COMMAND_ABORTED) {
1697 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1698 if (cmd->status == COMP_COMMAND_ABORTED) {
1699 if (xhci->current_cmd == cmd)
1700 xhci->current_cmd = NULL;
1701 goto event_handled;
1702 }
1703 }
1704
1705 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1706 switch (cmd_type) {
1707 case TRB_ENABLE_SLOT:
1708 xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
1709 break;
1710 case TRB_DISABLE_SLOT:
1711 xhci_handle_cmd_disable_slot(xhci, slot_id);
1712 break;
1713 case TRB_CONFIG_EP:
1714 if (!cmd->completion)
1715 xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
1716 break;
1717 case TRB_EVAL_CONTEXT:
1718 break;
1719 case TRB_ADDR_DEV:
1720 xhci_handle_cmd_addr_dev(xhci, slot_id);
1721 break;
1722 case TRB_STOP_RING:
1723 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1724 le32_to_cpu(cmd_trb->generic.field[3])));
1725 if (!cmd->completion)
1726 xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
1727 cmd_comp_code);
1728 break;
1729 case TRB_SET_DEQ:
1730 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1731 le32_to_cpu(cmd_trb->generic.field[3])));
1732 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1733 break;
1734 case TRB_CMD_NOOP:
/* Is this an aborted command turned to NO-OP? */
1736 if (cmd->status == COMP_COMMAND_RING_STOPPED)
1737 cmd_comp_code = COMP_COMMAND_RING_STOPPED;
1738 break;
1739 case TRB_RESET_EP:
1740 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1741 le32_to_cpu(cmd_trb->generic.field[3])));
1742 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1743 break;
1744 case TRB_RESET_DEV:
/* SLOT_ID field in reset device cmd completion event TRB is 0.
 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
 */
1748 slot_id = TRB_TO_SLOT_ID(
1749 le32_to_cpu(cmd_trb->generic.field[3]));
1750 xhci_handle_cmd_reset_dev(xhci, slot_id);
1751 break;
1752 case TRB_NEC_GET_FW:
1753 xhci_handle_cmd_nec_get_fw(xhci, event);
1754 break;
1755 default:
/* Skip over unknown commands on the event ring */
1757 xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
1758 break;
1759 }
1760
/* restart timer if this wasn't the last command */
1762 if (!list_is_singular(&xhci->cmd_list)) {
1763 xhci->current_cmd = list_first_entry(&cmd->cmd_list,
1764 struct xhci_command, cmd_list);
1765 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
1766 } else if (xhci->current_cmd == cmd) {
1767 xhci->current_cmd = NULL;
1768 }
1769
1770 event_handled:
1771 xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
1772
1773 inc_deq(xhci, xhci->cmd_ring);
1774 }
1775
1776 static void handle_vendor_event(struct xhci_hcd *xhci,
1777 union xhci_trb *event, u32 trb_type)
1778 {
1779 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1780 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1781 handle_cmd_completion(xhci, &event->event_cmd);
1782 }
1783
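/* Device Notification event: a device signaled remote wakeup, notify its parent hub. */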
1784 static void handle_device_notification(struct xhci_hcd *xhci,
1785 union xhci_trb *event)
1786 {
1787 u32 slot_id;
1788 struct usb_device *udev;
1789
1790 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1791 if (!xhci->devs[slot_id]) {
1792 xhci_warn(xhci, "Device Notification event for "
1793 "unused slot %u\n", slot_id);
1794 return;
1795 }
1796
1797 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1798 slot_id);
1799 udev = xhci->devs[slot_id]->udev;
1800 if (udev && udev->parent)
1801 usb_wakeup_notification(udev->parent, udev->portnum);
1802 }
1803
/*
 * Quirk handler for errata seen on Cavium ThunderX2 processor xHCI
 * controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
 * if a connection to a USB 1 device is followed by another connection
 * to a USB 2 device.
 *
 * Reset the PHY after the USB device is disconnected if device speed
 * is less than HCD_USB3.
 * Retry the reset sequence a maximum of 4 times, checking the PLL lock status.
 */
1816 static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
1817 {
1818 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1819 u32 pll_lock_check;
1820 u32 retry_count = 4;
1821
1822 do {
/* reset the phy */
1824 writel(0x6F, hcd->regs + 0x1048);
1825 udelay(10);
/* enable the phy */
1827 writel(0x7F, hcd->regs + 0x1048);
1828 udelay(200);
1829 pll_lock_check = readl(hcd->regs + 0x1070);
1830 } while (!(pll_lock_check & 0x1) && --retry_count);
1831 }
1832
1833 static void handle_port_status(struct xhci_hcd *xhci,
1834 union xhci_trb *event)
1835 {
1836 struct usb_hcd *hcd;
1837 u32 port_id;
1838 u32 portsc, cmd_reg;
1839 int max_ports;
1840 int slot_id;
1841 unsigned int hcd_portnum;
1842 struct xhci_bus_state *bus_state;
1843 bool bogus_port_status = false;
1844 struct xhci_port *port;
1845
/* Port status change events always have a successful completion code */
1847 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
1848 xhci_warn(xhci,
1849 "WARN: xHC returned failed port status event\n");
1850
1851 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1852 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1853
1854 if ((port_id <= 0) || (port_id > max_ports)) {
1855 xhci_warn(xhci, "Port change event with invalid port ID %d\n",
1856 port_id);
1857 inc_deq(xhci, xhci->event_ring);
1858 return;
1859 }
1860
1861 port = &xhci->hw_ports[port_id - 1];
1862 if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
1863 xhci_warn(xhci, "Port change event, no port for port ID %u\n",
1864 port_id);
1865 bogus_port_status = true;
1866 goto cleanup;
1867 }
1868
/* We might get interrupts after shared_hcd is removed */
1870 if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
1871 xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
1872 bogus_port_status = true;
1873 goto cleanup;
1874 }
1875
1876 hcd = port->rhub->hcd;
1877 bus_state = &port->rhub->bus_state;
1878 hcd_portnum = port->hcd_portnum;
1879 portsc = readl(port->addr);
1880
1881 xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
1882 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
1883
1884 trace_xhci_handle_port_status(hcd_portnum, portsc);
1885
1886 if (hcd->state == HC_STATE_SUSPENDED) {
1887 xhci_dbg(xhci, "resume root hub\n");
1888 usb_hcd_resume_root_hub(hcd);
1889 }
1890
1891 if (hcd->speed >= HCD_USB3 &&
1892 (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
1893 slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1894 if (slot_id && xhci->devs[slot_id])
1895 xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
1896 }
1897
1898 if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
1899 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1900
1901 cmd_reg = readl(&xhci->op_regs->command);
1902 if (!(cmd_reg & CMD_RUN)) {
1903 xhci_warn(xhci, "xHC is not running.\n");
1904 goto cleanup;
1905 }
1906
1907 if (DEV_SUPERSPEED_ANY(portsc)) {
1908 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
/* Set a flag to say the port signaled remote wakeup,
 * so we can tell the difference between the end of
 * device and host initiated resume.
 */
1913 bus_state->port_remote_wakeup |= 1 << hcd_portnum;
1914 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1915 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1916 xhci_set_link_state(xhci, port, XDEV_U0);
/* Need to wait until the next link state change
 * indicates the device is actually in U0.
 */
1920 bogus_port_status = true;
1921 goto cleanup;
1922 } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
1923 xhci_dbg(xhci, "resume HS port %d\n", port_id);
1924 bus_state->resume_done[hcd_portnum] = jiffies +
1925 msecs_to_jiffies(USB_RESUME_TIMEOUT);
1926 set_bit(hcd_portnum, &bus_state->resuming_ports);
/* Do the rest in GetPortStatus after resume time delay.
 * Avoid polling roothub status before that so that a
 * usb device auto-resume latency around ~40ms.
 */
1931 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1932 mod_timer(&hcd->rh_timer,
1933 bus_state->resume_done[hcd_portnum]);
1934 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1935 bogus_port_status = true;
1936 }
1937 }
1938
1939 if ((portsc & PORT_PLC) &&
1940 DEV_SUPERSPEED_ANY(portsc) &&
1941 ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
1942 (portsc & PORT_PLS_MASK) == XDEV_U1 ||
1943 (portsc & PORT_PLS_MASK) == XDEV_U2)) {
1944 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1945 complete(&bus_state->u3exit_done[hcd_portnum]);
/* We've just brought the device into U0/1/2 through either the
 * Resume state after a device remote wakeup, or through the
 * U3Exit state after a host-initiated resume.  If it's a device
 * initiated remote wake, don't pass up the link state change,
 * so the roothub behavior is consistent with external
 * USB 3.0 hub behavior.
 */
1953 slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1954 if (slot_id && xhci->devs[slot_id])
1955 xhci_ring_device(xhci, slot_id);
1956 if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
1957 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1958 usb_wakeup_notification(hcd->self.root_hub,
1959 hcd_portnum + 1);
1960 bogus_port_status = true;
1961 goto cleanup;
1962 }
1963 }
1964
/*
 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
 * RExit to a disconnect state).  If so, let the driver know it's
 * out of the RExit state.
 */
1970 if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
1971 test_and_clear_bit(hcd_portnum,
1972 &bus_state->rexit_ports)) {
1973 complete(&bus_state->rexit_done[hcd_portnum]);
1974 bogus_port_status = true;
1975 goto cleanup;
1976 }
1977
1978 if (hcd->speed < HCD_USB3) {
1979 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1980 if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
1981 (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
1982 xhci_cavium_reset_phy_quirk(xhci);
1983 }
1984
1985 cleanup:
/* Update event ring dequeue pointer before dropping the lock */
1987 inc_deq(xhci, xhci->event_ring);
1988
/* Don't make the USB core poll the roothub if we got a bad port status
 * change event.  Besides, at that point we can't tell which roothub
 * (USB 2.0 or USB 3.0) to kick.
 */
1993 if (bogus_port_status)
1994 return;
1995
/*
 * xHCI port-status-change events occur when the "or" of all the
 * status-change bits in the portsc register changes from 0 to 1.
 * New status changes won't cause an event if any other change
 * bits are still set.  When an event occurs, switch over to
 * polling to avoid losing status changes.
 */
2003 xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
2004 __func__, hcd->self.busnum);
2005 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2006 spin_unlock(&xhci->lock);
/* Pass this up to the core */
2008 usb_hcd_poll_rh_status(hcd);
2009 spin_lock(&xhci->lock);
2010 }
2011
/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
2018 struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
2019 struct xhci_segment *start_seg,
2020 union xhci_trb *start_trb,
2021 union xhci_trb *end_trb,
2022 dma_addr_t suspect_dma,
2023 bool debug)
2024 {
2025 dma_addr_t start_dma;
2026 dma_addr_t end_seg_dma;
2027 dma_addr_t end_trb_dma;
2028 struct xhci_segment *cur_seg;
2029
2030 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
2031 cur_seg = start_seg;
2032
2033 do {
2034 if (start_dma == 0)
2035 return NULL;
/* We may get an event for a Link TRB in the middle of a TD */
2037 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
2038 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
/* If the end TRB isn't in this segment, this is set to 0 */
2040 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
2041
2042 if (debug)
2043 xhci_warn(xhci,
2044 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
2045 (unsigned long long)suspect_dma,
2046 (unsigned long long)start_dma,
2047 (unsigned long long)end_trb_dma,
2048 (unsigned long long)cur_seg->dma,
2049 (unsigned long long)end_seg_dma);
2050
2051 if (end_trb_dma > 0) {
/* The end TRB is in this segment, so suspect should be here */
2053 if (start_dma <= end_trb_dma) {
2054 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
2055 return cur_seg;
2056 } else {
/* Case for one segment with
 * a TD wrapped around to the top
 */
2060 if ((suspect_dma >= start_dma &&
2061 suspect_dma <= end_seg_dma) ||
2062 (suspect_dma >= cur_seg->dma &&
2063 suspect_dma <= end_trb_dma))
2064 return cur_seg;
2065 }
2066 return NULL;
2067 } else {
/* Might still be somewhere in this segment */
2069 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
2070 return cur_seg;
2071 }
2072 cur_seg = cur_seg->next;
2073 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2074 } while (cur_seg != start_seg);
2075
2076 return NULL;
2077 }
2078
2079 static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
2080 struct xhci_virt_ep *ep)
2081 {
/*
 * As part of low/full-speed endpoint-halt processing
 * we must clear the TT buffer (USB 2.0 specification 11.17.5).
 */
2086 if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
2087 (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
2088 !(ep->ep_state & EP_CLEARING_TT)) {
2089 ep->ep_state |= EP_CLEARING_TT;
2090 td->urb->ep->hcpriv = td->urb->dev;
2091 if (usb_hub_clear_tt_buffer(td->urb))
2092 ep->ep_state &= ~EP_CLEARING_TT;
2093 }
2094 }
2095
/*
 * Check if the endpoint needs a manual halt cleanup: transaction, babble and
 * split-transaction errors may leave the endpoint halted in the endpoint
 * context even though no stall was reported, in which case the driver must
 * issue a Reset Endpoint command itself.
 */
2102 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
2103 struct xhci_ep_ctx *ep_ctx,
2104 unsigned int trb_comp_code)
2105 {
2106
2107 if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
2108 trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
2109 trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
/* The 0.95 spec says a babbling control endpoint
 * is not halted. The 0.96 spec says it is.  Some HW
 * claims to be 0.95 compliant, but it halts the control
 * endpoint anyway.  Check if a babble halted the
 * endpoint.
 */
2116 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
2117 return 1;
2118
2119 return 0;
2120 }
2121
2122 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
2123 {
2124 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
/* Vendor defined "informational" completion code,
 * treat as not-an-error.
 */
2128 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
2129 trb_comp_code);
2130 xhci_dbg(xhci, "Treating code as success.\n");
2131 return 1;
2132 }
2133 return 0;
2134 }
2135
2136 static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2137 struct xhci_ring *ep_ring, struct xhci_td *td,
2138 u32 trb_comp_code)
2139 {
2140 struct xhci_ep_ctx *ep_ctx;
2141
2142 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2143
2144 switch (trb_comp_code) {
2145 case COMP_STOPPED_LENGTH_INVALID:
2146 case COMP_STOPPED_SHORT_PACKET:
2147 case COMP_STOPPED:
/*
 * The "Stop Endpoint" completion will take care of any
 * stopped TDs. A stopped TD may be restarted, so don't update
 * the ring dequeue pointer or take this TD off any lists yet.
 */
2153 return 0;
2154 case COMP_USB_TRANSACTION_ERROR:
2155 case COMP_BABBLE_DETECTED_ERROR:
2156 case COMP_SPLIT_TRANSACTION_ERROR:
/*
 * Transaction, babble and split-transaction errors normally leave the
 * endpoint halted and require a reset.  If the endpoint context is not
 * halted we may be racing with a Reset Endpoint command issued for an
 * earlier unlink/cancel, so only clean up the TD here and skip queueing
 * another reset.
 */
2168 if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
/*
 * If EP_HALTED is set and the TD is already on the cancelled list,
 * the halt is already being resolved; leave the TD for the reset
 * endpoint handler to give back.
 */
2174 if ((ep->ep_state & EP_HALTED) &&
2175 !list_empty(&td->cancelled_td_list)) {
2176 xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
2177 (unsigned long long)xhci_trb_virt_to_dma(
2178 td->start_seg, td->first_trb));
2179 return 0;
2180 }
2181
2182 break;
2183 }
2184
2185 xhci_clear_hub_tt_buffer(xhci, td, ep);
2186 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
2187 EP_HARD_RESET);
2188 return 0;
2189 case COMP_STALL_ERROR:
/*
 * xhci internal endpoint state will go to a "halt" state for
 * any stall, including default control pipe protocol stall.
 * To clear the host side halt we need to issue a reset endpoint
 * command, followed by a Set TR Dequeue Pointer command to move
 * past the TD.
 * Class drivers clear the device side halt from a functional
 * stall later. Hub TT buffer should only be cleared for FS/LS
 * devices behind HS hubs for functional stalls.
 */
2200 if (ep->ep_index != 0)
2201 xhci_clear_hub_tt_buffer(xhci, td, ep);
2202
2203 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
2204 EP_HARD_RESET);
2205
2206 return 0;
2207 default:
2208 break;
2209 }

/* Update ring dequeue pointer */
2212 ep_ring->dequeue = td->last_trb;
2213 ep_ring->deq_seg = td->last_trb_seg;
2214 ep_ring->num_trbs_free += td->num_trbs - 1;
2215 inc_deq(xhci, ep_ring);
2216
2217 return xhci_td_cleanup(xhci, td, ep_ring, td->status);
2218 }
2219
/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
2221 static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
2222 union xhci_trb *stop_trb)
2223 {
2224 u32 sum;
2225 union xhci_trb *trb = ring->dequeue;
2226 struct xhci_segment *seg = ring->deq_seg;
2227
2228 for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
2229 if (!trb_is_noop(trb) && !trb_is_link(trb))
2230 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
2231 }
2232 return sum;
2233 }
2234
/*
 * Process control tds, update urb status and actual_length.
 */
2238 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2239 struct xhci_ring *ep_ring, struct xhci_td *td,
2240 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2241 {
2242 struct xhci_ep_ctx *ep_ctx;
2243 u32 trb_comp_code;
2244 u32 remaining, requested;
2245 u32 trb_type;
2246
2247 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
2248 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2249 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2250 requested = td->urb->transfer_buffer_length;
2251 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2252
2253 switch (trb_comp_code) {
2254 case COMP_SUCCESS:
2255 if (trb_type != TRB_STATUS) {
2256 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
2257 (trb_type == TRB_DATA) ? "data" : "setup");
2258 td->status = -ESHUTDOWN;
2259 break;
2260 }
2261 td->status = 0;
2262 break;
2263 case COMP_SHORT_PACKET:
2264 td->status = 0;
2265 break;
2266 case COMP_STOPPED_SHORT_PACKET:
2267 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2268 td->urb->actual_length = remaining;
2269 else
2270 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
2271 goto finish_td;
2272 case COMP_STOPPED:
2273 switch (trb_type) {
2274 case TRB_SETUP:
2275 td->urb->actual_length = 0;
2276 goto finish_td;
2277 case TRB_DATA:
2278 case TRB_NORMAL:
2279 td->urb->actual_length = requested - remaining;
2280 goto finish_td;
2281 case TRB_STATUS:
2282 td->urb->actual_length = requested;
2283 goto finish_td;
2284 default:
2285 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
2286 trb_type);
2287 goto finish_td;
2288 }
2289 case COMP_STOPPED_LENGTH_INVALID:
2290 goto finish_td;
2291 default:
2292 if (!xhci_requires_manual_halt_cleanup(xhci,
2293 ep_ctx, trb_comp_code))
2294 break;
2295 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
2296 trb_comp_code, ep->ep_index);
2297 fallthrough;
2298 case COMP_STALL_ERROR:
2299
2300 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2301 td->urb->actual_length = requested - remaining;
2302 else if (!td->urb_length_set)
2303 td->urb->actual_length = 0;
2304 goto finish_td;
2305 }
2306
2307
2308 if (trb_type == TRB_SETUP)
2309 goto finish_td;
2310
2311
2312
2313
2314
2315 if (trb_type == TRB_DATA ||
2316 trb_type == TRB_NORMAL) {
2317 td->urb_length_set = true;
2318 td->urb->actual_length = requested - remaining;
2319 xhci_dbg(xhci, "Waiting for status stage event\n");
2320 return 0;
2321 }
2322
2323
2324 if (!td->urb_length_set)
2325 td->urb->actual_length = requested;
2326
2327 finish_td:
2328 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2329 }
2330
2331
2332
2333
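/* Process isochronous TDs: update the per-frame status and actual_length */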
2334 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2335 struct xhci_ring *ep_ring, struct xhci_td *td,
2336 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2337 {
2338 struct urb_priv *urb_priv;
2339 int idx;
2340 struct usb_iso_packet_descriptor *frame;
2341 u32 trb_comp_code;
2342 bool sum_trbs_for_length = false;
2343 u32 remaining, requested, ep_trb_len;
2344 int short_framestatus;
2345
2346 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2347 urb_priv = td->urb->hcpriv;
2348 idx = urb_priv->num_tds_done;
2349 frame = &td->urb->iso_frame_desc[idx];
2350 requested = frame->length;
2351 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2352 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2353 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2354 -EREMOTEIO : 0;
2355
2356
2357 switch (trb_comp_code) {
2358 case COMP_SUCCESS:
2359 if (remaining) {
2360 frame->status = short_framestatus;
2361 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2362 sum_trbs_for_length = true;
2363 break;
2364 }
2365 frame->status = 0;
2366 break;
2367 case COMP_SHORT_PACKET:
2368 frame->status = short_framestatus;
2369 sum_trbs_for_length = true;
2370 break;
2371 case COMP_BANDWIDTH_OVERRUN_ERROR:
2372 frame->status = -ECOMM;
2373 break;
2374 case COMP_ISOCH_BUFFER_OVERRUN:
2375 case COMP_BABBLE_DETECTED_ERROR:
2376 frame->status = -EOVERFLOW;
2377 break;
2378 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2379 case COMP_STALL_ERROR:
2380 frame->status = -EPROTO;
2381 break;
2382 case COMP_USB_TRANSACTION_ERROR:
2383 frame->status = -EPROTO;
2384 if (ep_trb != td->last_trb)
2385 return 0;
2386 break;
2387 case COMP_STOPPED:
2388 sum_trbs_for_length = true;
2389 break;
2390 case COMP_STOPPED_SHORT_PACKET:
2391
2392 frame->status = short_framestatus;
2393 requested = remaining;
2394 break;
2395 case COMP_STOPPED_LENGTH_INVALID:
2396 requested = 0;
2397 remaining = 0;
2398 break;
2399 default:
2400 sum_trbs_for_length = true;
2401 frame->status = -1;
2402 break;
2403 }
2404
2405 if (sum_trbs_for_length)
2406 frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
2407 ep_trb_len - remaining;
2408 else
2409 frame->actual_length = requested;
2410
2411 td->urb->actual_length += frame->actual_length;
2412
2413 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2414 }
2415
2416 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2417 struct xhci_virt_ep *ep, int status)
2418 {
2419 struct urb_priv *urb_priv;
2420 struct usb_iso_packet_descriptor *frame;
2421 int idx;
2422
2423 urb_priv = td->urb->hcpriv;
2424 idx = urb_priv->num_tds_done;
2425 frame = &td->urb->iso_frame_desc[idx];
2426
2427
2428 frame->status = -EXDEV;
2429
2430
2431 frame->actual_length = 0;
2432
2433
2434 ep->ring->dequeue = td->last_trb;
2435 ep->ring->deq_seg = td->last_trb_seg;
2436 ep->ring->num_trbs_free += td->num_trbs - 1;
2437 inc_deq(xhci, ep->ring);
2438
2439 return xhci_td_cleanup(xhci, td, ep->ring, status);
2440 }
2441
2442
2443
2444
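/* Process bulk and interrupt TDs: update the URB's actual_length and status */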
2445 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2446 struct xhci_ring *ep_ring, struct xhci_td *td,
2447 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2448 {
2449 struct xhci_slot_ctx *slot_ctx;
2450 u32 trb_comp_code;
2451 u32 remaining, requested, ep_trb_len;
2452
2453 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
2454 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2455 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2456 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2457 requested = td->urb->transfer_buffer_length;
2458
2459 switch (trb_comp_code) {
2460 case COMP_SUCCESS:
2461 ep_ring->err_count = 0;
2462
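/* Handle success with untransferred data as a short packet */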
2463 if (ep_trb != td->last_trb || remaining) {
2464 xhci_warn(xhci, "WARN Successful completion on short TX\n");
2465 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2466 td->urb->ep->desc.bEndpointAddress,
2467 requested, remaining);
2468 }
2469 td->status = 0;
2470 break;
2471 case COMP_SHORT_PACKET:
2472 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2473 td->urb->ep->desc.bEndpointAddress,
2474 requested, remaining);
2475 td->status = 0;
2476 break;
2477 case COMP_STOPPED_SHORT_PACKET:
2478 td->urb->actual_length = remaining;
2479 goto finish_td;
2480 case COMP_STOPPED_LENGTH_INVALID:
2481
2482 ep_trb_len = 0;
2483 remaining = 0;
2484 break;
2485 case COMP_USB_TRANSACTION_ERROR:
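/*
 * Attempt a soft retry (Reset Endpoint with TSP set) unless the host
 * has the no-soft-retry quirk, the retry budget is exhausted, or the
 * device sits behind a transaction translator.
 */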
2486 if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
2487 (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
2488 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
2489 break;
2490
2491 td->status = 0;
2492
2493 xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
2494 EP_SOFT_RESET);
2495 return 0;
2496 default:
2497
2498 break;
2499 }
2500
2501 if (ep_trb == td->last_trb)
2502 td->urb->actual_length = requested - remaining;
2503 else
2504 td->urb->actual_length =
2505 sum_trb_lengths(xhci, ep_ring, ep_trb) +
2506 ep_trb_len - remaining;
2507 finish_td:
2508 if (remaining > requested) {
2509 xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2510 remaining);
2511 td->urb->actual_length = 0;
2512 }
2513
2514 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2515 }
2516
2517
2518
2519
2520
2521
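/*
 * Handle a transfer event.  If this function returns an error, the
 * event referred to a slot, endpoint or TRB DMA address the driver
 * does not know about, and the host controller is probably hosed.
 */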
2522 static int handle_tx_event(struct xhci_hcd *xhci,
2523 struct xhci_transfer_event *event)
2524 {
2525 struct xhci_virt_ep *ep;
2526 struct xhci_ring *ep_ring;
2527 unsigned int slot_id;
2528 int ep_index;
2529 struct xhci_td *td = NULL;
2530 dma_addr_t ep_trb_dma;
2531 struct xhci_segment *ep_seg;
2532 union xhci_trb *ep_trb;
2533 int status = -EINPROGRESS;
2534 struct xhci_ep_ctx *ep_ctx;
2535 struct list_head *tmp;
2536 u32 trb_comp_code;
2537 int td_num = 0;
2538 bool handling_skipped_tds = false;
2539
2540 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2541 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2542 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2543 ep_trb_dma = le64_to_cpu(event->buffer);
2544
2545 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
2546 if (!ep) {
2547 xhci_err(xhci, "ERROR Invalid Transfer event\n");
2548 goto err_out;
2549 }
2550
2551 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
2552 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
2553
2554 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
2555 xhci_err(xhci,
2556 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
2557 slot_id, ep_index);
2558 goto err_out;
2559 }
2560
2561
2562 if (!ep_ring) {
2563 switch (trb_comp_code) {
2564 case COMP_STALL_ERROR:
2565 case COMP_USB_TRANSACTION_ERROR:
2566 case COMP_INVALID_STREAM_TYPE_ERROR:
2567 case COMP_INVALID_STREAM_ID_ERROR:
2568 xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
2569 EP_SOFT_RESET);
2570 goto cleanup;
2571 case COMP_RING_UNDERRUN:
2572 case COMP_RING_OVERRUN:
2573 case COMP_STOPPED_LENGTH_INVALID:
2574 goto cleanup;
2575 default:
2576 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
2577 slot_id, ep_index);
2578 goto err_out;
2579 }
2580 }
2581
2582
2583 if (ep->skip) {
2584 list_for_each(tmp, &ep_ring->td_list)
2585 td_num++;
2586 }
2587
2588
2589 switch (trb_comp_code) {
2590
2591
2592
2593 case COMP_SUCCESS:
2594 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2595 break;
2596 if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
2597 ep_ring->last_td_was_short)
2598 trb_comp_code = COMP_SHORT_PACKET;
2599 else
2600 xhci_warn_ratelimited(xhci,
2601 "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
2602 slot_id, ep_index);
2603 break;
2604 case COMP_SHORT_PACKET:
2605 break;
2606
2607 case COMP_STOPPED:
2608 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
2609 slot_id, ep_index);
2610 break;
2611 case COMP_STOPPED_LENGTH_INVALID:
2612 xhci_dbg(xhci,
2613 "Stopped on No-op or Link TRB for slot %u ep %u\n",
2614 slot_id, ep_index);
2615 break;
2616 case COMP_STOPPED_SHORT_PACKET:
2617 xhci_dbg(xhci,
2618 "Stopped with short packet transfer detected for slot %u ep %u\n",
2619 slot_id, ep_index);
2620 break;
2621
2622 case COMP_STALL_ERROR:
2623 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
2624 ep_index);
2625 status = -EPIPE;
2626 break;
2627 case COMP_SPLIT_TRANSACTION_ERROR:
2628 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
2629 slot_id, ep_index);
2630 status = -EPROTO;
2631 break;
2632 case COMP_USB_TRANSACTION_ERROR:
2633 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
2634 slot_id, ep_index);
2635 status = -EPROTO;
2636 break;
2637 case COMP_BABBLE_DETECTED_ERROR:
2638 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
2639 slot_id, ep_index);
2640 status = -EOVERFLOW;
2641 break;
2642
2643 case COMP_TRB_ERROR:
2644 xhci_warn(xhci,
2645 "WARN: TRB error for slot %u ep %u on endpoint\n",
2646 slot_id, ep_index);
2647 status = -EILSEQ;
2648 break;
2649
2650 case COMP_DATA_BUFFER_ERROR:
2651 xhci_warn(xhci,
2652 "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
2653 slot_id, ep_index);
2654 status = -ENOSR;
2655 break;
2656 case COMP_BANDWIDTH_OVERRUN_ERROR:
2657 xhci_warn(xhci,
2658 "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
2659 slot_id, ep_index);
2660 break;
2661 case COMP_ISOCH_BUFFER_OVERRUN:
2662 xhci_warn(xhci,
2663 "WARN: buffer overrun event for slot %u ep %u on endpoint",
2664 slot_id, ep_index);
2665 break;
2666 case COMP_RING_UNDERRUN:
2667
2668
2669
2670
2671
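/*
 * When an isoc ring runs dry the controller generates a Ring Overrun
 * event for IN endpoints and a Ring Underrun event for OUT endpoints;
 * neither refers to a specific TD.
 */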
2672 xhci_dbg(xhci, "underrun event on endpoint\n");
2673 if (!list_empty(&ep_ring->td_list))
2674 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2675 "still with TDs queued?\n",
2676 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2677 ep_index);
2678 goto cleanup;
2679 case COMP_RING_OVERRUN:
2680 xhci_dbg(xhci, "overrun event on endpoint\n");
2681 if (!list_empty(&ep_ring->td_list))
2682 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2683 "still with TDs queued?\n",
2684 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2685 ep_index);
2686 goto cleanup;
2687 case COMP_MISSED_SERVICE_ERROR:
2688
2689
2690
2691
2692
2693
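/*
 * The controller skipped one or more isoc TDs.  Set the skip flag so
 * the missed TDs are completed as short transfers when the next event
 * for this ring is processed.
 */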
2694 ep->skip = true;
2695 xhci_dbg(xhci,
2696 "Miss service interval error for slot %u ep %u, set skip flag\n",
2697 slot_id, ep_index);
2698 goto cleanup;
2699 case COMP_NO_PING_RESPONSE_ERROR:
2700 ep->skip = true;
2701 xhci_dbg(xhci,
2702 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
2703 slot_id, ep_index);
2704 goto cleanup;
2705
2706 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2707
2708 xhci_warn(xhci,
2709 "WARN: detect an incompatible device for slot %u ep %u",
2710 slot_id, ep_index);
2711 status = -EPROTO;
2712 break;
2713 default:
2714 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2715 status = 0;
2716 break;
2717 }
2718 xhci_warn(xhci,
2719 "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
2720 trb_comp_code, slot_id, ep_index);
2721 goto cleanup;
2722 }
2723
2724 do {
2725
2726
2727
2728 if (list_empty(&ep_ring->td_list)) {
2729
2730
2731
2732
2733
2734
2735
2736
2737 if (!(trb_comp_code == COMP_STOPPED ||
2738 trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
2739 ep_ring->last_td_was_short)) {
2740 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2741 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2742 ep_index);
2743 }
2744 if (ep->skip) {
2745 ep->skip = false;
2746 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
2747 slot_id, ep_index);
2748 }
2749 if (trb_comp_code == COMP_STALL_ERROR ||
2750 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2751 trb_comp_code)) {
2752 xhci_handle_halted_endpoint(xhci, ep,
2753 ep_ring->stream_id,
2754 NULL,
2755 EP_HARD_RESET);
2756 }
2757 goto cleanup;
2758 }
2759
2760
2761 if (ep->skip && td_num == 0) {
2762 ep->skip = false;
2763 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
2764 slot_id, ep_index);
2765 goto cleanup;
2766 }
2767
2768 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2769 td_list);
2770 if (ep->skip)
2771 td_num--;
2772
2773
2774 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2775 td->last_trb, ep_trb_dma, false);
2776
2777
2778
2779
2780
2781
2782
2783
2784
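/*
 * A Stopped event may point just before the current TD (a link TRB or
 * the last TRB of the previous TD) rather than inside it.  The Stop
 * Endpoint command completion handler deals with the TD, so ignore the
 * event here.
 */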
2785 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
2786 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
2787 goto cleanup;
2788 }
2789
2790 if (!ep_seg) {
2791 if (!ep->skip ||
2792 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2793
2794
2795
2796
2797 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2798 ep_ring->last_td_was_short) {
2799 ep_ring->last_td_was_short = false;
2800 goto cleanup;
2801 }
2802
2803 xhci_err(xhci,
2804 "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
2805 ep_index, trb_comp_code);
2808 trb_in_td(xhci, ep_ring->deq_seg,
2809 ep_ring->dequeue, td->last_trb,
2810 ep_trb_dma, true);
2811 return -ESHUTDOWN;
2812 }
2813
2814 skip_isoc_td(xhci, td, ep, status);
2815 goto cleanup;
2816 }
2817 if (trb_comp_code == COMP_SHORT_PACKET)
2818 ep_ring->last_td_was_short = true;
2819 else
2820 ep_ring->last_td_was_short = false;
2821
2822 if (ep->skip) {
2823 xhci_dbg(xhci,
2824 "Found td. Clear skip flag for slot %u ep %u.\n",
2825 slot_id, ep_index);
2826 ep->skip = false;
2827 }
2828
2829 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
2830 sizeof(*ep_trb)];
2831
2832 trace_xhci_handle_transfer(ep_ring,
2833 (struct xhci_generic_trb *) ep_trb);
2834
2835
2836
2837
2838
2839
2840
2841
2842
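/*
 * A no-op TRB can still generate an event, e.g. when a URB was killed
 * and a stall arrived right after the ring stopped.  Reset a halted
 * endpoint here, otherwise it would stay stalled indefinitely.
 */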
2843 if (trb_is_noop(ep_trb)) {
2844 if (trb_comp_code == COMP_STALL_ERROR ||
2845 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2846 trb_comp_code))
2847 xhci_handle_halted_endpoint(xhci, ep,
2848 ep_ring->stream_id,
2849 td, EP_HARD_RESET);
2850 goto cleanup;
2851 }
2852
2853 td->status = status;
2854
2855
2856 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2857 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
2858 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2859 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
2860 else
2861 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
2862 cleanup:
2863 handling_skipped_tds = ep->skip &&
2864 trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
2865 trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
2866
2867
2868
2869
2870
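/*
 * Do not advance the event ring dequeue pointer while looping over
 * skipped (missed) TDs; the loop is still handling this one event.
 */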
2871 if (!handling_skipped_tds)
2872 inc_deq(xhci, xhci->event_ring);
2873
2874
2875
2876
2877
2878
2879
2880 } while (handling_skipped_tds);
2881
2882 return 0;
2883
2884 err_out:
2885 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2886 (unsigned long long) xhci_trb_virt_to_dma(
2887 xhci->event_ring->deq_seg,
2888 xhci->event_ring->dequeue),
2889 lower_32_bits(le64_to_cpu(event->buffer)),
2890 upper_32_bits(le64_to_cpu(event->buffer)),
2891 le32_to_cpu(event->transfer_len),
2892 le32_to_cpu(event->flags));
2893 return -ENODEV;
2894 }
2895
2896
2897
2898
2899
2900
2901
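/*
 * Handle one OS-owned event on the event ring.  Returns 1 if the
 * caller should keep processing events, 0 when the ring is empty or
 * the host is dying, and a negative errno on failure.
 */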
2902 static int xhci_handle_event(struct xhci_hcd *xhci)
2903 {
2904 union xhci_trb *event;
2905 int update_ptrs = 1;
2906 u32 trb_type;
2907 int ret;
2908
2909
2910 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2911 xhci_err(xhci, "ERROR event ring not ready\n");
2912 return -ENOMEM;
2913 }
2914
2915 event = xhci->event_ring->dequeue;
2916
2917 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2918 xhci->event_ring->cycle_state)
2919 return 0;
2920
2921 trace_xhci_handle_event(xhci->event_ring, &event->generic);
2922
2923
2924
2925
2926
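/*
 * Order reading the cycle bit (event ownership) above against reading
 * the rest of the event TRB below.
 */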
2927 rmb();
2928 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
2929
2930
2931 switch (trb_type) {
2932 case TRB_COMPLETION:
2933 handle_cmd_completion(xhci, &event->event_cmd);
2934 break;
2935 case TRB_PORT_STATUS:
2936 handle_port_status(xhci, event);
2937 update_ptrs = 0;
2938 break;
2939 case TRB_TRANSFER:
2940 ret = handle_tx_event(xhci, &event->trans_event);
2941 if (ret >= 0)
2942 update_ptrs = 0;
2943 break;
2944 case TRB_DEV_NOTE:
2945 handle_device_notification(xhci, event);
2946 break;
2947 default:
2948 if (trb_type >= TRB_VENDOR_DEFINED_LOW)
2949 handle_vendor_event(xhci, event, trb_type);
2950 else
2951 xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
2952 }
2953
2954
2955
2956 if (xhci->xhc_state & XHCI_STATE_DYING) {
2957 xhci_dbg(xhci, "xHCI host dying, returning from "
2958 "event handler.\n");
2959 return 0;
2960 }
2961
2962 if (update_ptrs)
2963
2964 inc_deq(xhci, xhci->event_ring);
2965
2966
2967
2968
2969 return 1;
2970 }
2971
2972
2973
2974
2975
2976
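/*
 * Write the updated event ring dequeue pointer back to the controller
 * and clear the Event Handler Busy flag.
 */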
2977 static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
2978 union xhci_trb *event_ring_deq)
2979 {
2980 u64 temp_64;
2981 dma_addr_t deq;
2982
2983 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2984
2985 if (event_ring_deq != xhci->event_ring->dequeue) {
2986 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2987 xhci->event_ring->dequeue);
2988 if (deq == 0)
2989 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
2990
2991
2992
2993
2994 if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
2995 ((u64) deq & (u64) ~ERST_PTR_MASK))
2996 return;
2997
2998
2999 temp_64 &= ERST_PTR_MASK;
3000 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
3001 }
3002
3003
3004 temp_64 |= ERST_EHB;
3005 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
3006 }
3007
3008
3009
3010
3011
3012
3013 irqreturn_t xhci_irq(struct usb_hcd *hcd)
3014 {
3015 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3016 union xhci_trb *event_ring_deq;
3017 irqreturn_t ret = IRQ_NONE;
3018 u64 temp_64;
3019 u32 status;
3020 int event_loop = 0;
3021
3022 spin_lock(&xhci->lock);
3023
3024 status = readl(&xhci->op_regs->status);
3025 if (status == ~(u32)0) {
3026 xhci_hc_died(xhci);
3027 ret = IRQ_HANDLED;
3028 goto out;
3029 }
3030
3031 if (!(status & STS_EINT))
3032 goto out;
3033
3034 if (status & STS_FATAL) {
3035 xhci_warn(xhci, "WARNING: Host System Error\n");
3036 xhci_halt(xhci);
3037 ret = IRQ_HANDLED;
3038 goto out;
3039 }
3040
3041
3042
3043
3044
3045
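/*
 * Acknowledge the interrupt by writing the EINT bit back to the status
 * register (write-1-to-clear) so later events can raise new interrupts.
 */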
3046 status |= STS_EINT;
3047 writel(status, &xhci->op_regs->status);
3048
3049 if (!hcd->msi_enabled) {
3050 u32 irq_pending;
3051 irq_pending = readl(&xhci->ir_set->irq_pending);
3052 irq_pending |= IMAN_IP;
3053 writel(irq_pending, &xhci->ir_set->irq_pending);
3054 }
3055
3056 if (xhci->xhc_state & XHCI_STATE_DYING ||
3057 xhci->xhc_state & XHCI_STATE_HALTED) {
3058 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
3059 "Shouldn't IRQs be disabled?\n");
3060
3061
3062
3063 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
3064 xhci_write_64(xhci, temp_64 | ERST_EHB,
3065 &xhci->ir_set->erst_dequeue);
3066 ret = IRQ_HANDLED;
3067 goto out;
3068 }
3069
3070 event_ring_deq = xhci->event_ring->dequeue;
3071
3072
3073
3074 while (xhci_handle_event(xhci) > 0) {
3075 if (event_loop++ < TRBS_PER_SEGMENT / 2)
3076 continue;
3077 xhci_update_erst_dequeue(xhci, event_ring_deq);
3078 event_ring_deq = xhci->event_ring->dequeue;
3079
3080
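/* Event ring is at least half full; make isoc TDs interrupt more often */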
3081 if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
3082 xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
3083
3084 event_loop = 0;
3085 }
3086
3087 xhci_update_erst_dequeue(xhci, event_ring_deq);
3088 ret = IRQ_HANDLED;
3089
3090 out:
3091 spin_unlock(&xhci->lock);
3092
3093 return ret;
3094 }
3095
3096 irqreturn_t xhci_msi_irq(int irq, void *hcd)
3097 {
3098 return xhci_irq(hcd);
3099 }
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
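/*
 * Generic function for queueing a TRB on a ring.  The caller must have
 * checked that there is room on the ring.
 */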
3110 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
3111 bool more_trbs_coming,
3112 u32 field1, u32 field2, u32 field3, u32 field4)
3113 {
3114 struct xhci_generic_trb *trb;
3115
3116 trb = &ring->enqueue->generic;
3117 trb->field[0] = cpu_to_le32(field1);
3118 trb->field[1] = cpu_to_le32(field2);
3119 trb->field[2] = cpu_to_le32(field3);
3120
3121 wmb();
3122 trb->field[3] = cpu_to_le32(field4);
3123
3124 trace_xhci_queue_trb(ring, trb);
3125
3126 inc_enq(xhci, ring, more_trbs_coming);
3127 }
3128
3129
3130
3131
3132
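/*
 * Check the endpoint ring state and make sure there is room to queue
 * num_trbs TRBs, expanding the ring if necessary.
 */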
3133 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
3134 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
3135 {
3136 unsigned int num_trbs_needed;
3137 unsigned int link_trb_count = 0;
3138
3139
3140 switch (ep_state) {
3141 case EP_STATE_DISABLED:
3142
3143
3144
3145
3146 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
3147 return -ENOENT;
3148 case EP_STATE_ERROR:
3149 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
3150
3151
3152 return -EINVAL;
3153 case EP_STATE_HALTED:
3154 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
3155 break;
3156 case EP_STATE_STOPPED:
3157 case EP_STATE_RUNNING:
3158 break;
3159 default:
3160 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
3161
3162
3163
3164
3165 return -EINVAL;
3166 }
3167
3168 while (1) {
3169 if (room_on_ring(xhci, ep_ring, num_trbs))
3170 break;
3171
3172 if (ep_ring == xhci->cmd_ring) {
3173 xhci_err(xhci, "Do not support expand command ring\n");
3174 return -ENOMEM;
3175 }
3176
3177 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
3178 "ERROR no room on ep ring, try ring expansion");
3179 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
3180 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
3181 mem_flags)) {
3182 xhci_err(xhci, "Ring expansion failed\n");
3183 return -ENOMEM;
3184 }
3185 }
3186
3187 while (trb_is_link(ep_ring->enqueue)) {
3188
3189
3190
3191 if (!xhci_link_trb_quirk(xhci) &&
3192 !(ep_ring->type == TYPE_ISOC &&
3193 (xhci->quirks & XHCI_AMD_0x96_HOST)))
3194 ep_ring->enqueue->link.control &=
3195 cpu_to_le32(~TRB_CHAIN);
3196 else
3197 ep_ring->enqueue->link.control |=
3198 cpu_to_le32(TRB_CHAIN);
3199
3200 wmb();
3201 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
3202
3203
3204 if (link_trb_toggles_cycle(ep_ring->enqueue))
3205 ep_ring->cycle_state ^= 1;
3206
3207 ep_ring->enq_seg = ep_ring->enq_seg->next;
3208 ep_ring->enqueue = ep_ring->enq_seg->trbs;
3209
3210
3211 if (link_trb_count++ > ep_ring->num_segs) {
3212 xhci_warn(xhci, "Ring is an endless link TRB loop\n");
3213 return -EINVAL;
3214 }
3215 }
3216
3217 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
3218 xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
3219 return -EINVAL;
3220 }
3221
3222 return 0;
3223 }
3224
3225 static int prepare_transfer(struct xhci_hcd *xhci,
3226 struct xhci_virt_device *xdev,
3227 unsigned int ep_index,
3228 unsigned int stream_id,
3229 unsigned int num_trbs,
3230 struct urb *urb,
3231 unsigned int td_index,
3232 gfp_t mem_flags)
3233 {
3234 int ret;
3235 struct urb_priv *urb_priv;
3236 struct xhci_td *td;
3237 struct xhci_ring *ep_ring;
3238 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3239
3240 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
3241 stream_id);
3242 if (!ep_ring) {
3243 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3244 stream_id);
3245 return -EINVAL;
3246 }
3247
3248 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
3249 num_trbs, mem_flags);
3250 if (ret)
3251 return ret;
3252
3253 urb_priv = urb->hcpriv;
3254 td = &urb_priv->td[td_index];
3255
3256 INIT_LIST_HEAD(&td->td_list);
3257 INIT_LIST_HEAD(&td->cancelled_td_list);
3258
3259 if (td_index == 0) {
3260 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3261 if (unlikely(ret))
3262 return ret;
3263 }
3264
3265 td->urb = urb;
3266
3267 list_add_tail(&td->td_list, &ep_ring->td_list);
3268 td->start_seg = ep_ring->enq_seg;
3269 td->first_trb = ep_ring->enqueue;
3270
3271 return 0;
3272 }
3273
3274 unsigned int count_trbs(u64 addr, u64 len)
3275 {
3276 unsigned int num_trbs;
3277
3278 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3279 TRB_MAX_BUFF_SIZE);
3280 if (num_trbs == 0)
3281 num_trbs++;
3282
3283 return num_trbs;
3284 }
3285
3286 static inline unsigned int count_trbs_needed(struct urb *urb)
3287 {
3288 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3289 }
3290
3291 static unsigned int count_sg_trbs_needed(struct urb *urb)
3292 {
3293 struct scatterlist *sg;
3294 unsigned int i, len, full_len, num_trbs = 0;
3295
3296 full_len = urb->transfer_buffer_length;
3297
3298 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
3299 len = sg_dma_len(sg);
3300 num_trbs += count_trbs(sg_dma_address(sg), len);
3301 len = min_t(unsigned int, len, full_len);
3302 full_len -= len;
3303 if (full_len == 0)
3304 break;
3305 }
3306
3307 return num_trbs;
3308 }
3309
3310 static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
3311 {
3312 u64 addr, len;
3313
3314 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3315 len = urb->iso_frame_desc[i].length;
3316
3317 return count_trbs(addr, len);
3318 }
3319
3320 static void check_trb_math(struct urb *urb, int running_total)
3321 {
3322 if (unlikely(running_total != urb->transfer_buffer_length))
3323 dev_err(&urb->dev->dev,
3324 "%s - ep %#x - Miscalculated tx length, queued %#x (%d), asked for %#x (%d)\n",
3325 __func__, urb->ep->desc.bEndpointAddress,
3326 running_total, running_total,
3327 urb->transfer_buffer_length, urb->transfer_buffer_length);
3330 }
3331
3332 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3333 unsigned int ep_index, unsigned int stream_id, int start_cycle,
3334 struct xhci_generic_trb *start_trb)
3335 {
3336
3337
3338
3339
3340 wmb();
3341 if (start_cycle)
3342 start_trb->field[3] |= cpu_to_le32(start_cycle);
3343 else
3344 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3345 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3346 }
3347
3348 static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3349 struct xhci_ep_ctx *ep_ctx)
3350 {
3351 int xhci_interval;
3352 int ep_interval;
3353
3354 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3355 ep_interval = urb->interval;
3356
3357
3358 if (urb->dev->speed == USB_SPEED_LOW ||
3359 urb->dev->speed == USB_SPEED_FULL)
3360 ep_interval *= 8;
3361
3362
3363
3364
3365 if (xhci_interval != ep_interval) {
3366 dev_dbg_ratelimited(&urb->dev->dev,
3367 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3368 ep_interval, ep_interval == 1 ? "" : "s",
3369 xhci_interval, xhci_interval == 1 ? "" : "s");
3370 urb->interval = xhci_interval;
3371
3372 if (urb->dev->speed == USB_SPEED_LOW ||
3373 urb->dev->speed == USB_SPEED_FULL)
3374 urb->interval /= 8;
3375 }
3376 }
3377
3378
3379
3380
3381
3382
3383
3384 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3385 struct urb *urb, int slot_id, unsigned int ep_index)
3386 {
3387 struct xhci_ep_ctx *ep_ctx;
3388
3389 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3390 check_interval(xhci, urb, ep_ctx);
3391
3392 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3393 }
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
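/*
 * Compute the TD Size field for a transfer TRB.  xHCI 1.0 and newer
 * hosts want the number of max-packet-sized packets remaining in the
 * TD after this TRB; older hosts want the remaining byte count shifted
 * right by 10.  The last TRB of a TD must report a TD Size of zero.
 */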
3415 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3416 int trb_buff_len, unsigned int td_total_len,
3417 struct urb *urb, bool more_trbs_coming)
3418 {
3419 u32 maxp, total_packet_count;
3420
3421
3422 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3423 return ((td_total_len - transferred) >> 10);
3424
3425
3426 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
3427 trb_buff_len == td_total_len)
3428 return 0;
3429
3430
3431 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3432 trb_buff_len = 0;
3433
3434 maxp = usb_endpoint_maxp(&urb->ep->desc);
3435 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3436
3437
3438 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3439 }
3440
3441
3442 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
3443 u32 *trb_buff_len, struct xhci_segment *seg)
3444 {
3445 struct device *dev = xhci_to_hcd(xhci)->self.controller;
3446 unsigned int unalign;
3447 unsigned int max_pkt;
3448 u32 new_buff_len;
3449 size_t len;
3450
3451 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
3452 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3453
3454
3455 if (unalign == 0)
3456 return 0;
3457
3458 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3459 unalign, *trb_buff_len);
3460
3461
3462 if (*trb_buff_len > unalign) {
3463 *trb_buff_len -= unalign;
3464 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
3465 return 0;
3466 }
3467
3468
3469
3470
3471
3472
3473 new_buff_len = max_pkt - (enqd_len % max_pkt);
3474
3475 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3476 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3477
3478
3479 if (usb_urb_dir_out(urb)) {
3480 if (urb->num_sgs) {
3481 len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
3482 seg->bounce_buf, new_buff_len, enqd_len);
3483 if (len != new_buff_len)
3484 xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
3485 len, new_buff_len);
3486 } else {
3487 memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
3488 }
3489
3490 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3491 max_pkt, DMA_TO_DEVICE);
3492 } else {
3493 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3494 max_pkt, DMA_FROM_DEVICE);
3495 }
3496
3497 if (dma_mapping_error(dev, seg->bounce_dma)) {
3498
3499 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3500 return 0;
3501 }
3502 *trb_buff_len = new_buff_len;
3503 seg->bounce_len = new_buff_len;
3504 seg->bounce_offs = enqd_len;
3505
3506 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3507
3508 return 1;
3509 }
3510
3511
3512 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3513 struct urb *urb, int slot_id, unsigned int ep_index)
3514 {
3515 struct xhci_ring *ring;
3516 struct urb_priv *urb_priv;
3517 struct xhci_td *td;
3518 struct xhci_generic_trb *start_trb;
3519 struct scatterlist *sg = NULL;
3520 bool more_trbs_coming = true;
3521 bool need_zero_pkt = false;
3522 bool first_trb = true;
3523 unsigned int num_trbs;
3524 unsigned int start_cycle, num_sgs = 0;
3525 unsigned int enqd_len, block_len, trb_buff_len, full_len;
3526 int sent_len, ret;
3527 u32 field, length_field, remainder;
3528 u64 addr, send_addr;
3529
3530 ring = xhci_urb_to_transfer_ring(xhci, urb);
3531 if (!ring)
3532 return -EINVAL;
3533
3534 full_len = urb->transfer_buffer_length;
3535
3536 if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
3537 num_sgs = urb->num_mapped_sgs;
3538 sg = urb->sg;
3539 addr = (u64) sg_dma_address(sg);
3540 block_len = sg_dma_len(sg);
3541 num_trbs = count_sg_trbs_needed(urb);
3542 } else {
3543 num_trbs = count_trbs_needed(urb);
3544 addr = (u64) urb->transfer_dma;
3545 block_len = full_len;
3546 }
3547 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3548 ep_index, urb->stream_id,
3549 num_trbs, urb, 0, mem_flags);
3550 if (unlikely(ret < 0))
3551 return ret;
3552
3553 urb_priv = urb->hcpriv;
3554
3555
3556 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
3557 need_zero_pkt = true;
3558
3559 td = &urb_priv->td[0];
3560
3561
3562
3563
3564
3565
3566 start_trb = &ring->enqueue->generic;
3567 start_cycle = ring->cycle_state;
3568 send_addr = addr;
3569
3570
3571 for (enqd_len = 0; first_trb || enqd_len < full_len;
3572 enqd_len += trb_buff_len) {
3573 field = TRB_TYPE(TRB_NORMAL);
3574
3575
3576 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3577 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
3578
3579 if (enqd_len + trb_buff_len > full_len)
3580 trb_buff_len = full_len - enqd_len;
3581
3582
3583 if (first_trb) {
3584 first_trb = false;
3585 if (start_cycle == 0)
3586 field |= TRB_CYCLE;
3587 } else
3588 field |= ring->cycle_state;
3589
3590
3591
3592
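/*
 * Chain the TRBs of a TD together; the chain bit is cleared on the
 * last TRB.  If the next TRB is a link TRB, the data may need to be
 * bounce-buffered so the TD stays packet aligned across the segment
 * boundary.
 */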
3593 if (enqd_len + trb_buff_len < full_len) {
3594 field |= TRB_CHAIN;
3595 if (trb_is_link(ring->enqueue + 1)) {
3596 if (xhci_align_td(xhci, urb, enqd_len,
3597 &trb_buff_len,
3598 ring->enq_seg)) {
3599 send_addr = ring->enq_seg->bounce_dma;
3600
3601 td->bounce_seg = ring->enq_seg;
3602 }
3603 }
3604 }
3605 if (enqd_len + trb_buff_len >= full_len) {
3606 field &= ~TRB_CHAIN;
3607 field |= TRB_IOC;
3608 more_trbs_coming = false;
3609 td->last_trb = ring->enqueue;
3610 td->last_trb_seg = ring->enq_seg;
3611 if (xhci_urb_suitable_for_idt(urb)) {
3612 memcpy(&send_addr, urb->transfer_buffer,
3613 trb_buff_len);
3614 le64_to_cpus(&send_addr);
3615 field |= TRB_IDT;
3616 }
3617 }
3618
3619
3620 if (usb_urb_dir_in(urb))
3621 field |= TRB_ISP;
3622
3623
3624 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3625 full_len, urb, more_trbs_coming);
3626
3627 length_field = TRB_LEN(trb_buff_len) |
3628 TRB_TD_SIZE(remainder) |
3629 TRB_INTR_TARGET(0);
3630
3631 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
3632 lower_32_bits(send_addr),
3633 upper_32_bits(send_addr),
3634 length_field,
3635 field);
3636 td->num_trbs++;
3637 addr += trb_buff_len;
3638 sent_len = trb_buff_len;
3639
3640 while (sg && sent_len >= block_len) {
3641
3642 --num_sgs;
3643 sent_len -= block_len;
3644 sg = sg_next(sg);
3645 if (num_sgs != 0 && sg) {
3646 block_len = sg_dma_len(sg);
3647 addr = (u64) sg_dma_address(sg);
3648 addr += sent_len;
3649 }
3650 }
3651 block_len -= sent_len;
3652 send_addr = addr;
3653 }
3654
3655 if (need_zero_pkt) {
3656 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3657 ep_index, urb->stream_id,
3658 1, urb, 1, mem_flags);
3659 urb_priv->td[1].last_trb = ring->enqueue;
3660 urb_priv->td[1].last_trb_seg = ring->enq_seg;
3661 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3662 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3663 urb_priv->td[1].num_trbs++;
3664 }
3665
3666 check_trb_math(urb, enqd_len);
3667 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3668 start_cycle, start_trb);
3669 return 0;
3670 }
3671
3672
3673 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3674 struct urb *urb, int slot_id, unsigned int ep_index)
3675 {
3676 struct xhci_ring *ep_ring;
3677 int num_trbs;
3678 int ret;
3679 struct usb_ctrlrequest *setup;
3680 struct xhci_generic_trb *start_trb;
3681 int start_cycle;
3682 u32 field;
3683 struct urb_priv *urb_priv;
3684 struct xhci_td *td;
3685
3686 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3687 if (!ep_ring)
3688 return -EINVAL;
3689
3690
3691
3692
3693
3694 if (!urb->setup_packet)
3695 return -EINVAL;
3696
3697
3698 num_trbs = 2;
3699
3700
3701
3702
3703
3704 if (urb->transfer_buffer_length > 0)
3705 num_trbs++;
3706 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3707 ep_index, urb->stream_id,
3708 num_trbs, urb, 0, mem_flags);
3709 if (ret < 0)
3710 return ret;
3711
3712 urb_priv = urb->hcpriv;
3713 td = &urb_priv->td[0];
3714 td->num_trbs = num_trbs;
3715
3716
3717
3718
3719
3720
3721 start_trb = &ep_ring->enqueue->generic;
3722 start_cycle = ep_ring->cycle_state;
3723
3724
3725
3726 setup = (struct usb_ctrlrequest *) urb->setup_packet;
3727 field = 0;
3728 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3729 if (start_cycle == 0)
3730 field |= 0x1;
3731
3732
3733 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
3734 if (urb->transfer_buffer_length > 0) {
3735 if (setup->bRequestType & USB_DIR_IN)
3736 field |= TRB_TX_TYPE(TRB_DATA_IN);
3737 else
3738 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3739 }
3740 }
3741
3742 queue_trb(xhci, ep_ring, true,
3743 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3744 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3745 TRB_LEN(8) | TRB_INTR_TARGET(0),
3746
3747 field);
3748
3749
3750
3751 if (usb_urb_dir_in(urb))
3752 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3753 else
3754 field = TRB_TYPE(TRB_DATA);
3755
3756 if (urb->transfer_buffer_length > 0) {
3757 u32 length_field, remainder;
3758 u64 addr;
3759
3760 if (xhci_urb_suitable_for_idt(urb)) {
3761 memcpy(&addr, urb->transfer_buffer,
3762 urb->transfer_buffer_length);
3763 le64_to_cpus(&addr);
3764 field |= TRB_IDT;
3765 } else {
3766 addr = (u64) urb->transfer_dma;
3767 }
3768
3769 remainder = xhci_td_remainder(xhci, 0,
3770 urb->transfer_buffer_length,
3771 urb->transfer_buffer_length,
3772 urb, 1);
3773 length_field = TRB_LEN(urb->transfer_buffer_length) |
3774 TRB_TD_SIZE(remainder) |
3775 TRB_INTR_TARGET(0);
3776 if (setup->bRequestType & USB_DIR_IN)
3777 field |= TRB_DIR_IN;
3778 queue_trb(xhci, ep_ring, true,
3779 lower_32_bits(addr),
3780 upper_32_bits(addr),
3781 length_field,
3782 field | ep_ring->cycle_state);
3783 }
3784
3785
3786 td->last_trb = ep_ring->enqueue;
3787 td->last_trb_seg = ep_ring->enq_seg;
3788
3789
3790
3791 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3792 field = 0;
3793 else
3794 field = TRB_DIR_IN;
3795 queue_trb(xhci, ep_ring, false,
3796 0,
3797 0,
3798 TRB_INTR_TARGET(0),
3799
3800 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3801
3802 giveback_first_trb(xhci, slot_id, ep_index, 0,
3803 start_cycle, start_trb);
3804 return 0;
3805 }
3806
3807
3808
3809
3810
3811
3812
3813
3814
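/*
 * Transfer Burst Count (TBC) for an isoc TRB: the number of bursts
 * needed to move all packets in this TD, zero based.  Only SuperSpeed
 * and faster devices burst more than one packet per service interval.
 */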
3815 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3816 struct urb *urb, unsigned int total_packet_count)
3817 {
3818 unsigned int max_burst;
3819
3820 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
3821 return 0;
3822
3823 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3824 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3825 }
3826
3827
3828
3829
3830
3831
3832
3833
3834
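/*
 * Transfer Last Burst Packet Count (TLBPC): the number of packets in
 * the final burst of the TD, zero based.  For pre-SuperSpeed devices
 * the whole TD is a single "burst".
 */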
3835 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3836 struct urb *urb, unsigned int total_packet_count)
3837 {
3838 unsigned int max_burst;
3839 unsigned int residue;
3840
3841 if (xhci->hci_version < 0x100)
3842 return 0;
3843
3844 if (urb->dev->speed >= USB_SPEED_SUPER) {
3845
3846 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3847 residue = total_packet_count % (max_burst + 1);
3848
3849
3850
3851 if (residue == 0)
3852 return max_burst;
3853 return residue - 1;
3854 }
3855 if (total_packet_count == 0)
3856 return 0;
3857 return total_packet_count - 1;
3858 }
3859
3860
3861
3862
3863
3864
3865
3866
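/*
 * Compute the Frame ID field for an isoc TRB: the frame the TD should
 * start on, validated against the window the controller allows around
 * the current MFINDEX value.
 */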
3867 static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3868 struct urb *urb, int index)
3869 {
3870 int start_frame, ist, ret = 0;
3871 int start_frame_id, end_frame_id, current_frame_id;
3872
3873 if (urb->dev->speed == USB_SPEED_LOW ||
3874 urb->dev->speed == USB_SPEED_FULL)
3875 start_frame = urb->start_frame + index * urb->interval;
3876 else
3877 start_frame = (urb->start_frame + index * urb->interval) >> 3;
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3888 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3889 ist <<= 3;
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904 current_frame_id = readl(&xhci->run_regs->microframe_index);
3905 start_frame_id = roundup(current_frame_id + ist + 1, 8);
3906 end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3907
3908 start_frame &= 0x7ff;
3909 start_frame_id = (start_frame_id >> 3) & 0x7ff;
3910 end_frame_id = (end_frame_id >> 3) & 0x7ff;
3911
3912 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3913 __func__, index, readl(&xhci->run_regs->microframe_index),
3914 start_frame_id, end_frame_id, start_frame);
3915
3916 if (start_frame_id < end_frame_id) {
3917 if (start_frame > end_frame_id ||
3918 start_frame < start_frame_id)
3919 ret = -EINVAL;
3920 } else if (start_frame_id > end_frame_id) {
3921 if ((start_frame > end_frame_id &&
3922 start_frame < start_frame_id))
3923 ret = -EINVAL;
3924 } else {
3925 ret = -EINVAL;
3926 }
3927
3928 if (index == 0) {
3929 if (ret == -EINVAL || start_frame == start_frame_id) {
3930 start_frame = start_frame_id + 1;
3931 if (urb->dev->speed == USB_SPEED_LOW ||
3932 urb->dev->speed == USB_SPEED_FULL)
3933 urb->start_frame = start_frame;
3934 else
3935 urb->start_frame = start_frame << 3;
3936 ret = 0;
3937 }
3938 }
3939
3940 if (ret) {
3941 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3942 start_frame, current_frame_id, index,
3943 start_frame_id, end_frame_id);
3944 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
3945 return ret;
3946 }
3947
3948 return start_frame;
3949 }
3950
3951
3952 static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
3953 {
3954 if (xhci->hci_version < 0x100)
3955 return false;
3956
3957 if (i == num_tds - 1)
3958 return false;
3959
3960
3961
3962
3963 if (i && xhci->quirks & XHCI_AVOID_BEI)
3964 return !!(i % xhci->isoc_bei_interval);
3965
3966 return true;
3967 }
3968
3969
3970 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3971 struct urb *urb, int slot_id, unsigned int ep_index)
3972 {
3973 struct xhci_ring *ep_ring;
3974 struct urb_priv *urb_priv;
3975 struct xhci_td *td;
3976 int num_tds, trbs_per_td;
3977 struct xhci_generic_trb *start_trb;
3978 bool first_trb;
3979 int start_cycle;
3980 u32 field, length_field;
3981 int running_total, trb_buff_len, td_len, td_remain_len, ret;
3982 u64 start_addr, addr;
3983 int i, j;
3984 bool more_trbs_coming;
3985 struct xhci_virt_ep *xep;
3986 int frame_id;
3987
3988 xep = &xhci->devs[slot_id]->eps[ep_index];
3989 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3990
3991 num_tds = urb->number_of_packets;
3992 if (num_tds < 1) {
3993 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3994 return -EINVAL;
3995 }
3996 start_addr = (u64) urb->transfer_dma;
3997 start_trb = &ep_ring->enqueue->generic;
3998 start_cycle = ep_ring->cycle_state;
3999
4000 urb_priv = urb->hcpriv;
4001
4002 for (i = 0; i < num_tds; i++) {
4003 unsigned int total_pkt_count, max_pkt;
4004 unsigned int burst_count, last_burst_pkt_count;
4005 u32 sia_frame_id;
4006
4007 first_trb = true;
4008 running_total = 0;
4009 addr = start_addr + urb->iso_frame_desc[i].offset;
4010 td_len = urb->iso_frame_desc[i].length;
4011 td_remain_len = td_len;
4012 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
4013 total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
4014
4015
4016 if (total_pkt_count == 0)
4017 total_pkt_count++;
4018 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
4019 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
4020 urb, total_pkt_count);
4021
4022 trbs_per_td = count_isoc_trbs_needed(urb, i);
4023
4024 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
4025 urb->stream_id, trbs_per_td, urb, i, mem_flags);
4026 if (ret < 0) {
4027 if (i == 0)
4028 return ret;
4029 goto cleanup;
4030 }
4031 td = &urb_priv->td[i];
4032 td->num_trbs = trbs_per_td;
4033
4034 sia_frame_id = TRB_SIA;
4035 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
4036 HCC_CFC(xhci->hcc_params)) {
4037 frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
4038 if (frame_id >= 0)
4039 sia_frame_id = TRB_FRAME_ID(frame_id);
4040 }
4041
4042
4043
4044
4045
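/*
 * The first TRB of the TD carries the isoc-specific fields.  Keep the
 * cycle bit of the very first queued TRB inverted for now so the
 * hardware cannot start the TD before the whole URB is queued.
 */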
4046 field = TRB_TYPE(TRB_ISOC) |
4047 TRB_TLBPC(last_burst_pkt_count) |
4048 sia_frame_id |
4049 (i ? ep_ring->cycle_state : !start_cycle);
4050
4051
4052 if (!xep->use_extended_tbc)
4053 field |= TRB_TBC(burst_count);
4054
4055
4056 for (j = 0; j < trbs_per_td; j++) {
4057 u32 remainder = 0;
4058
4059
4060 if (!first_trb)
4061 field = TRB_TYPE(TRB_NORMAL) |
4062 ep_ring->cycle_state;
4063
4064
4065 if (usb_urb_dir_in(urb))
4066 field |= TRB_ISP;
4067
4068
4069 if (j < trbs_per_td - 1) {
4070 more_trbs_coming = true;
4071 field |= TRB_CHAIN;
4072 } else {
4073 more_trbs_coming = false;
4074 td->last_trb = ep_ring->enqueue;
4075 td->last_trb_seg = ep_ring->enq_seg;
4076 field |= TRB_IOC;
4077 if (trb_block_event_intr(xhci, num_tds, i))
4078 field |= TRB_BEI;
4079 }
4080
4081 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
4082 if (trb_buff_len > td_remain_len)
4083 trb_buff_len = td_remain_len;
4084
4085
4086 remainder = xhci_td_remainder(xhci, running_total,
4087 trb_buff_len, td_len,
4088 urb, more_trbs_coming);
4089
4090 length_field = TRB_LEN(trb_buff_len) |
4091 TRB_INTR_TARGET(0);
4092
4093
4094 if (first_trb && xep->use_extended_tbc)
4095 length_field |= TRB_TD_SIZE_TBC(burst_count);
4096 else
4097 length_field |= TRB_TD_SIZE(remainder);
4098 first_trb = false;
4099
4100 queue_trb(xhci, ep_ring, more_trbs_coming,
4101 lower_32_bits(addr),
4102 upper_32_bits(addr),
4103 length_field,
4104 field);
4105 running_total += trb_buff_len;
4106
4107 addr += trb_buff_len;
4108 td_remain_len -= trb_buff_len;
4109 }
4110
4111
4112 if (running_total != td_len) {
4113 xhci_err(xhci, "ISOC TD length unmatch\n");
4114 ret = -EINVAL;
4115 goto cleanup;
4116 }
4117 }
4118
4119
4120 if (HCC_CFC(xhci->hcc_params))
4121 xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
4122
4123 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
4124 if (xhci->quirks & XHCI_AMD_PLL_FIX)
4125 usb_amd_quirk_pll_disable();
4126 }
4127 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
4128
4129 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
4130 start_cycle, start_trb);
4131 return 0;
4132 cleanup:
4133
4134
4135 for (i--; i >= 0; i--)
4136 list_del_init(&urb_priv->td[i].td_list);
4137
4138
4139
4140
4141
4142
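/*
 * Turn the partially queued TDs into no-ops with a software-owned
 * cycle bit so the hardware cannot execute them, then rewind the
 * enqueue pointer to where this URB started.
 */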
4143 urb_priv->td[0].last_trb = ep_ring->enqueue;
4144
4145 td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
4146
4147
4148 ep_ring->enqueue = urb_priv->td[0].first_trb;
4149 ep_ring->enq_seg = urb_priv->td[0].start_seg;
4150 ep_ring->cycle_state = start_cycle;
4151 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
4152 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
4153 return ret;
4154 }
4155
4156
4157
4158
4159
4160
4161
4162
4163 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
4164 struct urb *urb, int slot_id, unsigned int ep_index)
4165 {
4166 struct xhci_virt_device *xdev;
4167 struct xhci_ring *ep_ring;
4168 struct xhci_ep_ctx *ep_ctx;
4169 int start_frame;
4170 int num_tds, num_trbs, i;
4171 int ret;
4172 struct xhci_virt_ep *xep;
4173 int ist;
4174
4175 xdev = xhci->devs[slot_id];
4176 xep = &xhci->devs[slot_id]->eps[ep_index];
4177 ep_ring = xdev->eps[ep_index].ring;
4178 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
4179
4180 num_trbs = 0;
4181 num_tds = urb->number_of_packets;
4182 for (i = 0; i < num_tds; i++)
4183 num_trbs += count_isoc_trbs_needed(urb, i);
4184
4185
4186
4187
4188 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
4189 num_trbs, mem_flags);
4190 if (ret)
4191 return ret;
4192
4193
4194
4195
4196
4197 check_interval(xhci, urb, ep_ctx);
4198
4199
4200 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
4201 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
4202 urb->start_frame = xep->next_frame_id;
4203 goto skip_start_over;
4204 }
4205 }
4206
4207 start_frame = readl(&xhci->run_regs->microframe_index);
4208 start_frame &= 0x3fff;
4209
4210
4211
4212
4213 ist = HCS_IST(xhci->hcs_params2) & 0x7;
4214 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
4215 ist <<= 3;
4216 start_frame += ist + XHCI_CFC_DELAY;
4217 start_frame = roundup(start_frame, 8);
4218
4219
4220
4221
4222
4223 if (urb->dev->speed == USB_SPEED_LOW ||
4224 urb->dev->speed == USB_SPEED_FULL) {
4225 start_frame = roundup(start_frame, urb->interval << 3);
4226 urb->start_frame = start_frame >> 3;
4227 } else {
4228 start_frame = roundup(start_frame, urb->interval);
4229 urb->start_frame = start_frame;
4230 }
4231
4232 skip_start_over:
4233 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
4234
4235 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
4236 }
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
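/*
 * Queue a command TRB on the command ring.  Space for commands that
 * must not fail is kept reserved; ordinary commands have to leave that
 * reservation intact.
 */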
4248 static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4249 u32 field1, u32 field2,
4250 u32 field3, u32 field4, bool command_must_succeed)
4251 {
4252 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4253 int ret;
4254
4255 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4256 (xhci->xhc_state & XHCI_STATE_HALTED)) {
4257 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4258 return -ESHUTDOWN;
4259 }
4260
4261 if (!command_must_succeed)
4262 reserved_trbs++;
4263
4264 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4265 reserved_trbs, GFP_ATOMIC);
4266 if (ret < 0) {
4267 xhci_err(xhci, "ERR: No room for command on command ring\n");
4268 if (command_must_succeed)
4269 xhci_err(xhci, "ERR: Reserved TRB counting for "
4270 "unfailable commands failed.\n");
4271 return ret;
4272 }
4273
4274 cmd->command_trb = xhci->cmd_ring->enqueue;
4275
4276
4277 if (list_empty(&xhci->cmd_list)) {
4278 xhci->current_cmd = cmd;
4279 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
4280 }
4281
4282 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
4283
4284 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4285 field4 | xhci->cmd_ring->cycle_state);
4286 return 0;
4287 }
4288
4289
4290 int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
4291 u32 trb_type, u32 slot_id)
4292 {
4293 return queue_command(xhci, cmd, 0, 0, 0,
4294 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4295 }
4296
4297
4298 int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4299 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
4300 {
4301 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4302 upper_32_bits(in_ctx_ptr), 0,
4303 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
4304 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4305 }
4306
4307 int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4308 u32 field1, u32 field2, u32 field3, u32 field4)
4309 {
4310 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4311 }
4312
4313
4314 int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4315 u32 slot_id)
4316 {
4317 return queue_command(xhci, cmd, 0, 0, 0,
4318 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4319 false);
4320 }
4321
4322
4323 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
4324 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
4325 u32 slot_id, bool command_must_succeed)
4326 {
4327 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4328 upper_32_bits(in_ctx_ptr), 0,
4329 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4330 command_must_succeed);
4331 }
4332
4333
4334 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
4335 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
4336 {
4337 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4338 upper_32_bits(in_ctx_ptr), 0,
4339 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4340 command_must_succeed);
4341 }
4342
4343
4344
4345
4346
4347 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
4348 int slot_id, unsigned int ep_index, int suspend)
4349 {
4350 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4351 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4352 u32 type = TRB_TYPE(TRB_STOP_RING);
4353 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4354
4355 return queue_command(xhci, cmd, 0, 0, 0,
4356 trb_slot_id | trb_ep_index | type | trb_suspend, false);
4357 }
4358
4359 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
4360 int slot_id, unsigned int ep_index,
4361 enum xhci_ep_reset_type reset_type)
4362 {
4363 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4364 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4365 u32 type = TRB_TYPE(TRB_RESET_EP);
4366
4367 if (reset_type == EP_SOFT_RESET)
4368 type |= TRB_TSP;
4369
4370 return queue_command(xhci, cmd, 0, 0, 0,
4371 trb_slot_id | trb_ep_index | type, false);
4372 }