// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence CDNSP DRD Driver.
 *
 * Copyright (C) 2020 Cadence.
 *
 * Author: Pawel Laszczak <pawell@cadence.com>
 *
 * Code based on Linux XHCI driver.
 * Origin: Copyright (C) 2008 Intel Corp
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include "cdnsp-trace.h"
#include "cdnsp-gadget.h"
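
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * offset from the segment start of the TRB.
 */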
dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
				 union cdnsp_trb *trb)
{
	unsigned long segment_offset = trb - seg->trbs;

	if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
		return 0;

	return seg->dma + (segment_offset * sizeof(*trb));
}

static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
			    struct cdnsp_segment *seg,
			    union cdnsp_trb *trb)
{
	return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
{
	if (cdnsp_trb_is_link(trb)) {
		/* Unchain chained link TRBs. */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB. */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}
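
/*
 * Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.
 */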
static void cdnsp_next_trb(struct cdnsp_device *pdev,
			   struct cdnsp_ring *ring,
			   struct cdnsp_segment **seg,
			   union cdnsp_trb **trb)
{
	if (cdnsp_trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}
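
/* See cycle bit rules: software is the consumer for the event ring only. */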
void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
{
	/* The event ring doesn't have link TRBs, check for the last TRB. */
	if (ring->type == TYPE_EVENT) {
		if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}

		if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;

		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link TRBs. */
	if (!cdnsp_trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (cdnsp_trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
out:
	trace_cdnsp_inc_deq(ring);
}
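
/*
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each link TRB is a separate TD).
 *
 * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell.
 */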
static void cdnsp_inc_enq(struct cdnsp_device *pdev,
			  struct cdnsp_ring *ring,
			  bool more_trbs_coming)
{
	union cdnsp_trb *next;
	u32 chain;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

	/* If this is not event ring, there is one less usable TRB. */
	if (!cdnsp_trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	/* Update the enqueue pointer further if that was a link TRB. */
	while (cdnsp_trb_is_link(next)) {
		/*
		 * If the caller doesn't plan on enqueuing more TDs before
		 * ringing the doorbell, then we don't want to give the link
		 * TRB to the hardware just yet. We'll give the link TRB back
		 * in cdnsp_prepare_ring() just before we enqueue the TD at
		 * the top of the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		next->link.control &= cpu_to_le32(~TRB_CHAIN);
		next->link.control |= cpu_to_le32(chain);

		/* Give this link TRB to the hardware. */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (cdnsp_link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}

	trace_cdnsp_inc_enq(ring);
}
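
/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment.
 */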
static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
			       struct cdnsp_ring *ring,
			       unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return false;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;

		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return false;
	}

	return true;
}
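
/*
 * Workaround: a doorbell rung while an LPM-capable USB2 link is in the L1
 * state may not be serviced, so force the link back to U0 (L0) before the
 * doorbell write.
 */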
static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
{
	if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
		cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
}

/* Ring the controller doorbell after placing a command on the ring. */
void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
{
	writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
}
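
/*
 * Ring the doorbell for the given endpoint and stream ID. Returns true if
 * the doorbell was rung, false when the endpoint state doesn't allow it.
 */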
static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep,
				   unsigned int stream_id)
{
	__le32 __iomem *reg_addr = &pdev->dba->ep_db;
	unsigned int ep_state = pep->ep_state;
	unsigned int db_value;

	/*
	 * Don't ring the doorbell for this endpoint if the endpoint is halted
	 * or disabled.
	 */
	if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
		return false;

	/* For stream capable endpoints driver can ring doorbell only twice. */
	if (pep->ep_state & EP_HAS_STREAMS) {
		if (pep->stream_info.drbls_count >= 2)
			return false;

		pep->stream_info.drbls_count++;
	}

	pep->ep_state &= ~EP_STOPPED;

	if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
	    !pdev->ep0_expect_in)
		db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
	else
		db_value = DB_VALUE(pep->idx, stream_id);

	trace_cdnsp_tr_drbl(pep, stream_id);

	writel(db_value, reg_addr);

	cdnsp_force_l0_go(pdev);

	/* Doorbell was set. */
	return true;
}
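
/*
 * Get the transfer ring for the given endpoint and stream ID. For stream
 * capable endpoints the stream ID is boundary checked first.
 */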
static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
						  struct cdnsp_ep *pep,
						  unsigned int stream_id)
{
	if (!(pep->ep_state & EP_HAS_STREAMS))
		return pep->ring;

	if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
		dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
			pep->name, stream_id);
		return NULL;
	}

	return pep->stream_info.stream_rings[stream_id];
}

static struct cdnsp_ring *
cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
			       struct cdnsp_request *preq)
{
	return cdnsp_get_transfer_ring(pdev, preq->pep,
				       preq->request.stream_id);
}

/* Ring the doorbell for any rings with pending requests. */
void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
					  struct cdnsp_ep *pep)
{
	struct cdnsp_stream_info *stream_info;
	unsigned int stream_id;
	int ret;

	if (pep->ep_state & EP_DIS_IN_RROGRESS)
		return;

	/* A ring has pending requests if its TD list is not empty. */
	if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
		if (pep->ring && !list_empty(&pep->ring->td_list))
			cdnsp_ring_ep_doorbell(pdev, pep, 0);
		return;
	}

	stream_info = &pep->stream_info;

	for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
		struct cdnsp_td *td, *td_temp;
		struct cdnsp_ring *ep_ring;

		if (stream_info->drbls_count >= 2)
			return;

		ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
		if (!ep_ring)
			continue;

		if (!ep_ring->stream_active || ep_ring->stream_rejected)
			continue;

		list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
					 td_list) {
			if (td->drbl)
				continue;

			ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
			if (ret)
				td->drbl = 1;
		}
	}
}
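
/*
 * Returns the TRB DMA address the controller is using as its dequeue pointer
 * for the given endpoint (or for the given stream ring when the endpoint has
 * streams enabled).
 */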
static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
			    unsigned int ep_index,
			    unsigned int stream_id)
{
	struct cdnsp_stream_ctx *st_ctx;
	struct cdnsp_ep *pep;

	/* The endpoint array is indexed by endpoint index; stream_id only
	 * selects the stream context below.
	 */
	pep = &pdev->eps[ep_index];

	if (pep->ep_state & EP_HAS_STREAMS) {
		st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
		return le64_to_cpu(st_ctx->stream_ring);
	}

	return le64_to_cpu(pep->out_ctx->deq);
}
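
/*
 * Move the controller endpoint ring dequeue pointer past cur_td. Record the
 * new dequeue segment, dequeue pointer and consumer cycle state in the state
 * structure. The new state is later programmed with a Set TR Dequeue Pointer
 * command.
 */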
static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
					 struct cdnsp_ep *pep,
					 unsigned int stream_id,
					 struct cdnsp_td *cur_td,
					 struct cdnsp_dequeue_state *state)
{
	bool td_last_trb_found = false;
	struct cdnsp_segment *new_seg;
	struct cdnsp_ring *ep_ring;
	union cdnsp_trb *new_deq;
	bool cycle_found = false;
	u64 hw_dequeue;

	ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
	if (!ep_ring)
		return;

	/*
	 * Dig out the cycle state saved by the controller during the
	 * stop endpoint command.
	 */
	hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;
	state->stream_id = stream_id;

	/*
	 * We want to find the pointer, segment and cycle state of the new TRB
	 * (the one after the current TD's last_trb). We know the cycle state
	 * at hw_dequeue, so walk the ring until both hw_dequeue and last_trb
	 * are found.
	 */
	do {
		if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;

			if (td_last_trb_found)
				break;
		}

		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && cdnsp_trb_is_link(new_deq) &&
		    cdnsp_link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out. */
		if (new_deq == pep->ring->dequeue) {
			dev_err(pdev->dev,
				"Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	trace_cdnsp_new_deq_state(state);
}
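
/*
 * Turn all TRBs of the TD into No-op TRBs. When flip_cycle is set, also flip
 * the cycle bit of every TRB except the first and the last one, so a
 * partially queued TD can't be picked up by the controller.
 */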
static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
			     struct cdnsp_ring *ep_ring,
			     struct cdnsp_td *td,
			     bool flip_cycle)
{
	struct cdnsp_segment *seg = td->start_seg;
	union cdnsp_trb *trb = td->first_trb;

	while (1) {
		cdnsp_trb_to_noop(trb, TRB_TR_NOOP);

		/* Flip cycle bit of all but the first and last TRB. */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
	}
}
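
/*
 * The TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb (inclusive). Returns the segment containing suspect_dma if it is
 * part of this TD, or NULL otherwise.
 */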
static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
					     struct cdnsp_segment *start_seg,
					     union cdnsp_trb *start_trb,
					     union cdnsp_trb *end_trb,
					     dma_addr_t suspect_dma)
{
	struct cdnsp_segment *cur_seg;
	union cdnsp_trb *temp_trb;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	dma_addr_t start_dma;

	start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;

		temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];

		/* We may get an event for a Link TRB in the middle of a TD. */
		end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);

		/* If the end TRB isn't in this segment, this is set to 0. */
		end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);

		trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma,
					      end_trb_dma, cur_seg->dma,
					      end_seg_dma);

		if (end_trb_dma > 0) {
			/*
			 * The end TRB is in this segment, so suspect should
			 * be here.
			 */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma &&
				    suspect_dma <= end_trb_dma) {
					return cur_seg;
				}
			} else {
				/*
				 * Case for one segment with a
				 * TD wrapped around to the top.
				 */
				if ((suspect_dma >= start_dma &&
				     suspect_dma <= end_seg_dma) ||
				    (suspect_dma >= cur_seg->dma &&
				     suspect_dma <= end_trb_dma)) {
					return cur_seg;
				}
			}

			return NULL;
		}

		/* Might still be somewhere in this segment. */
		if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
			return cur_seg;

		cur_seg = cur_seg->next;
		start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}

static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
					 struct cdnsp_ring *ring,
					 struct cdnsp_td *td)
{
	struct cdnsp_segment *seg = td->bounce_seg;
	struct cdnsp_request *preq;
	size_t len;

	if (!seg)
		return;

	preq = td->preq;

	trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs,
				 seg->bounce_dma, 0);

	if (!preq->direction) {
		dma_unmap_single(pdev->dev, seg->bounce_dma,
				 ring->bounce_buf_len, DMA_TO_DEVICE);
		return;
	}

	dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);

	/* For IN transfers we need to copy the data from bounce to sg. */
	len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
				   seg->bounce_buf, seg->bounce_len,
				   seg->bounce_offs);
	if (len != seg->bounce_len)
		dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
			 len, seg->bounce_len);

	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
			     struct cdnsp_ep *pep,
			     struct cdnsp_dequeue_state *deq_state)
{
	struct cdnsp_ring *ep_ring;
	int ret;

	if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
		cdnsp_ring_doorbell_for_active_rings(pdev, pep);
		return 0;
	}

	cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx));
	trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx);

	/*
	 * Update the ring's dequeue segment and dequeue pointer
	 * to reflect the new position.
	 */
	ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);

	if (cdnsp_trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != deq_state->new_deq_ptr) {
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;

		if (cdnsp_trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue == deq_state->new_deq_ptr)
				break;

			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
	}

	/*
	 * The Set TR Dequeue Pointer command failed (e.g. timed out). This is
	 * a critical error and the controller will be stopped.
	 */
	if (ret)
		return -ESHUTDOWN;

	/* Restart any rings with pending requests. */
	cdnsp_ring_doorbell_for_active_rings(pdev, pep);

	return 0;
}

int cdnsp_remove_request(struct cdnsp_device *pdev,
			 struct cdnsp_request *preq,
			 struct cdnsp_ep *pep)
{
	struct cdnsp_dequeue_state deq_state;
	struct cdnsp_td *cur_td = NULL;
	struct cdnsp_ring *ep_ring;
	struct cdnsp_segment *seg;
	int status = -ECONNRESET;
	int ret = 0;
	u64 hw_deq;

	memset(&deq_state, 0, sizeof(deq_state));

	trace_cdnsp_remove_request(pep->out_ctx);
	trace_cdnsp_remove_request_td(preq);

	cur_td = &preq->td;
	ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);

	/*
	 * If we stopped on the TD we need to cancel, then we have to
	 * move the controller endpoint ring dequeue pointer past
	 * this TD.
	 */
	hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
	hw_deq &= ~0xf;

	seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
			      cur_td->last_trb, hw_deq);

	if (seg && (pep->ep_state & EP_ENABLED))
		cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
					     cur_td, &deq_state);
	else
		cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);

	/*
	 * The event handler won't see a completion for this TD anymore,
	 * so remove it from the endpoint ring's TD list.
	 */
	list_del_init(&cur_td->td_list);
	ep_ring->num_tds--;
	pep->stream_info.td_count--;

	/*
	 * During disconnecting all endpoints will be disabled, so we don't
	 * have to worry about updating the dequeue pointer.
	 */
	if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) {
		status = -ESHUTDOWN;
		ret = 0;
	} else {
		ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
	}

	cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
	cdnsp_gadget_giveback(pep, cur_td->preq, status);

	return ret;
}

static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
{
	struct cdnsp_port *port = pdev->active_port;
	u8 old_port = 0;

	if (port && port->port_num == port_id)
		return 0;

	if (port)
		old_port = port->port_num;

	if (port_id == pdev->usb2_port.port_num) {
		port = &pdev->usb2_port;
	} else if (port_id == pdev->usb3_port.port_num) {
		port = &pdev->usb3_port;
	} else {
		dev_err(pdev->dev, "Port event with invalid port ID %d\n",
			port_id);
		return -EINVAL;
	}

	if (port_id != old_port) {
		cdnsp_disable_slot(pdev);
		pdev->active_port = port;
		cdnsp_enable_slot(pdev);
	}

	if (port_id == pdev->usb2_port.port_num)
		cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
	else
		writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
		       &pdev->usb3_port.regs->portpmsc);

	return 0;
}

static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
				     union cdnsp_trb *event)
{
	struct cdnsp_port_regs __iomem *port_regs;
	u32 portsc, cmd_regs;
	bool port2 = false;
	u32 link_state;
	u32 port_id;

	/* Port status change events always have a successful completion. */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		dev_err(pdev->dev, "ERR: incorrect PSC event\n");

	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));

	if (cdnsp_update_port_id(pdev, port_id))
		goto cleanup;

	port_regs = pdev->active_port->regs;

	if (port_id == pdev->usb2_port.port_num)
		port2 = true;

new_event:
	portsc = readl(&port_regs->portsc);
	writel(cdnsp_port_state_to_neutral(portsc) |
	       (portsc & PORT_CHANGE_BITS), &port_regs->portsc);

	trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc);

	pdev->gadget.speed = cdnsp_port_speed(portsc);
	link_state = portsc & PORT_PLS_MASK;

	/* Port Link State change detected. */
	if ((portsc & PORT_PLC)) {
		if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
		    link_state == XDEV_RESUME) {
			cmd_regs = readl(&pdev->op_regs->command);
			if (!(cmd_regs & CMD_R_S))
				goto cleanup;

			if (DEV_SUPERSPEED_ANY(portsc)) {
				cdnsp_set_link_state(pdev, &port_regs->portsc,
						     XDEV_U0);

				cdnsp_resume_gadget(pdev);
			}
		}

		if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
		    link_state == XDEV_U0) {
			pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;

			cdnsp_force_header_wakeup(pdev, 1);
			cdnsp_ring_cmd_db(pdev);
			cdnsp_wait_for_cmd_compl(pdev);
		}

		if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
		    !DEV_SUPERSPEED_ANY(portsc))
			cdnsp_resume_gadget(pdev);

		if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3)
			cdnsp_suspend_gadget(pdev);

		pdev->link_state = link_state;
	}

	if (portsc & PORT_CSC) {
		/* Detach device. */
		if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
			cdnsp_disconnect_gadget(pdev);

		/* Attach device. */
		if (portsc & PORT_CONNECT) {
			if (!port2)
				cdnsp_irq_reset(pdev);

			usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
		}
	}

	/* Port reset. */
	if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
		cdnsp_irq_reset(pdev);
		pdev->u1_allowed = 0;
		pdev->u2_allowed = 0;
		pdev->may_wakeup = 0;
	}

	if (portsc & PORT_OCC)
		dev_err(pdev->dev, "Port Over Current detected\n");

	if (portsc & PORT_CEC)
		dev_err(pdev->dev, "Port Configure Error detected\n");

	if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
		goto new_event;

cleanup:
	cdnsp_inc_deq(pdev, pdev->event_ring);
}

static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
			     struct cdnsp_td *td,
			     struct cdnsp_ring *ep_ring,
			     int *status)
{
	struct cdnsp_request *preq = td->preq;

	/* If a bounce buffer was used to align this TD then unmap it. */
	cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);

	/*
	 * If the controller said we transferred more data than the buffer
	 * length, play it safe and say we didn't transfer anything.
	 */
	if (preq->request.actual > preq->request.length) {
		preq->request.actual = 0;
		*status = 0;
	}

	list_del_init(&td->td_list);
	ep_ring->num_tds--;
	preq->pep->stream_info.td_count--;

	cdnsp_gadget_giveback(preq->pep, preq, *status);
}

static void cdnsp_finish_td(struct cdnsp_device *pdev,
			    struct cdnsp_td *td,
			    struct cdnsp_transfer_event *event,
			    struct cdnsp_ep *ep,
			    int *status)
{
	struct cdnsp_ring *ep_ring;
	u32 trb_comp_code;

	ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
	    trb_comp_code == COMP_STOPPED ||
	    trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
		/*
		 * The Endpoint Stop Command completion will take care of any
		 * stopped TDs. A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		return;
	}

	/* Update ring dequeue pointer. */
	while (ep_ring->dequeue != td->last_trb)
		cdnsp_inc_deq(pdev, ep_ring);

	cdnsp_inc_deq(pdev, ep_ring);

	cdnsp_td_cleanup(pdev, td, ep_ring, status);
}

/* Sum TRB lengths from ring dequeue up to stop_trb, _excluding_ stop_trb. */
static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
				 struct cdnsp_ring *ring,
				 union cdnsp_trb *stop_trb)
{
	struct cdnsp_segment *seg = ring->deq_seg;
	union cdnsp_trb *trb = ring->dequeue;
	u32 sum;

	for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
		if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}
	return sum;
}

static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
				    struct cdnsp_ep *pep,
				    unsigned int stream_id,
				    int start_cycle,
				    struct cdnsp_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();

	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	if ((pep->ep_state & EP_HAS_STREAMS) &&
	    !pep->stream_info.first_prime_det) {
		trace_cdnsp_wait_for_prime(pep, stream_id);
		return 0;
	}

	return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
}
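
/*
 * Process control TDs, update USB request status and actual_length.
 */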
static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
				  struct cdnsp_td *td,
				  union cdnsp_trb *event_trb,
				  struct cdnsp_transfer_event *event,
				  struct cdnsp_ep *pep,
				  int *status)
{
	struct cdnsp_ring *ep_ring;
	u32 remaining;
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

	/*
	 * If on data stage then update the actual_length of the USB
	 * request and flag it as set, so it won't be overwritten in the event
	 * for the last TRB.
	 */
	if (trb_type == TRB_DATA) {
		td->request_length_set = true;
		td->preq->request.actual = td->preq->request.length - remaining;
	}

	/* At status stage. */
	if (!td->request_length_set)
		td->preq->request.actual = td->preq->request.length;

	if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
	    pdev->three_stage_setup) {
		td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
				td_list);
		pdev->ep0_stage = CDNSP_STATUS_STAGE;

		cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
					 &td->last_trb->generic);
		return;
	}

	*status = 0;

	cdnsp_finish_td(pdev, td, event, pep, status);
}
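
/*
 * Process isochronous TDs, update USB request status and actual_length.
 */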
static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
				  struct cdnsp_td *td,
				  union cdnsp_trb *ep_trb,
				  struct cdnsp_transfer_event *event,
				  struct cdnsp_ep *pep,
				  int status)
{
	struct cdnsp_request *preq = td->preq;
	u32 remaining, requested, ep_trb_len;
	bool sum_trbs_for_length = false;
	struct cdnsp_ring *ep_ring;
	u32 trb_comp_code;
	u32 td_length;

	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));

	requested = preq->request.length;

	/* Handle completion code. */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		preq->request.status = 0;
		break;
	case COMP_SHORT_PACKET:
		preq->request.status = 0;
		sum_trbs_for_length = true;
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
	case COMP_BABBLE_DETECTED_ERROR:
		preq->request.status = -EOVERFLOW;
		break;
	case COMP_STOPPED:
		sum_trbs_for_length = true;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		/* Field normally containing residue now contains transferred. */
		preq->request.status = 0;
		requested = remaining;
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		requested = 0;
		remaining = 0;
		break;
	default:
		sum_trbs_for_length = true;
		preq->request.status = -1;
		break;
	}

	if (sum_trbs_for_length) {
		td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
		td_length += ep_trb_len - remaining;
	} else {
		td_length = requested;
	}

	td->preq->request.actual += td_length;

	cdnsp_finish_td(pdev, td, event, pep, &status);
}

static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
			       struct cdnsp_td *td,
			       struct cdnsp_transfer_event *event,
			       struct cdnsp_ep *pep,
			       int status)
{
	struct cdnsp_ring *ep_ring;

	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
	td->preq->request.status = -EXDEV;
	td->preq->request.actual = 0;

	/* Update ring dequeue pointer. */
	while (ep_ring->dequeue != td->last_trb)
		cdnsp_inc_deq(pdev, ep_ring);

	cdnsp_inc_deq(pdev, ep_ring);

	cdnsp_td_cleanup(pdev, td, ep_ring, &status);
}
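
/*
 * Process bulk and interrupt TDs, update USB request status and
 * actual_length.
 */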
static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
				       struct cdnsp_td *td,
				       union cdnsp_trb *ep_trb,
				       struct cdnsp_transfer_event *event,
				       struct cdnsp_ep *ep,
				       int *status)
{
	u32 remaining, requested, ep_trb_len;
	struct cdnsp_ring *ep_ring;
	u32 trb_comp_code;

	ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	requested = td->preq->request.length;

	switch (trb_comp_code) {
	case COMP_SUCCESS:
	case COMP_SHORT_PACKET:
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		td->preq->request.actual = remaining;
		goto finish_td;
	case COMP_STOPPED_LENGTH_INVALID:
		/* Stopped on ep trb with invalid length, exclude it. */
		ep_trb_len = 0;
		remaining = 0;
		break;
	}

	if (ep_trb == td->last_trb)
		ep_trb_len = requested - remaining;
	else
		ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
			ep_trb_len - remaining;
	td->preq->request.actual = ep_trb_len;

finish_td:
	ep->stream_info.drbls_count--;

	cdnsp_finish_td(pdev, td, event, ep, status);
}

static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
				 struct cdnsp_transfer_event *event)
{
	struct cdnsp_generic_trb *generic;
	struct cdnsp_ring *ep_ring;
	struct cdnsp_ep *pep;
	int cur_stream;
	int ep_index;
	int host_sid;
	int dev_sid;

	generic = (struct cdnsp_generic_trb *)event;
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0]));
	host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2]));

	pep = &pdev->eps[ep_index];

	if (!(pep->ep_state & EP_HAS_STREAMS))
		return;

	if (host_sid == STREAM_PRIME_ACK) {
		pep->stream_info.first_prime_det = 1;
		for (cur_stream = 1; cur_stream < pep->stream_info.num_streams;
		    cur_stream++) {
			ep_ring = pep->stream_info.stream_rings[cur_stream];
			ep_ring->stream_active = 1;
			ep_ring->stream_rejected = 0;
		}
	}

	if (host_sid == STREAM_REJECTED) {
		struct cdnsp_td *td, *td_temp;

		pep->stream_info.drbls_count--;
		ep_ring = pep->stream_info.stream_rings[dev_sid];
		ep_ring->stream_active = 0;
		ep_ring->stream_rejected = 1;

		list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
					 td_list) {
			td->drbl = 0;
		}
	}

	cdnsp_ring_doorbell_for_active_rings(pdev, pep);
}
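
/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted TRB DMA address or an endpoint in disabled state.
 */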
static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
				 struct cdnsp_transfer_event *event)
{
	const struct usb_endpoint_descriptor *desc;
	bool handling_skipped_tds = false;
	struct cdnsp_segment *ep_seg;
	struct cdnsp_ring *ep_ring;
	int status = -EINPROGRESS;
	union cdnsp_trb *ep_trb;
	dma_addr_t ep_trb_dma;
	struct cdnsp_ep *pep;
	struct cdnsp_td *td;
	u32 trb_comp_code;
	int invalidate;
	int ep_index;

	invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	ep_trb_dma = le64_to_cpu(event->buffer);

	pep = &pdev->eps[ep_index];
	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));

	/*
	 * If device is disconnected then all requests will be dequeued
	 * by upper layers as part of the disconnect sequence.
	 * We don't want to handle such an event to avoid racing.
	 */
	if (invalidate || !pdev->gadget.connected)
		goto cleanup;

	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
		trace_cdnsp_ep_disabled(pep->out_ctx);
		goto err_out;
	}

	/* Some transfer events don't always point to a TRB. */
	if (!ep_ring) {
		switch (trb_comp_code) {
		case COMP_INVALID_STREAM_TYPE_ERROR:
		case COMP_INVALID_STREAM_ID_ERROR:
		case COMP_RING_UNDERRUN:
		case COMP_RING_OVERRUN:
			goto cleanup;
		default:
			dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
				pep->name);
			goto err_out;
		}
	}

	/* Look for some error cases that need special treatment. */
	switch (trb_comp_code) {
	case COMP_BABBLE_DETECTED_ERROR:
		status = -EOVERFLOW;
		break;
	case COMP_RING_UNDERRUN:
	case COMP_RING_OVERRUN:
		/*
		 * When the Isoch ring is empty, the controller will generate
		 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
		 * Underrun Event for an OUT Isoch endpoint.
		 */
		goto cleanup;
	case COMP_MISSED_SERVICE_ERROR:
		/*
		 * When encountering a missed service error, one or more isoc
		 * TDs may have been missed by the controller.
		 * Set the skip flag of the ep_ring; complete the missed TDs
		 * as short transfers when processing the ep_ring next time.
		 */
		pep->skip = true;
		break;
	}

	do {
		/*
		 * This TRB should be in the TD at the head of this ring's TD
		 * list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * Don't print warnings if it's due to a stopped
			 * endpoint generating an extra completion event, or
			 * an event for the last TRB of a short TD we already
			 * got a short event for.
			 */
			if (!(trb_comp_code == COMP_STOPPED ||
			      trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
			      ep_ring->last_td_was_short))
				trace_cdnsp_trb_without_td(ep_ring,
					(struct cdnsp_generic_trb *)event);

			if (pep->skip) {
				pep->skip = false;
				trace_cdnsp_ep_list_empty_with_skip(pep, 0);
			}

			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
				td_list);

		/* Is this TRB part of the currently executing TD? */
		ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
					 ep_ring->dequeue, td->last_trb,
					 ep_trb_dma);

		/*
		 * Skip the Force Stopped Event. The event_trb(ep_trb_dma) of
		 * FSE is not in the current TD pointed by ep_ring->dequeue
		 * because the hardware dequeue pointer is still at the
		 * previous TRB of the current TD. The previous TRB may be a
		 * link TRB or the last TRB of the previous TD. The command
		 * completion handler will take care of the rest.
		 */
		if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
				trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
			pep->skip = false;
			goto cleanup;
		}

		desc = td->preq->pep->endpoint.desc;
		if (!ep_seg) {
			if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
				/* Something is busted, give up! */
				dev_err(pdev->dev,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				return -EINVAL;
			}

			cdnsp_skip_isoc_td(pdev, td, event, pep, status);
			goto cleanup;
		}

		if (trb_comp_code == COMP_SHORT_PACKET)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (pep->skip) {
			pep->skip = false;
			cdnsp_skip_isoc_td(pdev, td, event, pep, status);
			goto cleanup;
		}

		ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
				       / sizeof(*ep_trb)];

		trace_cdnsp_handle_transfer(ep_ring,
					    (struct cdnsp_generic_trb *)ep_trb);

		if (cdnsp_trb_is_noop(ep_trb))
			goto cleanup;

		if (usb_endpoint_xfer_control(desc))
			cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
					      &status);
		else if (usb_endpoint_xfer_isoc(desc))
			cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
					      status);
		else
			cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
						   &status);
cleanup:
		handling_skipped_tds = pep->skip;

		/*
		 * Do not update event ring dequeue pointer if we're in a loop
		 * processing missed TDs.
		 */
		if (!handling_skipped_tds)
			cdnsp_inc_deq(pdev, pdev->event_ring);

		/*
		 * If pep->skip is set, it means there are missed TDs on the
		 * endpoint ring that need to be taken care of.
		 * Process them as short transfers until reaching the TD
		 * pointed to by the event.
		 */
	} while (handling_skipped_tds);
	return 0;

err_out:
	dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
		(unsigned long long)
		cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
				      pdev->event_ring->dequeue),
		lower_32_bits(le64_to_cpu(event->buffer)),
		upper_32_bits(le64_to_cpu(event->buffer)),
		le32_to_cpu(event->transfer_len),
		le32_to_cpu(event->flags));
	return -EINVAL;
}
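
/*
 * Handles one event on the event ring. Returns true if there may be more
 * events to process (the caller should call again), false when the software
 * dequeue pointer has caught up with the controller.
 */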
static bool cdnsp_handle_event(struct cdnsp_device *pdev)
{
	unsigned int comp_code;
	union cdnsp_trb *event;
	bool update_ptrs = true;
	u32 cycle_bit;
	int ret = 0;
	u32 flags;

	event = pdev->event_ring->dequeue;
	flags = le32_to_cpu(event->event_cmd.flags);
	cycle_bit = (flags & TRB_CYCLE);

	/* Does the controller or driver own the TRB? */
	if (cycle_bit != pdev->event_ring->cycle_state)
		return false;

	trace_cdnsp_handle_event(pdev->event_ring, &event->generic);

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * reads of the event's flags/data below.
	 */
	rmb();

	switch (flags & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_COMPLETION):
		/*
		 * Command completion can't be handled in interrupt context,
		 * so just increment the command ring dequeue pointer here.
		 */
		cdnsp_inc_deq(pdev, pdev->cmd_ring);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		cdnsp_handle_port_status(pdev, event);
		update_ptrs = false;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = cdnsp_handle_tx_event(pdev, &event->trans_event);
		if (ret >= 0)
			update_ptrs = false;
		break;
	case TRB_TYPE(TRB_SETUP):
		pdev->ep0_stage = CDNSP_SETUP_STAGE;
		pdev->setup_id = TRB_SETUPID_TO_TYPE(flags);
		pdev->setup_speed = TRB_SETUP_SPEEDID(flags);
		pdev->setup = *((struct usb_ctrlrequest *)
				&event->trans_event.buffer);

		cdnsp_setup_analyze(pdev);
		break;
	case TRB_TYPE(TRB_ENDPOINT_NRDY):
		cdnsp_handle_tx_nrdy(pdev, &event->trans_event);
		break;
	case TRB_TYPE(TRB_HC_EVENT): {
		comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));

		switch (comp_code) {
		case COMP_EVENT_RING_FULL_ERROR:
			dev_err(pdev->dev, "Event Ring Full\n");
			break;
		default:
			dev_err(pdev->dev, "Controller error code 0x%02x\n",
				comp_code);
		}

		break;
	}
	case TRB_TYPE(TRB_MFINDEX_WRAP):
	case TRB_TYPE(TRB_DRB_OVERFLOW):
		break;
	default:
		dev_warn(pdev->dev, "ERROR unknown event type %ld\n",
			 TRB_FIELD_TO_TYPE(flags));
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer. */
		cdnsp_inc_deq(pdev, pdev->event_ring);

	/*
	 * Caller will call us again to check if there are more items
	 * on the event ring.
	 */
	return true;
}

irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
{
	struct cdnsp_device *pdev = (struct cdnsp_device *)data;
	union cdnsp_trb *event_ring_deq;
	unsigned long flags;
	int counter = 0;

	spin_lock_irqsave(&pdev->lock, flags);

	if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
		/*
		 * While removing or stopping driver there may still be some
		 * pending interrupts.
		 */
		if (pdev->gadget_driver)
			cdnsp_died(pdev);

		spin_unlock_irqrestore(&pdev->lock, flags);
		return IRQ_HANDLED;
	}

	event_ring_deq = pdev->event_ring->dequeue;

	while (cdnsp_handle_event(pdev)) {
		if (++counter >= TRBS_PER_EV_DEQ_UPDATE) {
			cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0);
			event_ring_deq = pdev->event_ring->dequeue;
			counter = 0;
		}
	}

	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return IRQ_HANDLED;
}

irqreturn_t cdnsp_irq_handler(int irq, void *priv)
{
	struct cdnsp_device *pdev = (struct cdnsp_device *)priv;
	u32 irq_pending;
	u32 status;

	status = readl(&pdev->op_regs->status);

	if (status == ~(u32)0) {
		cdnsp_died(pdev);
		return IRQ_HANDLED;
	}

	if (!(status & STS_EINT))
		return IRQ_NONE;

	writel(status | STS_EINT, &pdev->op_regs->status);
	irq_pending = readl(&pdev->ir_set->irq_pending);
	irq_pending |= IMAN_IP;
	writel(irq_pending, &pdev->ir_set->irq_pending);

	if (status & STS_FATAL) {
		cdnsp_died(pdev);
		return IRQ_HANDLED;
	}

	return IRQ_WAKE_THREAD;
}
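
/*
 * Generic function for queueing a TRB on a ring. The caller must have checked
 * to make sure there's room on the ring.
 */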
static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring,
			    bool more_trbs_coming, u32 field1, u32 field2,
			    u32 field3, u32 field4)
{
	struct cdnsp_generic_trb *trb;

	trb = &ring->enqueue->generic;

	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	trace_cdnsp_queue_trb(ring, trb);
	cdnsp_inc_enq(pdev, ring, more_trbs_coming);
}
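
/*
 * Does various checks on the endpoint ring and makes it ready to queue
 * num_trbs: verifies the endpoint state, expands the ring if needed and
 * hands any link TRBs at the enqueue position over to the hardware.
 */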
static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
			      struct cdnsp_ring *ep_ring,
			      u32 ep_state,
			      unsigned int num_trbs,
			      gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint is in a state that accepts new TRBs. */
	switch (ep_state) {
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
	case EP_STATE_HALTED:
		break;
	default:
		dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
		return -EINVAL;
	}

	while (1) {
		if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
			break;

		trace_cdnsp_no_room_on_ring("try ring expansion");

		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
					 mem_flags)) {
			dev_err(pdev->dev, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	while (cdnsp_trb_is_link(ep_ring->enqueue)) {
		ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
		/* The cycle bit must be set as the last operation. */
		wmb();
		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
			ep_ring->cycle_state ^= 1;
		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
	}
	return 0;
}

static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
				  struct cdnsp_request *preq,
				  unsigned int num_trbs)
{
	struct cdnsp_ring *ep_ring;
	int ret;

	ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
					  preq->request.stream_id);
	if (!ep_ring)
		return -EINVAL;

	ret = cdnsp_prepare_ring(pdev, ep_ring,
				 GET_EP_CTX_STATE(preq->pep->out_ctx),
				 num_trbs, GFP_ATOMIC);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&preq->td.td_list);
	preq->td.preq = preq;

	/* Add this TD to the tail of the endpoint ring's TD list. */
	list_add_tail(&preq->td.td_list, &ep_ring->td_list);
	ep_ring->num_tds++;
	preq->pep->stream_info.td_count++;

	preq->td.start_seg = ep_ring->enq_seg;
	preq->td.first_trb = ep_ring->enqueue;

	return 0;
}

static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
				TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}

static unsigned int count_trbs_needed(struct cdnsp_request *preq)
{
	return cdnsp_count_trbs(preq->request.dma, preq->request.length);
}

static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
{
	unsigned int i, len, full_len, num_trbs = 0;
	struct scatterlist *sg;

	full_len = preq->request.length;

	for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
		len = min(len, full_len);
		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}

static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
{
	return cdnsp_count_trbs(preq->request.dma, preq->request.length);
}

static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
{
	if (running_total != preq->request.length)
		dev_err(preq->pep->pdev->dev,
			"%s - Miscalculated tx length, queued %#x, asked for %#x (%d)\n",
			preq->pep->name, running_total,
			preq->request.length, preq->request.actual);
}
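
/*
 * TD size is the number of max packet sized packets remaining in the TD
 * (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * The last TRB in a TD must have TD size set to zero.
 */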
static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
			      int transferred,
			      int trb_buff_len,
			      unsigned int td_total_len,
			      struct cdnsp_request *preq,
			      bool more_trbs_coming)
{
	u32 maxp, total_packet_count;

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred. */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}

static int cdnsp_align_td(struct cdnsp_device *pdev,
			  struct cdnsp_request *preq, u32 enqd_len,
			  u32 *trb_buff_len, struct cdnsp_segment *seg)
{
	struct device *dev = pdev->dev;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;

	max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
	unalign = (enqd_len + *trb_buff_len) % max_pkt;

	/* We got lucky, last normal TRB data on segment is packet aligned. */
	if (unalign == 0)
		return 0;

	/* Is the last normal TRB alignable by splitting it. */
	if (*trb_buff_len > unalign) {
		*trb_buff_len -= unalign;
		trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
						  enqd_len, 0, unalign);
		return 0;
	}

	/*
	 * We want enqd_len + trb_buff_len to sum up to a number aligned to
	 * the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
	 */
	new_buff_len = max_pkt - (enqd_len % max_pkt);

	if (new_buff_len > (preq->request.length - enqd_len))
		new_buff_len = (preq->request.length - enqd_len);

	/* Create a max max_pkt sized bounce buffer pointed to by last trb. */
	if (preq->direction) {
		sg_pcopy_to_buffer(preq->request.sg,
				   preq->request.num_mapped_sgs,
				   seg->bounce_buf, new_buff_len, enqd_len);
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_TO_DEVICE);
	} else {
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_FROM_DEVICE);
	}

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		/* Try without aligning. */
		dev_warn(pdev->dev,
			 "Failed mapping bounce buffer, not aligning\n");
		return 0;
	}

	*trb_buff_len = new_buff_len;
	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;

	trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
			       unalign);

	/*
	 * Bounce buffer successfully aligned, and seg->bounce_dma will be
	 * used in the transfer TRB as the new transfer buffer address.
	 */
	return 1;
}

int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
{
	unsigned int enqd_len, block_len, trb_buff_len, full_len;
	unsigned int start_cycle, num_sgs = 0;
	struct cdnsp_generic_trb *start_trb;
	u32 field, length_field, remainder;
	struct scatterlist *sg = NULL;
	bool more_trbs_coming = true;
	bool need_zero_pkt = false;
	bool zero_len_trb = false;
	struct cdnsp_ring *ring;
	bool first_trb = true;
	unsigned int num_trbs;
	struct cdnsp_ep *pep;
	u64 addr, send_addr;
	int sent_len, ret;

	ring = cdnsp_request_to_transfer_ring(pdev, preq);
	if (!ring)
		return -EINVAL;

	full_len = preq->request.length;

	if (preq->request.num_sgs) {
		num_sgs = preq->request.num_sgs;
		sg = preq->request.sg;
		addr = (u64)sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(preq);
	} else {
		num_trbs = count_trbs_needed(preq);
		addr = (u64)preq->request.dma;
		block_len = full_len;
	}

	pep = preq->pep;

	/* Deal with request.zero - need one more TD/TRB. */
	if (preq->request.zero && preq->request.length &&
	    IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
		need_zero_pkt = true;
		num_trbs++;
	}

	ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
	if (ret)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
	send_addr = addr;

	/* Queue the TRBs, even if they are zero-length. */
	for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
	     enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);

		/* TRB buffer should not cross 64KB boundaries. */
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		trb_buff_len = min(trb_buff_len, block_len);
		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Don't change the cycle bit of the first TRB until later. */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else {
			field |= ring->cycle_state;
		}

		/*
		 * Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
			field |= TRB_CHAIN;
			if (cdnsp_trb_is_link(ring->enqueue + 1)) {
				if (cdnsp_align_td(pdev, preq, enqd_len,
						   &trb_buff_len,
						   ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* Assuming TD won't span 2 segs. */
					preq->td.bounce_seg = ring->enq_seg;
				}
			}
		}

		if (enqd_len + trb_buff_len >= full_len) {
			if (need_zero_pkt && !zero_len_trb) {
				zero_len_trb = true;
			} else {
				zero_len_trb = false;
				field &= ~TRB_CHAIN;
				field |= TRB_IOC;
				more_trbs_coming = false;
				need_zero_pkt = false;
				preq->td.last_trb = ring->enqueue;
			}
		}

		/* Only set interrupt on short packet for OUT endpoints. */
		if (!preq->direction)
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
					       full_len, preq,
					       more_trbs_coming);

		length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		cdnsp_queue_trb(pdev, ring, more_trbs_coming,
				lower_32_bits(send_addr),
				upper_32_bits(send_addr),
				length_field,
				field);

		addr += trb_buff_len;
		sent_len = trb_buff_len;
		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			if (num_sgs != 0) {
				sg = sg_next(sg);
				block_len = sg_dma_len(sg);
				addr = (u64)sg_dma_address(sg);
				addr += sent_len;
			}
		}
		block_len -= sent_len;
		send_addr = addr;
	}

	cdnsp_check_trb_math(preq, enqd_len);
	ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
				       start_cycle, start_trb);

	if (ret)
		preq->td.drbl = 1;

	return 0;
}

int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
{
	u32 field, length_field, remainder;
	struct cdnsp_ep *pep = preq->pep;
	struct cdnsp_ring *ep_ring;
	int num_trbs;
	int ret;

	ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
	if (!ep_ring)
		return -EINVAL;

	/* 1 TRB for data, 1 for status. */
	num_trbs = (pdev->three_stage_setup) ? 2 : 1;

	ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
	if (ret)
		return ret;

	if (pdev->ep0_expect_in)
		field = TRB_TYPE(TRB_DATA) | TRB_IOC;
	else
		field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC;

	/* If there's data, queue the data stage TRB. */
	if (preq->request.length > 0) {
		remainder = cdnsp_td_remainder(pdev, 0, preq->request.length,
					       preq->request.length, preq, 1);

		length_field = TRB_LEN(preq->request.length) |
			TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0);

		if (pdev->ep0_expect_in)
			field |= TRB_DIR_IN;

		cdnsp_queue_trb(pdev, ep_ring, true,
				lower_32_bits(preq->request.dma),
				upper_32_bits(preq->request.dma), length_field,
				field | ep_ring->cycle_state |
				TRB_SETUPID(pdev->setup_id) |
				pdev->setup_speed);

		pdev->ep0_stage = CDNSP_DATA_STAGE;
	}

	/* Save the DMA address of the last TRB in the TD. */
	preq->td.last_trb = ep_ring->enqueue;

	/* Queue status TRB. */
	if (preq->request.length == 0)
		field = ep_ring->cycle_state;
	else
		field = (ep_ring->cycle_state ^ 1);

	if (preq->request.length > 0 && pdev->ep0_expect_in)
		field |= TRB_DIR_IN;

	if (pep->ep_state & EP0_HALTED_STATUS) {
		pep->ep_state &= ~EP0_HALTED_STATUS;
		field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
	} else {
		field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
	}

	cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
			field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
			TRB_TYPE(TRB_STATUS) | pdev->setup_speed);

	cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);

	return 0;
}

int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
	int ret = 0;

	if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED) {
		trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
		goto ep_stopped;
	}

	cdnsp_queue_stop_endpoint(pdev, pep->idx);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);

ep_stopped:
	pep->ep_state |= EP_STOPPED;
	return ret;
}

int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	int ret;

	cdnsp_queue_flush_endpoint(pdev, pep->idx);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);

	return ret;
}
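
/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD. Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst. For everything but SuperSpeed devices this field will be zero.
 */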
static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
					  struct cdnsp_request *preq,
					  unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (pdev->gadget.speed < USB_SPEED_SUPER)
		return 0;

	max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
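
/*
 * Returns the number of packets in the last "burst" of packets, zero based.
 * This field is valid for all device speeds. USB 2.0 devices can always do
 * one "burst", so the last burst packet count is the total number of packets
 * in the TD minus one.
 */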
static unsigned int
cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
				  struct cdnsp_request *preq,
				  unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (pdev->gadget.speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst. */
		max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
		residue = total_packet_count % (max_burst + 1);

		/*
		 * If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based, so we
		 * have to subtract one.
		 */
		if (residue == 0)
			return max_burst;

		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;

	return total_packet_count - 1;
}

/* Queue function for isoc transfers. */
static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
			       struct cdnsp_request *preq)
{
	int trb_buff_len, td_len, td_remain_len, ret;
	unsigned int burst_count, last_burst_pkt;
	unsigned int total_pkt_count, max_pkt;
	struct cdnsp_generic_trb *start_trb;
	bool more_trbs_coming = true;
	struct cdnsp_ring *ep_ring;
	int running_total = 0;
	u32 field, length_field;
	int start_cycle;
	int trbs_per_td;
	u64 addr;
	int i;

	ep_ring = preq->pep->ring;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;
	td_len = preq->request.length;
	addr = (u64)preq->request.dma;
	td_remain_len = td_len;

	max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
	total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

	/* A zero-length transfer still involves at least one packet. */
	if (total_pkt_count == 0)
		total_pkt_count++;

	burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
	last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
							   total_pkt_count);
	trbs_per_td = count_isoc_trbs_needed(preq);

	ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
	if (ret)
		goto cleanup;

	/*
	 * Set isoc specific data for the first TRB in a TD.
	 * Prevent HW from getting the TRBs by keeping the cycle state
	 * inverted in the first TD's isoc TRB.
	 */
	field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
		TRB_SIA | TRB_TBC(burst_count);

	if (!start_cycle)
		field |= TRB_CYCLE;

	/* Fill the rest of the TRB fields, and remaining normal TRBs. */
	for (i = 0; i < trbs_per_td; i++) {
		u32 remainder;

		/* Calculate TRB length. */
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		if (trb_buff_len > td_remain_len)
			trb_buff_len = td_remain_len;

		/* Set the TRB length, TD size, & interrupter fields. */
		remainder = cdnsp_td_remainder(pdev, running_total,
					       trb_buff_len, td_len, preq,
					       more_trbs_coming);

		length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);

		/* Only the first TRB is isoc, overwrite otherwise. */
		if (i) {
			field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
			length_field |= TRB_TD_SIZE(remainder);
		} else {
			length_field |= TRB_TD_SIZE_TBC(burst_count);
		}

		/* Only set interrupt on short packet for OUT EPs. */
		if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
			field |= TRB_ISP;

		/* Set the chain bit for all except the last TRB. */
		if (i < trbs_per_td - 1) {
			more_trbs_coming = true;
			field |= TRB_CHAIN;
		} else {
			more_trbs_coming = false;
			preq->td.last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
				lower_32_bits(addr), upper_32_bits(addr),
				length_field, field);

		running_total += trb_buff_len;
		addr += trb_buff_len;
		td_remain_len -= trb_buff_len;
	}

	/* Check TD length. */
	if (running_total != td_len) {
		dev_err(pdev->dev, "ISOC TD length unmatch\n");
		ret = -EINVAL;
		goto cleanup;
	}

	cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
				 start_cycle, start_trb);

	return 0;

cleanup:
	/* Clean up a partially enqueued isoc transfer. */
	list_del_init(&preq->td.td_list);
	ep_ring->num_tds--;

	/*
	 * Turn the TDs we've queued into No-ops with a software-owned cycle
	 * bit. That way the hardware won't accidentally start executing bogus
	 * TDs when we partially queued this TD.
	 */
	preq->td.last_trb = ep_ring->enqueue;

	cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);

	/* Reset the ring. */
	ep_ring->enqueue = preq->td.first_trb;
	ep_ring->enq_seg = preq->td.start_seg;
	ep_ring->cycle_state = start_cycle;
	return ret;
}

int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
				struct cdnsp_request *preq)
{
	struct cdnsp_ring *ep_ring;
	u32 ep_state;
	int num_trbs;
	int ret;

	ep_ring = preq->pep->ring;
	ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
	num_trbs = count_isoc_trbs_needed(preq);

	/*
	 * Check the ring to guarantee there is enough room for the whole
	 * request. Do not insert any TDs between this request and the next
	 * one.
	 */
	ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
	if (ret)
		return ret;

	return cdnsp_queue_isoc_tx(pdev, preq);
}
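
/*
 * Generic function for queueing a command TRB on the command ring.
 */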
static void cdnsp_queue_command(struct cdnsp_device *pdev,
				u32 field1,
				u32 field2,
				u32 field3,
				u32 field4)
{
	cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
			   GFP_ATOMIC);

	pdev->cmd.command_trb = pdev->cmd_ring->enqueue;

	cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
			field3, field4 | pdev->cmd_ring->cycle_state);
}

/* Queue a slot enable or disable request on the command ring. */
void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
{
	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
			    SLOT_ID_FOR_TRB(pdev->slot_id));
}

/* Queue an address device command TRB. */
void cdnsp_queue_address_device(struct cdnsp_device *pdev,
				dma_addr_t in_ctx_ptr,
				enum cdnsp_setup_dev setup)
{
	cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
			    upper_32_bits(in_ctx_ptr), 0,
			    TRB_TYPE(TRB_ADDR_DEV) |
			    SLOT_ID_FOR_TRB(pdev->slot_id) |
			    (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
}

/* Queue a reset device command TRB. */
void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
{
	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
			    SLOT_ID_FOR_TRB(pdev->slot_id));
}

/* Queue a configure endpoint command TRB. */
void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
				    dma_addr_t in_ctx_ptr)
{
	cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
			    upper_32_bits(in_ctx_ptr), 0,
			    TRB_TYPE(TRB_CONFIG_EP) |
			    SLOT_ID_FOR_TRB(pdev->slot_id));
}

/* Queue a stop endpoint command TRB. */
void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
{
	cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
			    EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
}

/* Queue a Set Transfer Ring Dequeue Pointer command TRB. */
void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep,
				   struct cdnsp_dequeue_state *deq_state)
{
	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
	u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	u32 trb_sct = 0;
	dma_addr_t addr;

	addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
				     deq_state->new_deq_ptr);

	if (deq_state->stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);

	cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
			    deq_state->new_cycle_state, upper_32_bits(addr),
			    trb_stream_id, trb_slot_id |
			    EP_ID_FOR_TRB(pep->idx) | type);
}

/* Queue a reset endpoint command TRB. */
void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
{
	cdnsp_queue_command(pdev, 0, 0, 0,
			    SLOT_ID_FOR_TRB(pdev->slot_id) |
			    EP_ID_FOR_TRB(ep_index) |
			    TRB_TYPE(TRB_RESET_EP));
}

/*
 * Queue a halt endpoint request on the command ring.
 */
void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
{
	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
			    SLOT_ID_FOR_TRB(pdev->slot_id) |
			    EP_ID_FOR_TRB(ep_index));
}

/*
 * Queue a flush endpoint request on the command ring.
 */
void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
				unsigned int ep_index)
{
	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
			    SLOT_ID_FOR_TRB(pdev->slot_id) |
			    EP_ID_FOR_TRB(ep_index));
}

void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
{
	u32 lo, mid;

	lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
	     TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
	mid = TRB_FH_TR_PACKET_DEV_NOT |
	      TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
	      TRB_FH_TO_INTERFACE(intf_num);

	cdnsp_queue_command(pdev, lo, mid, 0,
			    TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
}
2452 }