// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 *         Some code borrowed from the Linux EHCI driver.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"

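/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */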
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state,
					       unsigned int max_packet,
					       gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc_node(max_packet, flags,
					       dev_to_node(dev));
		if (!seg->bounce_buf) {
			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
			kfree(seg);
			return NULL;
		}
	}
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg->bounce_buf);
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
					struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}
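/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */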
static void xhci_link_segments(struct xhci_segment *prev,
			       struct xhci_segment *next,
			       enum xhci_ring_type type, bool chain_links)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		if (chain_links)
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

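/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */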
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
			    struct xhci_segment *first,
			    struct xhci_segment *last,
			    unsigned int num_segs)
{
	struct xhci_segment *next;
	bool chain_links;

	if (!ring || !first || !last)
		return;

	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
	chain_links = !!(xhci_link_trb_quirk(xhci) ||
			 (ring->type == TYPE_ISOC &&
			  (xhci->quirks & XHCI_AMD_0x96_HOST)));

	next = ring->enq_seg->next;
	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
	xhci_link_segments(last, next, ring->type, chain_links);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}
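/*
 * The xHC doesn't tell us which stream ring an event TRB came from, so the
 * driver keeps a radix tree that maps the segment-aligned portion of a TRB's
 * DMA address to the ring that owns that segment.  Segments are
 * TRB_SEGMENT_SIZE aligned, so shifting the DMA address right by
 * TRB_SEGMENT_SHIFT yields a unique key per segment.
 */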
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
				       struct xhci_ring *ring,
				       struct xhci_segment *seg,
				       gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map, key, ring);
	radix_tree_preload_end();
	return ret;
}

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
					struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
				ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}

void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	trace_xhci_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}

void xhci_initialize_ring_info(struct xhci_ring *ring,
			       unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.  The
	 * consumer must compare CCS to the cycle bit to check ownership,
	 * so CCS = 1.
	 */
	ring->cycle_state = cycle_state;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}

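/* Allocate segments and link them for a ring */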
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_segment *prev;
	bool chain_links;

	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
	chain_links = !!(xhci_link_trb_quirk(xhci) ||
			 (type == TYPE_ISOC &&
			  (xhci->quirks & XHCI_AMD_0x96_HOST)));

	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(prev, next, type, chain_links);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, *first, type, chain_links);
	*last = prev;

	return 0;
}

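/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */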
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type,
			max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
			     struct xhci_virt_device *virt_dev,
			     unsigned int ep_index)
{
	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
	virt_dev->eps[ep_index].ring = NULL;
}

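/*
 * Expand an existing ring.
 * Allocate a new ring which has same segment numbers and link the two rings.
 */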
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
			unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
			   (TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size */
	num_segs = max(ring->num_segs, num_segs_needed);

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type,
			ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
				ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;

		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	trace_xhci_ring_expansion(ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
		struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);

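/***************** Streams structures manipulation *************************/
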
static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size, stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
}
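/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */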
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size, dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

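/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger
 * than the number of streams the driver wants to use.  This is because the
 * number of stream context array entries must be a power of two.
 */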
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
			dev_to_node(dev));
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kcalloc_node(
			num_streams, sizeof(struct xhci_ring *), mem_flags,
			dev_to_node(dev));
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ring_array;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
					mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	/* The stream context array was allocated; free it on these paths */
	xhci_free_stream_ctx(xhci,
			stream_info->num_stream_ctxs,
			stream_info->stream_ctx_array,
			stream_info->ctx_array_dma);
cleanup_ring_array:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
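/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */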
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/*
	 * MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}

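/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */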
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

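/*
 * Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */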
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}

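/***************** Device context manipulation *************************/
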
static void xhci_free_tt_info(struct xhci_hcd *xhci,
			      struct xhci_virt_device *virt_dev,
			      int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have a real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
				dev_to_node(dev));
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}
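/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the drivers.
 */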
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];

	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	trace_xhci_free_virt_device(dev);

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; i++) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/*
		 * Endpoints on the TT/root port lists should have been
		 * removed when this device was deallocated, so their
		 * bandwidth lists should be empty by now.  Warn if not.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	if (dev->udev && dev->udev->slot_id)
		dev->udev->slot_id = 0;
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

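/*
 * Free a virt_device structure.
 * If the virt_device added a tt_info (a hub) and has children pointing to
 * that tt_info, then free the children first.  Recursive.
 * We can't rely on udev at this point to find child-parent relationships.
 */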
static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	int i;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;

	if (vdev->real_port == 0 ||
			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad vdev->real_port.\n");
		goto out;
	}

	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* is this a hub device that added a tt_info to the tts list */
		if (tt_info->slot_id == slot_id) {
			/* are any devices using this tt_info? */
			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
				vdev = xhci->devs[i];
				if (vdev && (vdev->tt_info == tt_info))
					xhci_free_virt_devices_depth_first(
						xhci, i);
			}
		}
	}
out:
	/* we are now at a leaf device */
	xhci_debugfs_remove_slot(xhci, slot_id);
	xhci_free_virt_device(xhci, slot_id);
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	dev = kzalloc(sizeof(*dev), flags);
	if (!dev)
		return 0;

	dev->slot_id = slot_id;

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation and bandwidth list for each ep */
	for (i = 0; i < 31; i++) {
		dev->eps[i].ep_index = i;
		dev->eps[i].vdev = dev;
		dev->eps[i].xhci = xhci;
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
	if (!dev->eps[0].ring)
		goto fail;

	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	trace_xhci_alloc_virt_device(dev);

	xhci->devs[slot_id] = dev;

	return 1;
fail:
	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);
	kfree(dev);

	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

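/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All
 * we know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get the real index.
 */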
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed >= USB_SPEED_SUPER)
		hcd = xhci_get_usb3_hcd(xhci);
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

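/* Setup an xHCI virtual device for a Set Address command */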
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER_PLUS:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or
	 * a descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * won't be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	trace_xhci_setup_addressable_virt_device(dev);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

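/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */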
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}

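/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */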
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_dbg(&udev->dev,
			"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			ep->desc.bEndpointAddress,
			1 << interval,
			desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

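/*
 * Convert bInterval expressed in frames (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */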
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}
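/*
 * Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAK polling if
 * interval is 0.
 */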
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		fallthrough;	/* SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */
		fallthrough;

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return interval;
}

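/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed isoc
 * eps.  High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */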
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed < USB_SPEED_SUPER ||
	    !usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	/* Super speed and Plus have max burst in ep companion desc */
	if (udev->speed >= USB_SPEED_SUPER)
		return ep->ss_ep_comp.bMaxBurst;

	if (udev->speed == USB_SPEED_HIGH &&
	    (usb_endpoint_xfer_isoc(&ep->desc) ||
	     usb_endpoint_xfer_int(&ep->desc)))
		return usb_endpoint_maxp_mult(&ep->desc) - 1;

	return 0;
}

static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;

	in = usb_endpoint_dir_in(&ep->desc);

	switch (usb_endpoint_type(&ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}
	return 0;
}

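/*
 * Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */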
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
	    usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
	else if (udev->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = usb_endpoint_maxp_mult(&ep->desc);

	return max_packet * max_burst;
}

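/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to add stream rings later.
 */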
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	enum xhci_ring_type ring_type;
	u32 max_esit_payload;
	u32 endpoint_type;
	unsigned int max_burst;
	unsigned int interval;
	unsigned int mult;
	unsigned int avg_trb_len;
	unsigned int err_count = 0;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(ep);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(&ep->desc);

	/*
	 * Get values to fill the endpoint context, mostly from the ep
	 * descriptor.  The average TRB buffer length defaults to the max
	 * ESIT payload computed below.
	 */
	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
	interval = xhci_get_endpoint_interval(udev, ep);

	/* Periodic endpoint bInterval limit quirk */
	if (usb_endpoint_xfer_int(&ep->desc) ||
	    usb_endpoint_xfer_isoc(&ep->desc)) {
		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
		    udev->speed >= USB_SPEED_HIGH &&
		    interval >= 7) {
			interval = 6;
		}
	}

	mult = xhci_get_endpoint_mult(udev, ep);
	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = xhci_get_endpoint_max_burst(udev, ep);
	avg_trb_len = max_esit_payload;

	/* Allow 3 retries for everything but isoc, set CErr = 3 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		err_count = 3;
	/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
	if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (udev->speed == USB_SPEED_HIGH)
			max_packet = 512;
		if (udev->speed == USB_SPEED_FULL) {
			max_packet = rounddown_pow_of_two(max_packet);
			max_packet = clamp_val(max_packet, 8, 64);
		}
	}
	/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		avg_trb_len = 8;
	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
		mult = 0;

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring)
		return -ENOMEM;

	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;

	/* Fill the endpoint context */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				      EP_INTERVAL(interval) |
				      EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				       MAX_PACKET(max_packet) |
				       MAX_BURST(max_burst) |
				       ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
				  ep_ring->cycle_state);

	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				      EP_AVG_TRB_LENGTH(avg_trb_len));

	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or
	 * configuration request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; i++) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}

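/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */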
void xhci_endpoint_copy(struct xhci_hcd *xhci,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
	if (xhci->quirks & XHCI_MTK_HOST) {
		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
	}
}

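/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.  Only the context entries field
 * matters, but we'll copy the whole thing anyway.
 */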
void xhci_slot_copy(struct xhci_hcd *xhci,
		    struct xhci_container_ctx *in_ctx,
		    struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

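/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */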
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
				dev_to_node(dev));
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
					flags, dev_to_node(dev));
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
					       flags);
		if (!buf)
			goto fail_sp4;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
	}

	return 0;

fail_sp4:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}

	kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);

fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
	if (!command)
		return NULL;

	if (allocate_completion) {
		command->completion =
			kzalloc_node(sizeof(struct completion), mem_flags,
				dev_to_node(dev));
		if (!command->completion) {
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
	if (!command)
		return NULL;

	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
						   mem_flags);
	if (!command->in_ctx) {
		kfree(command->completion);
		kfree(command);
		return NULL;
	}
	return command;
}

void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
	kfree(urb_priv);
}

void xhci_free_command(struct xhci_hcd *xhci,
		       struct xhci_command *command)
{
	xhci_free_container_ctx(xhci, command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

int xhci_alloc_erst(struct xhci_hcd *xhci,
		    struct xhci_ring *evt_ring,
		    struct xhci_erst *erst,
		    gfp_t flags)
{
	size_t size;
	unsigned int val;
	struct xhci_segment *seg;
	struct xhci_erst_entry *entry;

	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
					   size, &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = evt_ring->num_segs;

	seg = evt_ring->first_seg;
	for (val = 0; val < evt_ring->num_segs; val++) {
		entry = &erst->entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	return 0;
}

void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	size_t size;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
	if (erst->entries)
		dma_free_coherent(dev, size,
				erst->entries,
				erst->erst_dma_addr);
	erst->entries = NULL;
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int i, j, num_ports;

	cancel_delayed_work_sync(&xhci->cmd_timer);

	xhci_free_erst(xhci, &xhci->erst);

	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	xhci_cleanup_command_queue(xhci);

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;

		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;

			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
		xhci_free_virt_devices_depth_first(xhci, i);

	dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed small stream array pool");

	dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	if (!xhci->rh_bw)
		goto no_bw;

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;

		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->cmd_ring_reserved_trbs = 0;
	xhci->usb2_rhub.num_ports = 0;
	xhci->usb3_rhub.num_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_rhub.ports);
	kfree(xhci->usb3_rhub.ports);
	kfree(xhci->hw_ports);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);
	for (i = 0; i < xhci->num_port_caps; i++)
		kfree(xhci->port_caps[i].psi);
	kfree(xhci->port_caps);
	xhci->num_port_caps = 0;

	xhci->usb2_rhub.ports = NULL;
	xhci->usb3_rhub.ports = NULL;
	xhci->hw_ports = NULL;
	xhci->rh_bw = NULL;
	xhci->ext_caps = NULL;
	xhci->port_caps = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->usb2_rhub.bus_state.bus_suspended = 0;
	xhci->usb3_rhub.bus_state.bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
			  true);
		return -1;
	}
	return 0;
}

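/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */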
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (!deq)
		xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Write event ring dequeue pointer, preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}

2105 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2106 __le32 __iomem *addr, int max_caps)
2107 {
2108 u32 temp, port_offset, port_count;
2109 int i;
2110 u8 major_revision, minor_revision;
2111 struct xhci_hub *rhub;
2112 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2113 struct xhci_port_cap *port_cap;
2114
2115 temp = readl(addr);
2116 major_revision = XHCI_EXT_PORT_MAJOR(temp);
2117 minor_revision = XHCI_EXT_PORT_MINOR(temp);
2118
2119 if (major_revision == 0x03) {
2120 rhub = &xhci->usb3_rhub;
2121
2122
2123
2124
2125
2126
2127
2128 if (minor_revision > 0x00 && minor_revision < 0x10)
2129 minor_revision <<= 4;
2130 } else if (major_revision <= 0x02) {
2131 rhub = &xhci->usb2_rhub;
2132 } else {
2133 xhci_warn(xhci, "Ignoring unknown port speed, "
2134 "Ext Cap %p, revision = 0x%x\n",
2135 addr, major_revision);
2136
2137 return;
2138 }
2139 rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
2140
2141 if (rhub->min_rev < minor_revision)
2142 rhub->min_rev = minor_revision;
2143
2144
2145 temp = readl(addr + 2);
2146 port_offset = XHCI_EXT_PORT_OFF(temp);
2147 port_count = XHCI_EXT_PORT_COUNT(temp);
2148 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2149 "Ext Cap %p, port offset = %u, "
2150 "count = %u, revision = 0x%x",
2151 addr, port_offset, port_count, major_revision);
2152
2153 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2154
2155 return;
2156
2157 port_cap = &xhci->port_caps[xhci->num_port_caps++];
2158 if (xhci->num_port_caps > max_caps)
2159 return;

	port_cap->maj_rev = major_revision;
	port_cap->min_rev = minor_revision;
	port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);

	if (port_cap->psi_count) {
		port_cap->psi = kcalloc_node(port_cap->psi_count,
					     sizeof(*port_cap->psi),
					     GFP_KERNEL, dev_to_node(dev));
		if (!port_cap->psi)
			port_cap->psi_count = 0;

		port_cap->psi_uid_count++;
		for (i = 0; i < port_cap->psi_count; i++) {
			port_cap->psi[i] = readl(addr + 4 + i);

			/* count unique ID values, two consecutive entries can
			 * have the same ID if the link is asymmetric
			 */
			if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
				  XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
				port_cap->psi_uid_count++;

			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
				 XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
				 XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
				 XHCI_EXT_PORT_PLT(port_cap->psi[i]),
				 XHCI_EXT_PORT_PFD(port_cap->psi[i]),
				 XHCI_EXT_PORT_LP(port_cap->psi[i]),
				 XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
		}
	}
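
	/*
	 * Example, for illustration only: a PSI dword with PSIV = 4,
	 * PSIE = 3 and PSIM = 5 describes speed ID 4 running at 5 Gb/s,
	 * i.e. the familiar SuperSpeed rate.  PSIE scales PSIM by b/s,
	 * Kb/s, Mb/s or Gb/s (values 0..3).
	 */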

	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
	    (temp & XHCI_HLC)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "xHCI 1.0: support USB2 hardware lpm");
		xhci->hw_lpm_support = 1;
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		struct xhci_port *hw_port = &xhci->hw_ports[i];
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (hw_port->rhub) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n",
				  addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
				  hw_port->rhub->maj_rev, major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (hw_port->rhub != rhub &&
			    hw_port->hcd_portnum != DUPLICATE_ENTRY) {
				hw_port->rhub->num_ports--;
				hw_port->hcd_portnum = DUPLICATE_ENTRY;
			}
			continue;
		}
		hw_port->rhub = rhub;
		hw_port->port_cap = port_cap;
		rhub->num_ports++;
	}
}
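
/*
 * Illustrative sketch, not part of the driver: decoding the first dword of a
 * Supported Protocol Capability with the macros used by xhci_add_in_port().
 * A USB 3.0 capability header might read something like 0x03000502:
 * capability ID 0x02 in the low byte, minor revision 0x00, and major
 * revision 0x03 in the top byte.  The function name is made up for
 * illustration.
 */
static void __maybe_unused xhci_example_decode_proto_cap(struct xhci_hcd *xhci,
							 __le32 __iomem *addr)
{
	u32 temp = readl(addr);

	xhci_dbg(xhci, "Protocol cap: USB %x.%02x\n",
		 XHCI_EXT_PORT_MAJOR(temp), XHCI_EXT_PORT_MINOR(temp));
}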

static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
					struct xhci_hub *rhub, gfp_t flags)
{
	int port_index = 0;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!rhub->num_ports)
		return;
	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
			flags, dev_to_node(dev));
	if (!rhub->ports)
		return;

	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
		if (xhci->hw_ports[i].rhub != rhub ||
		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
			continue;
		xhci->hw_ports[i].hcd_portnum = port_index;
		rhub->ports[port_index] = &xhci->hw_ports[i];
		port_index++;
		if (port_index == rhub->num_ports)
			break;
	}
}
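
/*
 * Illustrative sketch, not called anywhere: once
 * xhci_create_rhub_port_array() has run, a roothub-relative port number maps
 * back to its hardware port through rhub->ports[].  The helper name is made
 * up for illustration.
 */
static __maybe_unused struct xhci_port *
xhci_example_lookup_port(struct xhci_hub *rhub, unsigned int hcd_portnum)
{
	if (!rhub->ports || hcd_portnum >= rhub->num_ports)
		return NULL;
	return rhub->ports[hcd_portnum];
}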

/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * that specify what speeds each port is supposed to be.  We can't count on
 * the port speed bits in the PORTSC register being correct until a device is
 * connected, but we need to set up the two fake roothubs with the correct
 * number of USB 3.0 and USB 2.0 ports.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	void __iomem *base;
	u32 offset;
	unsigned int num_ports;
	int i, j;
	int cap_count = 0;
	u32 cap_start;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
			flags, dev_to_node(dev));
	if (!xhci->hw_ports)
		return -ENOMEM;

	for (i = 0; i < num_ports; i++) {
		xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
				NUM_PORT_REGS * i;
		xhci->hw_ports[i].hw_portnum = i;
	}
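
	/*
	 * For illustration: each port owns NUM_PORT_REGS (four) consecutive
	 * 32-bit registers starting at port_status_base, i.e. PORTSC,
	 * PORTPMSC, PORTLI and PORTHLPMC, which is why the address above
	 * strides by NUM_PORT_REGS per port.
	 */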

	xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
			dev_to_node(dev));
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	base = &xhci->cap_regs->hc_capbase;

	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
	if (!cap_start) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
		return -ENODEV;
	}

	offset = cap_start;
	/* count extended protocol capability entries for later caching */
	while (offset) {
		cap_count++;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}

	xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
			flags, dev_to_node(dev));
	if (!xhci->ext_caps)
		return -ENOMEM;

	xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
			flags, dev_to_node(dev));
	if (!xhci->port_caps)
		return -ENOMEM;

	offset = cap_start;

	while (offset) {
		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
		if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
		    num_ports)
			break;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}
	if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Found %u USB 2.0 ports and %u USB 3.0 ports.",
		       xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Limiting USB 3.0 roothub ports to %u.",
			       USB_SS_MAXPORTS);
		xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
	}
	if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Limiting USB 2.0 roothub ports to %u.",
			       USB_MAXCHILDREN);
		xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
	}

	if (!xhci->usb2_rhub.num_ports)
		xhci_info(xhci, "USB2 root hub has no ports\n");

	if (!xhci->usb3_rhub.num_ports)
		xhci_info(xhci, "USB3 root hub has no ports\n");

	xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
	xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);

	return 0;
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int val, val2;
	u64 val_64;
	u32 page_size, temp;
	int i, ret;

	INIT_LIST_HEAD(&xhci->cmd_list);

	/* init command timeout work */
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);

	page_size = readl(&xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Supported page size register = 0x%x", page_size);
	/* ffs() is 1-based; bit n set advertises a page size of 2^(n+12) */
	i = ffs(page_size) - 1;
	if (i >= 0 && i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Supported page size of %iK",
			       (1 << (i + 12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");

	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "HCD page size set to %iK", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// xHC can handle at most %d device slots.", val);
	val2 = readl(&xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting Max device slots reg = 0x%x.", val);
	writel(val, &xhci->op_regs->config_reg);

	/*
	 * The Device Context Base Address Array must be physically
	 * contiguous and 64-byte aligned (xHCI section 6.1).
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			flags);
	if (!xhci->dcbaa)
		goto fail;
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Device context base array address = 0x%llx (DMA), %p (virt)",
		       (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
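
	/*
	 * For illustration: dcbaa->dev_context_ptrs[0] is reserved for the
	 * scratchpad buffer array (set up below, if the controller asks for
	 * scratchpad buffers), and each enabled slot n later gets its output
	 * device context's DMA address written into dev_context_ptrs[n].
	 */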

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);

	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */
	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;
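
	/*
	 * For illustration: stream contexts are 16 bytes each, so the small
	 * pool (SMALL_STREAM_ARRAY_SIZE, 256 bytes) holds arrays of up to 16
	 * stream contexts and the medium pool (MEDIUM_STREAM_ARRAY_SIZE, 1KB)
	 * holds up to 64; anything larger falls back to dma_alloc_coherent()
	 * as noted above.
	 */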

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
		       (unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting command ring address to 0x%016llx", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
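
	/*
	 * For illustration: in the CRCR value composed above, bits 63:6 carry
	 * the 64-byte aligned ring address, bit 0 is the initial Ring Cycle
	 * State, and the remaining low bits (CMD_RING_RSVD_BITS) are
	 * preserved from the old register value.
	 */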

	/*
	 * Reserve one command ring trb for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = readl(&xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Doorbell array is located at offset 0x%x from cap regs base addr",
		       val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;

	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
					   0, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci) < 0)
		goto fail;

	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
	if (ret)
		goto fail;

	/* set ERST count with the number of entries in the segment table */
	val = readl(&xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Write ERST size = %i to ir_set 0 (some bits preserved)",
		       val);
	writel(val, &xhci->ir_set->erst_size);
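
	/*
	 * For illustration: each ERST entry is a 16-byte record holding a
	 * segment's 64-bit base address and its size in TRBs; with
	 * ERST_NUM_SEGS entries filled in, the controller knows where every
	 * event ring segment lives.
	 */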

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set ERST base address for ir_set 0 = 0x%llx",
		       (unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wrote ERST address to ir_set 0.");

	xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1 ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	for (i = 0; i < MAX_HC_SLOTS; i++)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; i++) {
		xhci->usb2_rhub.bus_state.resume_done[i] = 0;
		xhci->usb3_rhub.bus_state.resume_done[i] = 0;
		/* Only the USB 2.0 completions will ever be used. */
		init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
		init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = readl(&xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	writel(temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}