0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/pci.h>
0012 #include <linux/iopoll.h>
0013 #include <linux/irq.h>
0014 #include <linux/log2.h>
0015 #include <linux/module.h>
0016 #include <linux/moduleparam.h>
0017 #include <linux/slab.h>
0018 #include <linux/dmi.h>
0019 #include <linux/dma-mapping.h>
0020
0021 #include "xhci.h"
0022 #include "xhci-trace.h"
0023 #include "xhci-debugfs.h"
0024 #include "xhci-dbgcap.h"
0025
0026 #define DRIVER_AUTHOR "Sarah Sharp"
0027 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
0028
0029 #define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
0030
0031
0032 static int link_quirk;
0033 module_param(link_quirk, int, S_IRUGO | S_IWUSR);
0034 MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
0035
0036 static unsigned long long quirks;
0037 module_param(quirks, ullong, S_IRUGO);
0038 MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
0039
0040 static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
0041 {
0042 struct xhci_segment *seg = ring->first_seg;
0043
0044 if (!td || !td->start_seg)
0045 return false;
0046 do {
0047 if (seg == td->start_seg)
0048 return true;
0049 seg = seg->next;
0050 } while (seg && seg != ring->first_seg);
0051
0052 return false;
0053 }
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068 int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
0069 {
0070 u32 result;
0071 int ret;
0072
0073 ret = readl_poll_timeout_atomic(ptr, result,
0074 (result & mask) == done ||
0075 result == U32_MAX,
0076 1, timeout_us);
0077 if (result == U32_MAX)
0078 return -ENODEV;
0079
0080 return ret;
0081 }
0082
0083
0084
0085
0086 void xhci_quiesce(struct xhci_hcd *xhci)
0087 {
0088 u32 halted;
0089 u32 cmd;
0090 u32 mask;
0091
0092 mask = ~(XHCI_IRQS);
0093 halted = readl(&xhci->op_regs->status) & STS_HALT;
0094 if (!halted)
0095 mask &= ~CMD_RUN;
0096
0097 cmd = readl(&xhci->op_regs->command);
0098 cmd &= mask;
0099 writel(cmd, &xhci->op_regs->command);
0100 }
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110 int xhci_halt(struct xhci_hcd *xhci)
0111 {
0112 int ret;
0113
0114 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
0115 xhci_quiesce(xhci);
0116
0117 ret = xhci_handshake(&xhci->op_regs->status,
0118 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
0119 if (ret) {
0120 xhci_warn(xhci, "Host halt failed, %d\n", ret);
0121 return ret;
0122 }
0123
0124 xhci->xhc_state |= XHCI_STATE_HALTED;
0125 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
0126
0127 return ret;
0128 }
0129
0130
0131
0132
0133 int xhci_start(struct xhci_hcd *xhci)
0134 {
0135 u32 temp;
0136 int ret;
0137
0138 temp = readl(&xhci->op_regs->command);
0139 temp |= (CMD_RUN);
0140 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
0141 temp);
0142 writel(temp, &xhci->op_regs->command);
0143
0144
0145
0146
0147
0148 ret = xhci_handshake(&xhci->op_regs->status,
0149 STS_HALT, 0, XHCI_MAX_HALT_USEC);
0150 if (ret == -ETIMEDOUT)
0151 xhci_err(xhci, "Host took too long to start, "
0152 "waited %u microseconds.\n",
0153 XHCI_MAX_HALT_USEC);
0154 if (!ret) {
0155
0156 xhci->xhc_state = 0;
0157 xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
0158 }
0159
0160 return ret;
0161 }
0162
0163
0164
0165
0166
0167
0168
0169
0170 int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
0171 {
0172 u32 command;
0173 u32 state;
0174 int ret;
0175
0176 state = readl(&xhci->op_regs->status);
0177
0178 if (state == ~(u32)0) {
0179 xhci_warn(xhci, "Host not accessible, reset failed.\n");
0180 return -ENODEV;
0181 }
0182
0183 if ((state & STS_HALT) == 0) {
0184 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
0185 return 0;
0186 }
0187
0188 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
0189 command = readl(&xhci->op_regs->command);
0190 command |= CMD_RESET;
0191 writel(command, &xhci->op_regs->command);
0192
0193
0194
0195
0196
0197
0198
0199
0200 if (xhci->quirks & XHCI_INTEL_HOST)
0201 udelay(1000);
0202
0203 ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
0204 if (ret)
0205 return ret;
0206
0207 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
0208 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
0209
0210 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0211 "Wait for controller to be ready for doorbell rings");
0212
0213
0214
0215
0216 ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
0217
0218 xhci->usb2_rhub.bus_state.port_c_suspend = 0;
0219 xhci->usb2_rhub.bus_state.suspended_ports = 0;
0220 xhci->usb2_rhub.bus_state.resuming_ports = 0;
0221 xhci->usb3_rhub.bus_state.port_c_suspend = 0;
0222 xhci->usb3_rhub.bus_state.suspended_ports = 0;
0223 xhci->usb3_rhub.bus_state.resuming_ports = 0;
0224
0225 return ret;
0226 }
0227
0228 static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
0229 {
0230 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
0231 int err, i;
0232 u64 val;
0233 u32 intrs;
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249 if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
0250 return;
0251
0252 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
0253
0254
0255 val = readl(&xhci->op_regs->command);
0256 val &= ~CMD_HSEIE;
0257 writel(val, &xhci->op_regs->command);
0258
0259
0260 val = readl(&xhci->op_regs->status);
0261 val |= STS_FATAL;
0262 writel(val, &xhci->op_regs->status);
0263
0264
0265 val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
0266 if (upper_32_bits(val))
0267 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
0268 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
0269 if (upper_32_bits(val))
0270 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
0271
0272 intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
0273 ARRAY_SIZE(xhci->run_regs->ir_set));
0274
0275 for (i = 0; i < intrs; i++) {
0276 struct xhci_intr_reg __iomem *ir;
0277
0278 ir = &xhci->run_regs->ir_set[i];
0279 val = xhci_read_64(xhci, &ir->erst_base);
0280 if (upper_32_bits(val))
0281 xhci_write_64(xhci, 0, &ir->erst_base);
0282 val= xhci_read_64(xhci, &ir->erst_dequeue);
0283 if (upper_32_bits(val))
0284 xhci_write_64(xhci, 0, &ir->erst_dequeue);
0285 }
0286
0287
0288 err = xhci_handshake(&xhci->op_regs->status,
0289 STS_FATAL, STS_FATAL,
0290 XHCI_MAX_HALT_USEC);
0291 if (!err)
0292 xhci_info(xhci, "Fault detected\n");
0293 }
0294
0295 #ifdef CONFIG_USB_PCI
0296
0297
0298
0299 static int xhci_setup_msi(struct xhci_hcd *xhci)
0300 {
0301 int ret;
0302
0303
0304
0305 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
0306
0307 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
0308 if (ret < 0) {
0309 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0310 "failed to allocate MSI entry");
0311 return ret;
0312 }
0313
0314 ret = request_irq(pdev->irq, xhci_msi_irq,
0315 0, "xhci_hcd", xhci_to_hcd(xhci));
0316 if (ret) {
0317 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0318 "disable MSI interrupt");
0319 pci_free_irq_vectors(pdev);
0320 }
0321
0322 return ret;
0323 }
0324
0325
0326
0327
0328 static int xhci_setup_msix(struct xhci_hcd *xhci)
0329 {
0330 int i, ret;
0331 struct usb_hcd *hcd = xhci_to_hcd(xhci);
0332 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
0333
0334
0335
0336
0337
0338
0339
0340
0341 xhci->msix_count = min(num_online_cpus() + 1,
0342 HCS_MAX_INTRS(xhci->hcs_params1));
0343
0344 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
0345 PCI_IRQ_MSIX);
0346 if (ret < 0) {
0347 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0348 "Failed to enable MSI-X");
0349 return ret;
0350 }
0351
0352 for (i = 0; i < xhci->msix_count; i++) {
0353 ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
0354 "xhci_hcd", xhci_to_hcd(xhci));
0355 if (ret)
0356 goto disable_msix;
0357 }
0358
0359 hcd->msix_enabled = 1;
0360 return ret;
0361
0362 disable_msix:
0363 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
0364 while (--i >= 0)
0365 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
0366 pci_free_irq_vectors(pdev);
0367 return ret;
0368 }
0369
0370
0371 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
0372 {
0373 struct usb_hcd *hcd = xhci_to_hcd(xhci);
0374 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
0375
0376 if (xhci->quirks & XHCI_PLAT)
0377 return;
0378
0379
0380 if (hcd->irq > 0)
0381 return;
0382
0383 if (hcd->msix_enabled) {
0384 int i;
0385
0386 for (i = 0; i < xhci->msix_count; i++)
0387 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
0388 } else {
0389 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
0390 }
0391
0392 pci_free_irq_vectors(pdev);
0393 hcd->msix_enabled = 0;
0394 }
0395
0396 static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
0397 {
0398 struct usb_hcd *hcd = xhci_to_hcd(xhci);
0399
0400 if (hcd->msix_enabled) {
0401 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
0402 int i;
0403
0404 for (i = 0; i < xhci->msix_count; i++)
0405 synchronize_irq(pci_irq_vector(pdev, i));
0406 }
0407 }
0408
0409 static int xhci_try_enable_msi(struct usb_hcd *hcd)
0410 {
0411 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
0412 struct pci_dev *pdev;
0413 int ret;
0414
0415
0416 if (xhci->quirks & XHCI_PLAT)
0417 return 0;
0418
0419 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
0420
0421
0422
0423
0424 if (xhci->quirks & XHCI_BROKEN_MSI)
0425 goto legacy_irq;
0426
0427
0428 if (hcd->irq)
0429 free_irq(hcd->irq, hcd);
0430 hcd->irq = 0;
0431
0432 ret = xhci_setup_msix(xhci);
0433 if (ret)
0434
0435 ret = xhci_setup_msi(xhci);
0436
0437 if (!ret) {
0438 hcd->msi_enabled = 1;
0439 return 0;
0440 }
0441
0442 if (!pdev->irq) {
0443 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
0444 return -EINVAL;
0445 }
0446
0447 legacy_irq:
0448 if (!strlen(hcd->irq_descr))
0449 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
0450 hcd->driver->description, hcd->self.busnum);
0451
0452
0453 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
0454 hcd->irq_descr, hcd);
0455 if (ret) {
0456 xhci_err(xhci, "request interrupt %d failed\n",
0457 pdev->irq);
0458 return ret;
0459 }
0460 hcd->irq = pdev->irq;
0461 return 0;
0462 }
0463
0464 #else
0465
0466 static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
0467 {
0468 return 0;
0469 }
0470
0471 static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
0472 {
0473 }
0474
0475 static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
0476 {
0477 }
0478
0479 #endif
0480
0481 static void compliance_mode_recovery(struct timer_list *t)
0482 {
0483 struct xhci_hcd *xhci;
0484 struct usb_hcd *hcd;
0485 struct xhci_hub *rhub;
0486 u32 temp;
0487 int i;
0488
0489 xhci = from_timer(xhci, t, comp_mode_recovery_timer);
0490 rhub = &xhci->usb3_rhub;
0491 hcd = rhub->hcd;
0492
0493 if (!hcd)
0494 return;
0495
0496 for (i = 0; i < rhub->num_ports; i++) {
0497 temp = readl(rhub->ports[i]->addr);
0498 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
0499
0500
0501
0502
0503 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
0504 "Compliance mode detected->port %d",
0505 i + 1);
0506 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
0507 "Attempting compliance mode recovery");
0508
0509 if (hcd->state == HC_STATE_SUSPENDED)
0510 usb_hcd_resume_root_hub(hcd);
0511
0512 usb_hcd_poll_rh_status(hcd);
0513 }
0514 }
0515
0516 if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
0517 mod_timer(&xhci->comp_mode_recovery_timer,
0518 jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
0519 }
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
0532 {
0533 xhci->port_status_u0 = 0;
0534 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
0535 0);
0536 xhci->comp_mode_recovery_timer.expires = jiffies +
0537 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
0538
0539 add_timer(&xhci->comp_mode_recovery_timer);
0540 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
0541 "Compliance mode recovery timer initialized");
0542 }
0543
0544
0545
0546
0547
0548
0549
0550 static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
0551 {
0552 const char *dmi_product_name, *dmi_sys_vendor;
0553
0554 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
0555 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
0556 if (!dmi_product_name || !dmi_sys_vendor)
0557 return false;
0558
0559 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
0560 return false;
0561
0562 if (strstr(dmi_product_name, "Z420") ||
0563 strstr(dmi_product_name, "Z620") ||
0564 strstr(dmi_product_name, "Z820") ||
0565 strstr(dmi_product_name, "Z1 Workstation"))
0566 return true;
0567
0568 return false;
0569 }
0570
0571 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
0572 {
0573 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
0574 }
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584 static int xhci_init(struct usb_hcd *hcd)
0585 {
0586 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
0587 int retval;
0588
0589 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
0590 spin_lock_init(&xhci->lock);
0591 if (xhci->hci_version == 0x95 && link_quirk) {
0592 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
0593 "QUIRK: Not clearing Link TRB chain bits.");
0594 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
0595 } else {
0596 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0597 "xHCI doesn't need link TRB QUIRK");
0598 }
0599 retval = xhci_mem_init(xhci, GFP_KERNEL);
0600 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
0601
0602
0603 if (xhci_compliance_mode_recovery_timer_quirk_check()) {
0604 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
0605 compliance_mode_recovery_timer_init(xhci);
0606 }
0607
0608 return retval;
0609 }
0610
0611
0612
0613
0614 static int xhci_run_finished(struct xhci_hcd *xhci)
0615 {
0616 unsigned long flags;
0617 u32 temp;
0618
0619
0620
0621
0622
0623 spin_lock_irqsave(&xhci->lock, flags);
0624
0625 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
0626 temp = readl(&xhci->op_regs->command);
0627 temp |= (CMD_EIE);
0628 writel(temp, &xhci->op_regs->command);
0629
0630 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
0631 temp = readl(&xhci->ir_set->irq_pending);
0632 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
0633
0634 if (xhci_start(xhci)) {
0635 xhci_halt(xhci);
0636 spin_unlock_irqrestore(&xhci->lock, flags);
0637 return -ENODEV;
0638 }
0639
0640 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
0641
0642 if (xhci->quirks & XHCI_NEC_HOST)
0643 xhci_ring_cmd_db(xhci);
0644
0645 spin_unlock_irqrestore(&xhci->lock, flags);
0646
0647 return 0;
0648 }
0649
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662 int xhci_run(struct usb_hcd *hcd)
0663 {
0664 u32 temp;
0665 u64 temp_64;
0666 int ret;
0667 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
0668
0669
0670
0671
0672
0673 hcd->uses_new_polling = 1;
0674 if (!usb_hcd_is_primary_hcd(hcd))
0675 return xhci_run_finished(xhci);
0676
0677 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
0678
0679 ret = xhci_try_enable_msi(hcd);
0680 if (ret)
0681 return ret;
0682
0683 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
0684 temp_64 &= ~ERST_PTR_MASK;
0685 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0686 "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
0687
0688 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0689 "// Set the interrupt modulation register");
0690 temp = readl(&xhci->ir_set->irq_control);
0691 temp &= ~ER_IRQ_INTERVAL_MASK;
0692 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
0693 writel(temp, &xhci->ir_set->irq_control);
0694
0695 if (xhci->quirks & XHCI_NEC_HOST) {
0696 struct xhci_command *command;
0697
0698 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
0699 if (!command)
0700 return -ENOMEM;
0701
0702 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
0703 TRB_TYPE(TRB_NEC_GET_FW));
0704 if (ret)
0705 xhci_free_command(xhci, command);
0706 }
0707 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0708 "Finished %s for main hcd", __func__);
0709
0710 xhci_create_dbc_dev(xhci);
0711
0712 xhci_debugfs_init(xhci);
0713
0714 if (xhci_has_one_roothub(xhci))
0715 return xhci_run_finished(xhci);
0716
0717 set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
0718
0719 return 0;
0720 }
0721 EXPORT_SYMBOL_GPL(xhci_run);
0722
0723
0724
0725
0726
0727
0728
0729
0730
0731
0732 static void xhci_stop(struct usb_hcd *hcd)
0733 {
0734 u32 temp;
0735 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
0736
0737 mutex_lock(&xhci->mutex);
0738
0739
0740 if (!usb_hcd_is_primary_hcd(hcd)) {
0741 mutex_unlock(&xhci->mutex);
0742 return;
0743 }
0744
0745 xhci_remove_dbc_dev(xhci);
0746
0747 spin_lock_irq(&xhci->lock);
0748 xhci->xhc_state |= XHCI_STATE_HALTED;
0749 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
0750 xhci_halt(xhci);
0751 xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
0752 spin_unlock_irq(&xhci->lock);
0753
0754 xhci_cleanup_msix(xhci);
0755
0756
0757 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
0758 (!(xhci_all_ports_seen_u0(xhci)))) {
0759 del_timer_sync(&xhci->comp_mode_recovery_timer);
0760 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
0761 "%s: compliance mode recovery timer deleted",
0762 __func__);
0763 }
0764
0765 if (xhci->quirks & XHCI_AMD_PLL_FIX)
0766 usb_amd_dev_put();
0767
0768 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0769 "// Disabling event ring interrupts");
0770 temp = readl(&xhci->op_regs->status);
0771 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
0772 temp = readl(&xhci->ir_set->irq_pending);
0773 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
0774
0775 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
0776 xhci_mem_cleanup(xhci);
0777 xhci_debugfs_exit(xhci);
0778 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0779 "xhci_stop completed - status = %x",
0780 readl(&xhci->op_regs->status));
0781 mutex_unlock(&xhci->mutex);
0782 }
0783
0784
0785
0786
0787
0788
0789
0790
0791
0792
0793 void xhci_shutdown(struct usb_hcd *hcd)
0794 {
0795 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
0796
0797 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
0798 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
0799
0800
0801 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
0802 __func__, hcd->self.busnum);
0803 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
0804 del_timer_sync(&hcd->rh_timer);
0805
0806 if (xhci->shared_hcd) {
0807 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
0808 del_timer_sync(&xhci->shared_hcd->rh_timer);
0809 }
0810
0811 spin_lock_irq(&xhci->lock);
0812 xhci_halt(xhci);
0813
0814 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
0815 xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
0816 spin_unlock_irq(&xhci->lock);
0817
0818 xhci_cleanup_msix(xhci);
0819
0820 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0821 "xhci_shutdown completed - status = %x",
0822 readl(&xhci->op_regs->status));
0823 }
0824 EXPORT_SYMBOL_GPL(xhci_shutdown);
0825
0826 #ifdef CONFIG_PM
0827 static void xhci_save_registers(struct xhci_hcd *xhci)
0828 {
0829 xhci->s3.command = readl(&xhci->op_regs->command);
0830 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
0831 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
0832 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
0833 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
0834 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
0835 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
0836 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
0837 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
0838 }
0839
0840 static void xhci_restore_registers(struct xhci_hcd *xhci)
0841 {
0842 writel(xhci->s3.command, &xhci->op_regs->command);
0843 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
0844 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
0845 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
0846 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
0847 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
0848 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
0849 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
0850 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
0851 }
0852
0853 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
0854 {
0855 u64 val_64;
0856
0857
0858 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
0859 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
0860 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
0861 xhci->cmd_ring->dequeue) &
0862 (u64) ~CMD_RING_RSVD_BITS) |
0863 xhci->cmd_ring->cycle_state;
0864 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
0865 "// Setting command ring address to 0x%llx",
0866 (long unsigned long) val_64);
0867 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
0868 }
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879 static void xhci_clear_command_ring(struct xhci_hcd *xhci)
0880 {
0881 struct xhci_ring *ring;
0882 struct xhci_segment *seg;
0883
0884 ring = xhci->cmd_ring;
0885 seg = ring->deq_seg;
0886 do {
0887 memset(seg->trbs, 0,
0888 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
0889 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
0890 cpu_to_le32(~TRB_CYCLE);
0891 seg = seg->next;
0892 } while (seg != ring->deq_seg);
0893
0894
0895 ring->deq_seg = ring->first_seg;
0896 ring->dequeue = ring->first_seg->trbs;
0897 ring->enq_seg = ring->deq_seg;
0898 ring->enqueue = ring->dequeue;
0899
0900 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
0901
0902
0903
0904
0905 ring->cycle_state = 1;
0906
0907
0908
0909
0910
0911
0912
0913
0914 xhci_set_cmd_ring_deq(xhci);
0915 }
0916
0917
0918
0919
0920
0921
0922
0923
0924
0925
0926 static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
0927 struct xhci_hub *rhub,
0928 bool do_wakeup)
0929 {
0930 unsigned long flags;
0931 u32 t1, t2, portsc;
0932 int i;
0933
0934 spin_lock_irqsave(&xhci->lock, flags);
0935
0936 for (i = 0; i < rhub->num_ports; i++) {
0937 portsc = readl(rhub->ports[i]->addr);
0938 t1 = xhci_port_state_to_neutral(portsc);
0939 t2 = t1;
0940
0941
0942 if (!do_wakeup)
0943 t2 &= ~PORT_WAKE_BITS;
0944
0945
0946 if (!(portsc & (PORT_CSC | PORT_CONNECT)))
0947 t2 |= PORT_CSC;
0948
0949 if (t1 != t2) {
0950 writel(t2, rhub->ports[i]->addr);
0951 xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
0952 rhub->hcd->self.busnum, i + 1, portsc, t2);
0953 }
0954 }
0955 spin_unlock_irqrestore(&xhci->lock, flags);
0956 }
0957
0958 static bool xhci_pending_portevent(struct xhci_hcd *xhci)
0959 {
0960 struct xhci_port **ports;
0961 int port_index;
0962 u32 status;
0963 u32 portsc;
0964
0965 status = readl(&xhci->op_regs->status);
0966 if (status & STS_EINT)
0967 return true;
0968
0969
0970
0971
0972
0973
0974 port_index = xhci->usb2_rhub.num_ports;
0975 ports = xhci->usb2_rhub.ports;
0976 while (port_index--) {
0977 portsc = readl(ports[port_index]->addr);
0978 if (portsc & PORT_CHANGE_MASK ||
0979 (portsc & PORT_PLS_MASK) == XDEV_RESUME)
0980 return true;
0981 }
0982 port_index = xhci->usb3_rhub.num_ports;
0983 ports = xhci->usb3_rhub.ports;
0984 while (port_index--) {
0985 portsc = readl(ports[port_index]->addr);
0986 if (portsc & PORT_CHANGE_MASK ||
0987 (portsc & PORT_PLS_MASK) == XDEV_RESUME)
0988 return true;
0989 }
0990 return false;
0991 }
0992
0993
0994
0995
0996
0997
0998
0999 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
1000 {
1001 int rc = 0;
1002 unsigned int delay = XHCI_MAX_HALT_USEC * 2;
1003 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1004 u32 command;
1005 u32 res;
1006
1007 if (!hcd->state)
1008 return 0;
1009
1010 if (hcd->state != HC_STATE_SUSPENDED ||
1011 (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
1012 return -EINVAL;
1013
1014
1015 xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
1016 xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
1017
1018 if (!HCD_HW_ACCESSIBLE(hcd))
1019 return 0;
1020
1021 xhci_dbc_suspend(xhci);
1022
1023
1024 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
1025 __func__, hcd->self.busnum);
1026 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1027 del_timer_sync(&hcd->rh_timer);
1028 if (xhci->shared_hcd) {
1029 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1030 del_timer_sync(&xhci->shared_hcd->rh_timer);
1031 }
1032
1033 if (xhci->quirks & XHCI_SUSPEND_DELAY)
1034 usleep_range(1000, 1500);
1035
1036 spin_lock_irq(&xhci->lock);
1037 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1038 if (xhci->shared_hcd)
1039 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1040
1041
1042
1043
1044 command = readl(&xhci->op_regs->command);
1045 command &= ~CMD_RUN;
1046 writel(command, &xhci->op_regs->command);
1047
1048
1049 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
1050
1051 if (xhci_handshake(&xhci->op_regs->status,
1052 STS_HALT, STS_HALT, delay)) {
1053 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
1054 spin_unlock_irq(&xhci->lock);
1055 return -ETIMEDOUT;
1056 }
1057 xhci_clear_command_ring(xhci);
1058
1059
1060 xhci_save_registers(xhci);
1061
1062
1063 command = readl(&xhci->op_regs->command);
1064 command |= CMD_CSS;
1065 writel(command, &xhci->op_regs->command);
1066 xhci->broken_suspend = 0;
1067 if (xhci_handshake(&xhci->op_regs->status,
1068 STS_SAVE, 0, 20 * 1000)) {
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078 res = readl(&xhci->op_regs->status);
1079 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
1080 (((res & STS_SRE) == 0) &&
1081 ((res & STS_HCE) == 0))) {
1082 xhci->broken_suspend = 1;
1083 } else {
1084 xhci_warn(xhci, "WARN: xHC save state timeout\n");
1085 spin_unlock_irq(&xhci->lock);
1086 return -ETIMEDOUT;
1087 }
1088 }
1089 spin_unlock_irq(&xhci->lock);
1090
1091
1092
1093
1094
1095 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1096 (!(xhci_all_ports_seen_u0(xhci)))) {
1097 del_timer_sync(&xhci->comp_mode_recovery_timer);
1098 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1099 "%s: compliance mode recovery timer deleted",
1100 __func__);
1101 }
1102
1103
1104
1105 xhci_msix_sync_irqs(xhci);
1106
1107 return rc;
1108 }
1109 EXPORT_SYMBOL_GPL(xhci_suspend);
1110
1111
1112
1113
1114
1115
1116
1117 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1118 {
1119 u32 command, temp = 0;
1120 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1121 int retval = 0;
1122 bool comp_timer_running = false;
1123 bool pending_portevent = false;
1124 bool reinit_xhc = false;
1125
1126 if (!hcd->state)
1127 return 0;
1128
1129
1130
1131
1132
1133 if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
1134 time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
1135 msleep(100);
1136
1137 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1138 if (xhci->shared_hcd)
1139 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1140
1141 spin_lock_irq(&xhci->lock);
1142
1143 if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
1144 reinit_xhc = true;
1145
1146 if (!reinit_xhc) {
1147
1148
1149
1150
1151 retval = xhci_handshake(&xhci->op_regs->status,
1152 STS_CNR, 0, 10 * 1000 * 1000);
1153 if (retval) {
1154 xhci_warn(xhci, "Controller not ready at resume %d\n",
1155 retval);
1156 spin_unlock_irq(&xhci->lock);
1157 return retval;
1158 }
1159
1160 xhci_restore_registers(xhci);
1161
1162 xhci_set_cmd_ring_deq(xhci);
1163
1164
1165 command = readl(&xhci->op_regs->command);
1166 command |= CMD_CRS;
1167 writel(command, &xhci->op_regs->command);
1168
1169
1170
1171
1172
1173 if (xhci_handshake(&xhci->op_regs->status,
1174 STS_RESTORE, 0, 100 * 1000)) {
1175 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
1176 spin_unlock_irq(&xhci->lock);
1177 return -ETIMEDOUT;
1178 }
1179 }
1180
1181 temp = readl(&xhci->op_regs->status);
1182
1183
1184 if (temp & (STS_SRE | STS_HCE)) {
1185 reinit_xhc = true;
1186 xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
1187 }
1188
1189 if (reinit_xhc) {
1190 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1191 !(xhci_all_ports_seen_u0(xhci))) {
1192 del_timer_sync(&xhci->comp_mode_recovery_timer);
1193 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1194 "Compliance Mode Recovery Timer deleted!");
1195 }
1196
1197
1198 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1199 if (xhci->shared_hcd)
1200 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1201
1202 xhci_dbg(xhci, "Stop HCD\n");
1203 xhci_halt(xhci);
1204 xhci_zero_64b_regs(xhci);
1205 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
1206 spin_unlock_irq(&xhci->lock);
1207 if (retval)
1208 return retval;
1209 xhci_cleanup_msix(xhci);
1210
1211 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1212 temp = readl(&xhci->op_regs->status);
1213 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
1214 temp = readl(&xhci->ir_set->irq_pending);
1215 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
1216
1217 xhci_dbg(xhci, "cleaning up memory\n");
1218 xhci_mem_cleanup(xhci);
1219 xhci_debugfs_exit(xhci);
1220 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1221 readl(&xhci->op_regs->status));
1222
1223
1224
1225
1226
1227 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1228 retval = xhci_init(hcd);
1229 if (retval)
1230 return retval;
1231 comp_timer_running = true;
1232
1233 xhci_dbg(xhci, "Start the primary HCD\n");
1234 retval = xhci_run(hcd);
1235 if (!retval && xhci->shared_hcd) {
1236 xhci_dbg(xhci, "Start the secondary HCD\n");
1237 retval = xhci_run(xhci->shared_hcd);
1238 }
1239
1240 hcd->state = HC_STATE_SUSPENDED;
1241 if (xhci->shared_hcd)
1242 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1243 goto done;
1244 }
1245
1246
1247 command = readl(&xhci->op_regs->command);
1248 command |= CMD_RUN;
1249 writel(command, &xhci->op_regs->command);
1250 xhci_handshake(&xhci->op_regs->status, STS_HALT,
1251 0, 250 * 1000);
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262 spin_unlock_irq(&xhci->lock);
1263
1264 xhci_dbc_resume(xhci);
1265
1266 done:
1267 if (retval == 0) {
1268
1269
1270
1271
1272
1273 pending_portevent = xhci_pending_portevent(xhci);
1274 if (!pending_portevent) {
1275 msleep(120);
1276 pending_portevent = xhci_pending_portevent(xhci);
1277 }
1278
1279 if (pending_portevent) {
1280 if (xhci->shared_hcd)
1281 usb_hcd_resume_root_hub(xhci->shared_hcd);
1282 usb_hcd_resume_root_hub(hcd);
1283 }
1284 }
1285
1286
1287
1288
1289
1290
1291 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1292 compliance_mode_recovery_timer_init(xhci);
1293
1294 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1295 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
1296
1297
1298 xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
1299 __func__, hcd->self.busnum);
1300 if (xhci->shared_hcd) {
1301 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1302 usb_hcd_poll_rh_status(xhci->shared_hcd);
1303 }
1304 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1305 usb_hcd_poll_rh_status(hcd);
1306
1307 return retval;
1308 }
1309 EXPORT_SYMBOL_GPL(xhci_resume);
1310 #endif
1311
1312
1313
1314 static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
1315 {
1316 void *temp;
1317 int ret = 0;
1318 unsigned int buf_len;
1319 enum dma_data_direction dir;
1320
1321 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1322 buf_len = urb->transfer_buffer_length;
1323
1324 temp = kzalloc_node(buf_len, GFP_ATOMIC,
1325 dev_to_node(hcd->self.sysdev));
1326
1327 if (usb_urb_dir_out(urb))
1328 sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
1329 temp, buf_len, 0);
1330
1331 urb->transfer_buffer = temp;
1332 urb->transfer_dma = dma_map_single(hcd->self.sysdev,
1333 urb->transfer_buffer,
1334 urb->transfer_buffer_length,
1335 dir);
1336
1337 if (dma_mapping_error(hcd->self.sysdev,
1338 urb->transfer_dma)) {
1339 ret = -EAGAIN;
1340 kfree(temp);
1341 } else {
1342 urb->transfer_flags |= URB_DMA_MAP_SINGLE;
1343 }
1344
1345 return ret;
1346 }
1347
1348 static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
1349 struct urb *urb)
1350 {
1351 bool ret = false;
1352 unsigned int i;
1353 unsigned int len = 0;
1354 unsigned int trb_size;
1355 unsigned int max_pkt;
1356 struct scatterlist *sg;
1357 struct scatterlist *tail_sg;
1358
1359 tail_sg = urb->sg;
1360 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
1361
1362 if (!urb->num_sgs)
1363 return ret;
1364
1365 if (urb->dev->speed >= USB_SPEED_SUPER)
1366 trb_size = TRB_CACHE_SIZE_SS;
1367 else
1368 trb_size = TRB_CACHE_SIZE_HS;
1369
1370 if (urb->transfer_buffer_length != 0 &&
1371 !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
1372 for_each_sg(urb->sg, sg, urb->num_sgs, i) {
1373 len = len + sg->length;
1374 if (i > trb_size - 2) {
1375 len = len - tail_sg->length;
1376 if (len < max_pkt) {
1377 ret = true;
1378 break;
1379 }
1380
1381 tail_sg = sg_next(tail_sg);
1382 }
1383 }
1384 }
1385 return ret;
1386 }
1387
1388 static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
1389 {
1390 unsigned int len;
1391 unsigned int buf_len;
1392 enum dma_data_direction dir;
1393
1394 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1395
1396 buf_len = urb->transfer_buffer_length;
1397
1398 if (IS_ENABLED(CONFIG_HAS_DMA) &&
1399 (urb->transfer_flags & URB_DMA_MAP_SINGLE))
1400 dma_unmap_single(hcd->self.sysdev,
1401 urb->transfer_dma,
1402 urb->transfer_buffer_length,
1403 dir);
1404
1405 if (usb_urb_dir_in(urb)) {
1406 len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
1407 urb->transfer_buffer,
1408 buf_len,
1409 0);
1410 if (len != buf_len) {
1411 xhci_dbg(hcd_to_xhci(hcd),
1412 "Copy from tmp buf to urb sg list failed\n");
1413 urb->actual_length = len;
1414 }
1415 }
1416 urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
1417 kfree(urb->transfer_buffer);
1418 urb->transfer_buffer = NULL;
1419 }
1420
1421
1422
1423
1424
1425
1426
1427 static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1428 gfp_t mem_flags)
1429 {
1430 struct xhci_hcd *xhci;
1431
1432 xhci = hcd_to_xhci(hcd);
1433
1434 if (xhci_urb_suitable_for_idt(urb))
1435 return 0;
1436
1437 if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
1438 if (xhci_urb_temp_buffer_required(hcd, urb))
1439 return xhci_map_temp_buffer(hcd, urb);
1440 }
1441 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
1442 }
1443
1444 static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1445 {
1446 struct xhci_hcd *xhci;
1447 bool unmap_temp_buf = false;
1448
1449 xhci = hcd_to_xhci(hcd);
1450
1451 if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
1452 unmap_temp_buf = true;
1453
1454 if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
1455 xhci_unmap_temp_buf(hcd, urb);
1456 else
1457 usb_hcd_unmap_urb_for_dma(hcd, urb);
1458 }
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470 unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1471 {
1472 unsigned int index;
1473 if (usb_endpoint_xfer_control(desc))
1474 index = (unsigned int) (usb_endpoint_num(desc)*2);
1475 else
1476 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1477 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1478 return index;
1479 }
1480 EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
1481
1482
1483
1484
1485 unsigned int xhci_get_endpoint_address(unsigned int ep_index)
1486 {
1487 unsigned int number = DIV_ROUND_UP(ep_index, 2);
1488 unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
1489 return direction | number;
1490 }
1491
1492
1493
1494
1495
1496 static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1497 {
1498 return 1 << (xhci_get_endpoint_index(desc) + 1);
1499 }
1500
1501
1502
1503
1504
1505
1506
1507 unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1508 {
1509 return fls(added_ctxs) - 1;
1510 }
1511
1512
1513
1514
1515 static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1516 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1517 const char *func) {
1518 struct xhci_hcd *xhci;
1519 struct xhci_virt_device *virt_dev;
1520
1521 if (!hcd || (check_ep && !ep) || !udev) {
1522 pr_debug("xHCI %s called with invalid args\n", func);
1523 return -EINVAL;
1524 }
1525 if (!udev->parent) {
1526 pr_debug("xHCI %s called for root hub\n", func);
1527 return 0;
1528 }
1529
1530 xhci = hcd_to_xhci(hcd);
1531 if (check_virt_dev) {
1532 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1533 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1534 func);
1535 return -EINVAL;
1536 }
1537
1538 virt_dev = xhci->devs[udev->slot_id];
1539 if (virt_dev->udev != udev) {
1540 xhci_dbg(xhci, "xHCI %s called with udev and "
1541 "virt_dev does not match\n", func);
1542 return -EINVAL;
1543 }
1544 }
1545
1546 if (xhci->xhc_state & XHCI_STATE_HALTED)
1547 return -ENODEV;
1548
1549 return 1;
1550 }
1551
1552 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1553 struct usb_device *udev, struct xhci_command *command,
1554 bool ctx_change, bool must_succeed);
1555
1556
1557
1558
1559
1560
1561
1562 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1563 unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
1564 {
1565 struct xhci_container_ctx *out_ctx;
1566 struct xhci_input_control_ctx *ctrl_ctx;
1567 struct xhci_ep_ctx *ep_ctx;
1568 struct xhci_command *command;
1569 int max_packet_size;
1570 int hw_max_packet_size;
1571 int ret = 0;
1572
1573 out_ctx = xhci->devs[slot_id]->out_ctx;
1574 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1575 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1576 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1577 if (hw_max_packet_size != max_packet_size) {
1578 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1579 "Max Packet Size for ep 0 changed.");
1580 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1581 "Max packet size in usb_device = %d",
1582 max_packet_size);
1583 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1584 "Max packet size in xHCI HW = %d",
1585 hw_max_packet_size);
1586 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1587 "Issuing evaluate context command.");
1588
1589
1590
1591
1592
1593
1594 command = xhci_alloc_command(xhci, true, mem_flags);
1595 if (!command)
1596 return -ENOMEM;
1597
1598 command->in_ctx = xhci->devs[slot_id]->in_ctx;
1599 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
1600 if (!ctrl_ctx) {
1601 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1602 __func__);
1603 ret = -ENOMEM;
1604 goto command_cleanup;
1605 }
1606
1607 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1608 xhci->devs[slot_id]->out_ctx, ep_index);
1609
1610 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1611 ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);
1612 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1613 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1614
1615 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1616 ctrl_ctx->drop_flags = 0;
1617
1618 ret = xhci_configure_endpoint(xhci, urb->dev, command,
1619 true, false);
1620
1621
1622
1623
1624 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1625 command_cleanup:
1626 kfree(command->completion);
1627 kfree(command);
1628 }
1629 return ret;
1630 }
1631
1632
1633
1634
1635
1636 static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1637 {
1638 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1639 unsigned long flags;
1640 int ret = 0;
1641 unsigned int slot_id, ep_index;
1642 unsigned int *ep_state;
1643 struct urb_priv *urb_priv;
1644 int num_tds;
1645
1646 if (!urb)
1647 return -EINVAL;
1648 ret = xhci_check_args(hcd, urb->dev, urb->ep,
1649 true, true, __func__);
1650 if (ret <= 0)
1651 return ret ? ret : -EINVAL;
1652
1653 slot_id = urb->dev->slot_id;
1654 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1655 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1656
1657 if (!HCD_HW_ACCESSIBLE(hcd))
1658 return -ESHUTDOWN;
1659
1660 if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1661 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1662 return -ENODEV;
1663 }
1664
1665 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1666 num_tds = urb->number_of_packets;
1667 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
1668 urb->transfer_buffer_length > 0 &&
1669 urb->transfer_flags & URB_ZERO_PACKET &&
1670 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1671 num_tds = 2;
1672 else
1673 num_tds = 1;
1674
1675 urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
1676 if (!urb_priv)
1677 return -ENOMEM;
1678
1679 urb_priv->num_tds = num_tds;
1680 urb_priv->num_tds_done = 0;
1681 urb->hcpriv = urb_priv;
1682
1683 trace_xhci_urb_enqueue(urb);
1684
1685 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1686
1687
1688
1689 if (urb->dev->speed == USB_SPEED_FULL) {
1690 ret = xhci_check_maxpacket(xhci, slot_id,
1691 ep_index, urb, mem_flags);
1692 if (ret < 0) {
1693 xhci_urb_free_priv(urb_priv);
1694 urb->hcpriv = NULL;
1695 return ret;
1696 }
1697 }
1698 }
1699
1700 spin_lock_irqsave(&xhci->lock, flags);
1701
1702 if (xhci->xhc_state & XHCI_STATE_DYING) {
1703 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1704 urb->ep->desc.bEndpointAddress, urb);
1705 ret = -ESHUTDOWN;
1706 goto free_priv;
1707 }
1708 if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
1709 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1710 *ep_state);
1711 ret = -EINVAL;
1712 goto free_priv;
1713 }
1714 if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
1715 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1716 ret = -EINVAL;
1717 goto free_priv;
1718 }
1719
1720 switch (usb_endpoint_type(&urb->ep->desc)) {
1721
1722 case USB_ENDPOINT_XFER_CONTROL:
1723 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1724 slot_id, ep_index);
1725 break;
1726 case USB_ENDPOINT_XFER_BULK:
1727 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1728 slot_id, ep_index);
1729 break;
1730 case USB_ENDPOINT_XFER_INT:
1731 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1732 slot_id, ep_index);
1733 break;
1734 case USB_ENDPOINT_XFER_ISOC:
1735 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1736 slot_id, ep_index);
1737 }
1738
1739 if (ret) {
1740 free_priv:
1741 xhci_urb_free_priv(urb_priv);
1742 urb->hcpriv = NULL;
1743 }
1744 spin_unlock_irqrestore(&xhci->lock, flags);
1745 return ret;
1746 }
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779 static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1780 {
1781 unsigned long flags;
1782 int ret, i;
1783 u32 temp;
1784 struct xhci_hcd *xhci;
1785 struct urb_priv *urb_priv;
1786 struct xhci_td *td;
1787 unsigned int ep_index;
1788 struct xhci_ring *ep_ring;
1789 struct xhci_virt_ep *ep;
1790 struct xhci_command *command;
1791 struct xhci_virt_device *vdev;
1792
1793 xhci = hcd_to_xhci(hcd);
1794 spin_lock_irqsave(&xhci->lock, flags);
1795
1796 trace_xhci_urb_dequeue(urb);
1797
1798
1799 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1800 if (ret)
1801 goto done;
1802
1803
1804 vdev = xhci->devs[urb->dev->slot_id];
1805 urb_priv = urb->hcpriv;
1806 if (!vdev || !urb_priv)
1807 goto err_giveback;
1808
1809 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1810 ep = &vdev->eps[ep_index];
1811 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1812 if (!ep || !ep_ring)
1813 goto err_giveback;
1814
1815
1816 temp = readl(&xhci->op_regs->status);
1817 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1818 xhci_hc_died(xhci);
1819 goto done;
1820 }
1821
1822
1823
1824
1825
1826
1827 if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1828 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1829 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1830 td = &urb_priv->td[i];
1831 if (!list_empty(&td->cancelled_td_list))
1832 list_del_init(&td->cancelled_td_list);
1833 }
1834 goto err_giveback;
1835 }
1836
1837 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1838 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1839 "HC halted, freeing TD manually.");
1840 for (i = urb_priv->num_tds_done;
1841 i < urb_priv->num_tds;
1842 i++) {
1843 td = &urb_priv->td[i];
1844 if (!list_empty(&td->td_list))
1845 list_del_init(&td->td_list);
1846 if (!list_empty(&td->cancelled_td_list))
1847 list_del_init(&td->cancelled_td_list);
1848 }
1849 goto err_giveback;
1850 }
1851
1852 i = urb_priv->num_tds_done;
1853 if (i < urb_priv->num_tds)
1854 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1855 "Cancel URB %p, dev %s, ep 0x%x, "
1856 "starting at offset 0x%llx",
1857 urb, urb->dev->devpath,
1858 urb->ep->desc.bEndpointAddress,
1859 (unsigned long long) xhci_trb_virt_to_dma(
1860 urb_priv->td[i].start_seg,
1861 urb_priv->td[i].first_trb));
1862
1863 for (; i < urb_priv->num_tds; i++) {
1864 td = &urb_priv->td[i];
1865
1866 if (list_empty(&td->cancelled_td_list)) {
1867 td->cancel_status = TD_DIRTY;
1868 list_add_tail(&td->cancelled_td_list,
1869 &ep->cancelled_td_list);
1870 }
1871 }
1872
1873
1874
1875
1876 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1877 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1878 if (!command) {
1879 ret = -ENOMEM;
1880 goto done;
1881 }
1882 ep->ep_state |= EP_STOP_CMD_PENDING;
1883 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1884 ep_index, 0);
1885 xhci_ring_cmd_db(xhci);
1886 }
1887 done:
1888 spin_unlock_irqrestore(&xhci->lock, flags);
1889 return ret;
1890
1891 err_giveback:
1892 if (urb_priv)
1893 xhci_urb_free_priv(urb_priv);
1894 usb_hcd_unlink_urb_from_ep(hcd, urb);
1895 spin_unlock_irqrestore(&xhci->lock, flags);
1896 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1897 return ret;
1898 }
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1914 struct usb_host_endpoint *ep)
1915 {
1916 struct xhci_hcd *xhci;
1917 struct xhci_container_ctx *in_ctx, *out_ctx;
1918 struct xhci_input_control_ctx *ctrl_ctx;
1919 unsigned int ep_index;
1920 struct xhci_ep_ctx *ep_ctx;
1921 u32 drop_flag;
1922 u32 new_add_flags, new_drop_flags;
1923 int ret;
1924
1925 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1926 if (ret <= 0)
1927 return ret;
1928 xhci = hcd_to_xhci(hcd);
1929 if (xhci->xhc_state & XHCI_STATE_DYING)
1930 return -ENODEV;
1931
1932 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1933 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1934 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1935 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1936 __func__, drop_flag);
1937 return 0;
1938 }
1939
1940 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1941 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1942 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1943 if (!ctrl_ctx) {
1944 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1945 __func__);
1946 return 0;
1947 }
1948
1949 ep_index = xhci_get_endpoint_index(&ep->desc);
1950 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1951
1952
1953
1954 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1955 le32_to_cpu(ctrl_ctx->drop_flags) &
1956 xhci_get_endpoint_flag(&ep->desc)) {
1957
1958 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1959 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1960 __func__, ep);
1961 return 0;
1962 }
1963
1964 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1965 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1966
1967 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1968 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1969
1970 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1971
1972 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1973
1974 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1975 (unsigned int) ep->desc.bEndpointAddress,
1976 udev->slot_id,
1977 (unsigned int) new_drop_flags,
1978 (unsigned int) new_add_flags);
1979 return 0;
1980 }
1981 EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1997 struct usb_host_endpoint *ep)
1998 {
1999 struct xhci_hcd *xhci;
2000 struct xhci_container_ctx *in_ctx;
2001 unsigned int ep_index;
2002 struct xhci_input_control_ctx *ctrl_ctx;
2003 struct xhci_ep_ctx *ep_ctx;
2004 u32 added_ctxs;
2005 u32 new_add_flags, new_drop_flags;
2006 struct xhci_virt_device *virt_dev;
2007 int ret = 0;
2008
2009 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
2010 if (ret <= 0) {
2011
2012 ep->hcpriv = NULL;
2013 return ret;
2014 }
2015 xhci = hcd_to_xhci(hcd);
2016 if (xhci->xhc_state & XHCI_STATE_DYING)
2017 return -ENODEV;
2018
2019 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
2020 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
2021
2022
2023
2024
2025 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
2026 __func__, added_ctxs);
2027 return 0;
2028 }
2029
2030 virt_dev = xhci->devs[udev->slot_id];
2031 in_ctx = virt_dev->in_ctx;
2032 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2033 if (!ctrl_ctx) {
2034 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2035 __func__);
2036 return 0;
2037 }
2038
2039 ep_index = xhci_get_endpoint_index(&ep->desc);
2040
2041
2042
2043 if (virt_dev->eps[ep_index].ring &&
2044 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
2045 xhci_warn(xhci, "Trying to add endpoint 0x%x "
2046 "without dropping it.\n",
2047 (unsigned int) ep->desc.bEndpointAddress);
2048 return -EINVAL;
2049 }
2050
2051
2052
2053
2054 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
2055 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
2056 __func__, ep);
2057 return 0;
2058 }
2059
2060
2061
2062
2063
2064
2065 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
2066 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
2067 __func__, ep->desc.bEndpointAddress);
2068 return -ENOMEM;
2069 }
2070
2071 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
2072 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
2073
2074
2075
2076
2077
2078
2079
2080 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
2081
2082
2083 ep->hcpriv = udev;
2084
2085 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2086 trace_xhci_add_endpoint(ep_ctx);
2087
2088 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
2089 (unsigned int) ep->desc.bEndpointAddress,
2090 udev->slot_id,
2091 (unsigned int) new_drop_flags,
2092 (unsigned int) new_add_flags);
2093 return 0;
2094 }
2095 EXPORT_SYMBOL_GPL(xhci_add_endpoint);
2096
2097 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
2098 {
2099 struct xhci_input_control_ctx *ctrl_ctx;
2100 struct xhci_ep_ctx *ep_ctx;
2101 struct xhci_slot_ctx *slot_ctx;
2102 int i;
2103
2104 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
2105 if (!ctrl_ctx) {
2106 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2107 __func__);
2108 return;
2109 }
2110
2111
2112
2113
2114
2115
2116 ctrl_ctx->drop_flags = 0;
2117 ctrl_ctx->add_flags = 0;
2118 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2119 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2120
2121 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
2122 for (i = 1; i < 31; i++) {
2123 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2124 ep_ctx->ep_info = 0;
2125 ep_ctx->ep_info2 = 0;
2126 ep_ctx->deq = 0;
2127 ep_ctx->tx_info = 0;
2128 }
2129 }
2130
2131 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
2132 struct usb_device *udev, u32 *cmd_status)
2133 {
2134 int ret;
2135
2136 switch (*cmd_status) {
2137 case COMP_COMMAND_ABORTED:
2138 case COMP_COMMAND_RING_STOPPED:
2139 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2140 ret = -ETIME;
2141 break;
2142 case COMP_RESOURCE_ERROR:
2143 dev_warn(&udev->dev,
2144 "Not enough host controller resources for new device state.\n");
2145 ret = -ENOMEM;
2146
2147 break;
2148 case COMP_BANDWIDTH_ERROR:
2149 case COMP_SECONDARY_BANDWIDTH_ERROR:
2150 dev_warn(&udev->dev,
2151 "Not enough bandwidth for new device state.\n");
2152 ret = -ENOSPC;
2153
2154 break;
2155 case COMP_TRB_ERROR:
2156
2157 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
2158 "add flag = 1, "
2159 "and endpoint is not disabled.\n");
2160 ret = -EINVAL;
2161 break;
2162 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2163 dev_warn(&udev->dev,
2164 "ERROR: Incompatible device for endpoint configure command.\n");
2165 ret = -ENODEV;
2166 break;
2167 case COMP_SUCCESS:
2168 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2169 "Successful Endpoint Configure command");
2170 ret = 0;
2171 break;
2172 default:
2173 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2174 *cmd_status);
2175 ret = -EINVAL;
2176 break;
2177 }
2178 return ret;
2179 }
2180
2181 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2182 struct usb_device *udev, u32 *cmd_status)
2183 {
2184 int ret;
2185
2186 switch (*cmd_status) {
2187 case COMP_COMMAND_ABORTED:
2188 case COMP_COMMAND_RING_STOPPED:
2189 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2190 ret = -ETIME;
2191 break;
2192 case COMP_PARAMETER_ERROR:
2193 dev_warn(&udev->dev,
2194 "WARN: xHCI driver setup invalid evaluate context command.\n");
2195 ret = -EINVAL;
2196 break;
2197 case COMP_SLOT_NOT_ENABLED_ERROR:
2198 dev_warn(&udev->dev,
2199 "WARN: slot not enabled for evaluate context command.\n");
2200 ret = -EINVAL;
2201 break;
2202 case COMP_CONTEXT_STATE_ERROR:
2203 dev_warn(&udev->dev,
2204 "WARN: invalid context state for evaluate context command.\n");
2205 ret = -EINVAL;
2206 break;
2207 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2208 dev_warn(&udev->dev,
2209 "ERROR: Incompatible device for evaluate context command.\n");
2210 ret = -ENODEV;
2211 break;
2212 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2213
2214 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2215 ret = -EINVAL;
2216 break;
2217 case COMP_SUCCESS:
2218 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2219 "Successful evaluate context command");
2220 ret = 0;
2221 break;
2222 default:
2223 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2224 *cmd_status);
2225 ret = -EINVAL;
2226 break;
2227 }
2228 return ret;
2229 }
2230
2231 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2232 struct xhci_input_control_ctx *ctrl_ctx)
2233 {
2234 u32 valid_add_flags;
2235 u32 valid_drop_flags;
2236
2237
2238
2239
2240
2241 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2242 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2243
2244
2245
2246
2247
2248 return hweight32(valid_add_flags) -
2249 hweight32(valid_add_flags & valid_drop_flags);
2250 }
2251
2252 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2253 struct xhci_input_control_ctx *ctrl_ctx)
2254 {
2255 u32 valid_add_flags;
2256 u32 valid_drop_flags;
2257
2258 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2259 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2260
2261 return hweight32(valid_drop_flags) -
2262 hweight32(valid_add_flags & valid_drop_flags);
2263 }
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2279 struct xhci_input_control_ctx *ctrl_ctx)
2280 {
2281 u32 added_eps;
2282
2283 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2284 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2285 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2286 "Not enough ep ctxs: "
2287 "%u active, need to add %u, limit is %u.",
2288 xhci->num_active_eps, added_eps,
2289 xhci->limit_active_eps);
2290 return -ENOMEM;
2291 }
2292 xhci->num_active_eps += added_eps;
2293 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2294 "Adding %u ep ctxs, %u now active.", added_eps,
2295 xhci->num_active_eps);
2296 return 0;
2297 }
2298
2299
2300
2301
2302
2303
2304
2305 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2306 struct xhci_input_control_ctx *ctrl_ctx)
2307 {
2308 u32 num_failed_eps;
2309
2310 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2311 xhci->num_active_eps -= num_failed_eps;
2312 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2313 "Removing %u failed ep ctxs, %u now active.",
2314 num_failed_eps,
2315 xhci->num_active_eps);
2316 }
2317
2318
2319
2320
2321
2322
2323
2324 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2325 struct xhci_input_control_ctx *ctrl_ctx)
2326 {
2327 u32 num_dropped_eps;
2328
2329 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2330 xhci->num_active_eps -= num_dropped_eps;
2331 if (num_dropped_eps)
2332 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2333 "Removing %u dropped ep ctxs, %u now active.",
2334 num_dropped_eps,
2335 xhci->num_active_eps);
2336 }
2337
2338 static unsigned int xhci_get_block_size(struct usb_device *udev)
2339 {
2340 switch (udev->speed) {
2341 case USB_SPEED_LOW:
2342 case USB_SPEED_FULL:
2343 return FS_BLOCK;
2344 case USB_SPEED_HIGH:
2345 return HS_BLOCK;
2346 case USB_SPEED_SUPER:
2347 case USB_SPEED_SUPER_PLUS:
2348 return SS_BLOCK;
2349 case USB_SPEED_UNKNOWN:
2350 case USB_SPEED_WIRELESS:
2351 default:
2352
2353 return 1;
2354 }
2355 }
2356
2357 static unsigned int
2358 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2359 {
2360 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2361 return LS_OVERHEAD;
2362 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2363 return FS_OVERHEAD;
2364 return HS_OVERHEAD;
2365 }
2366
2367
2368
2369
2370
2371 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2372 struct xhci_virt_device *virt_dev,
2373 int old_active_eps)
2374 {
2375 struct xhci_interval_bw_table *bw_table;
2376 struct xhci_tt_bw_info *tt_info;
2377
2378
2379 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2380 tt_info = virt_dev->tt_info;
2381
2382
2383
2384
2385 if (old_active_eps)
2386 return 0;
2387 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2388 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2389 return -ENOMEM;
2390 return 0;
2391 }
2392
2393
2394
2395
2396
2397
2398 return 0;
2399 }
2400
2401 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2402 struct xhci_virt_device *virt_dev)
2403 {
2404 unsigned int bw_reserved;
2405
2406 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2407 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2408 return -ENOMEM;
2409
2410 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2411 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2412 return -ENOMEM;
2413
2414 return 0;
2415 }
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2459 struct xhci_virt_device *virt_dev,
2460 int old_active_eps)
2461 {
2462 unsigned int bw_reserved;
2463 unsigned int max_bandwidth;
2464 unsigned int bw_used;
2465 unsigned int block_size;
2466 struct xhci_interval_bw_table *bw_table;
2467 unsigned int packet_size = 0;
2468 unsigned int overhead = 0;
2469 unsigned int packets_transmitted = 0;
2470 unsigned int packets_remaining = 0;
2471 unsigned int i;
2472
2473 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2474 return xhci_check_ss_bw(xhci, virt_dev);
2475
2476 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2477 max_bandwidth = HS_BW_LIMIT;
2478
2479 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2480 } else {
2481 max_bandwidth = FS_BW_LIMIT;
2482 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2483 }
2484
2485 bw_table = virt_dev->bw_table;
2486
2487
2488
2489 block_size = xhci_get_block_size(virt_dev->udev);
2490
2491
2492
2493
2494 if (virt_dev->tt_info) {
2495 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2496 "Recalculating BW for rootport %u",
2497 virt_dev->real_port);
2498 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2499 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2500 "newly activated TT.\n");
2501 return -ENOMEM;
2502 }
2503 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2504 "Recalculating BW for TT slot %u port %u",
2505 virt_dev->tt_info->slot_id,
2506 virt_dev->tt_info->ttport);
2507 } else {
2508 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2509 "Recalculating BW for rootport %u",
2510 virt_dev->real_port);
2511 }
2512
2513
2514
2515
2516 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2517 bw_table->interval_bw[0].num_packets *
2518 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2519
2520 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2521 unsigned int bw_added;
2522 unsigned int largest_mps;
2523 unsigned int interval_overhead;
2524
2525
2526
2527
2528
2529
2530 packets_remaining = 2 * packets_remaining +
2531 bw_table->interval_bw[i].num_packets;
2532
2533
2534
2535
2536 if (list_empty(&bw_table->interval_bw[i].endpoints))
2537 largest_mps = 0;
2538 else {
2539 struct xhci_virt_ep *virt_ep;
2540 struct list_head *ep_entry;
2541
2542 ep_entry = bw_table->interval_bw[i].endpoints.next;
2543 virt_ep = list_entry(ep_entry,
2544 struct xhci_virt_ep, bw_endpoint_list);
2545
2546 largest_mps = DIV_ROUND_UP(
2547 virt_ep->bw_info.max_packet_size,
2548 block_size);
2549 }
2550 if (largest_mps > packet_size)
2551 packet_size = largest_mps;
2552
2553
2554 interval_overhead = xhci_get_largest_overhead(
2555 &bw_table->interval_bw[i]);
2556 if (interval_overhead > overhead)
2557 overhead = interval_overhead;
2558
2559
2560
2561
2562 packets_transmitted = packets_remaining >> (i + 1);
2563
2564
2565 bw_added = packets_transmitted * (overhead + packet_size);
2566
2567
2568 packets_remaining = packets_remaining % (1 << (i + 1));
2569
2570
2571
2572
2573
2574 if (packets_remaining == 0) {
2575 packet_size = 0;
2576 overhead = 0;
2577 } else if (packets_transmitted > 0) {
2578
2579
2580
2581
2582
2583 packet_size = largest_mps;
2584 overhead = interval_overhead;
2585 }
2586
2587
2588
2589 bw_used += bw_added;
2590 if (bw_used > max_bandwidth) {
2591 xhci_warn(xhci, "Not enough bandwidth. "
2592 "Proposed: %u, Max: %u\n",
2593 bw_used, max_bandwidth);
2594 return -ENOMEM;
2595 }
2596 }
2597
2598
2599
2600
2601
2602
2603 if (packets_remaining > 0)
2604 bw_used += overhead + packet_size;
2605
2606 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2607 unsigned int port_index = virt_dev->real_port - 1;
2608
2609
2610
2611
2612
2613 bw_used += TT_HS_OVERHEAD *
2614 xhci->rh_bw[port_index].num_active_tts;
2615 }
2616
2617 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2618 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2619 "Available: %u " "percent",
2620 bw_used, max_bandwidth, bw_reserved,
2621 (max_bandwidth - bw_used - bw_reserved) * 100 /
2622 max_bandwidth);
2623
2624 bw_used += bw_reserved;
2625 if (bw_used > max_bandwidth) {
2626 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2627 bw_used, max_bandwidth);
2628 return -ENOMEM;
2629 }
2630
2631 bw_table->bw_used = bw_used;
2632 return 0;
2633 }
2634
2635 static bool xhci_is_async_ep(unsigned int ep_type)
2636 {
2637 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2638 ep_type != ISOC_IN_EP &&
2639 ep_type != INT_IN_EP);
2640 }
2641
2642 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2643 {
2644 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2645 }
2646
2647 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2648 {
2649 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2650
2651 if (ep_bw->ep_interval == 0)
2652 return SS_OVERHEAD_BURST +
2653 (ep_bw->mult * ep_bw->num_packets *
2654 (SS_OVERHEAD + mps));
2655 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2656 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2657 1 << ep_bw->ep_interval);
2658
2659 }
2660
2661 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2662 struct xhci_bw_info *ep_bw,
2663 struct xhci_interval_bw_table *bw_table,
2664 struct usb_device *udev,
2665 struct xhci_virt_ep *virt_ep,
2666 struct xhci_tt_bw_info *tt_info)
2667 {
2668 struct xhci_interval_bw *interval_bw;
2669 int normalized_interval;
2670
2671 if (xhci_is_async_ep(ep_bw->type))
2672 return;
2673
2674 if (udev->speed >= USB_SPEED_SUPER) {
2675 if (xhci_is_sync_in_ep(ep_bw->type))
2676 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2677 xhci_get_ss_bw_consumed(ep_bw);
2678 else
2679 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2680 xhci_get_ss_bw_consumed(ep_bw);
2681 return;
2682 }
2683
2684
2685
2686
2687 if (list_empty(&virt_ep->bw_endpoint_list))
2688 return;
2689
2690
2691
2692 if (udev->speed == USB_SPEED_HIGH)
2693 normalized_interval = ep_bw->ep_interval;
2694 else
2695 normalized_interval = ep_bw->ep_interval - 3;
2696
2697 if (normalized_interval == 0)
2698 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2699 interval_bw = &bw_table->interval_bw[normalized_interval];
2700 interval_bw->num_packets -= ep_bw->num_packets;
2701 switch (udev->speed) {
2702 case USB_SPEED_LOW:
2703 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2704 break;
2705 case USB_SPEED_FULL:
2706 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2707 break;
2708 case USB_SPEED_HIGH:
2709 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2710 break;
2711 case USB_SPEED_SUPER:
2712 case USB_SPEED_SUPER_PLUS:
2713 case USB_SPEED_UNKNOWN:
2714 case USB_SPEED_WIRELESS:
2715
2716
2717
2718 return;
2719 }
2720 if (tt_info)
2721 tt_info->active_eps -= 1;
2722 list_del_init(&virt_ep->bw_endpoint_list);
2723 }
2724
2725 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2726 struct xhci_bw_info *ep_bw,
2727 struct xhci_interval_bw_table *bw_table,
2728 struct usb_device *udev,
2729 struct xhci_virt_ep *virt_ep,
2730 struct xhci_tt_bw_info *tt_info)
2731 {
2732 struct xhci_interval_bw *interval_bw;
2733 struct xhci_virt_ep *smaller_ep;
2734 int normalized_interval;
2735
2736 if (xhci_is_async_ep(ep_bw->type))
2737 return;
2738
2739 if (udev->speed == USB_SPEED_SUPER) {
2740 if (xhci_is_sync_in_ep(ep_bw->type))
2741 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2742 xhci_get_ss_bw_consumed(ep_bw);
2743 else
2744 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2745 xhci_get_ss_bw_consumed(ep_bw);
2746 return;
2747 }
2748
2749
2750
2751
2752 if (udev->speed == USB_SPEED_HIGH)
2753 normalized_interval = ep_bw->ep_interval;
2754 else
2755 normalized_interval = ep_bw->ep_interval - 3;
2756
2757 if (normalized_interval == 0)
2758 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2759 interval_bw = &bw_table->interval_bw[normalized_interval];
2760 interval_bw->num_packets += ep_bw->num_packets;
2761 switch (udev->speed) {
2762 case USB_SPEED_LOW:
2763 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2764 break;
2765 case USB_SPEED_FULL:
2766 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2767 break;
2768 case USB_SPEED_HIGH:
2769 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2770 break;
2771 case USB_SPEED_SUPER:
2772 case USB_SPEED_SUPER_PLUS:
2773 case USB_SPEED_UNKNOWN:
2774 case USB_SPEED_WIRELESS:
2775
2776
2777
2778 return;
2779 }
2780
2781 if (tt_info)
2782 tt_info->active_eps += 1;
2783
2784 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2785 bw_endpoint_list) {
2786 if (ep_bw->max_packet_size >=
2787 smaller_ep->bw_info.max_packet_size) {
2788
2789 list_add_tail(&virt_ep->bw_endpoint_list,
2790 &smaller_ep->bw_endpoint_list);
2791 return;
2792 }
2793 }
2794
2795 list_add_tail(&virt_ep->bw_endpoint_list,
2796 &interval_bw->endpoints);
2797 }
2798
2799 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2800 struct xhci_virt_device *virt_dev,
2801 int old_active_eps)
2802 {
2803 struct xhci_root_port_bw_info *rh_bw_info;
2804 if (!virt_dev->tt_info)
2805 return;
2806
2807 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2808 if (old_active_eps == 0 &&
2809 virt_dev->tt_info->active_eps != 0) {
2810 rh_bw_info->num_active_tts += 1;
2811 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2812 } else if (old_active_eps != 0 &&
2813 virt_dev->tt_info->active_eps == 0) {
2814 rh_bw_info->num_active_tts -= 1;
2815 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2816 }
2817 }
2818
2819 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2820 struct xhci_virt_device *virt_dev,
2821 struct xhci_container_ctx *in_ctx)
2822 {
2823 struct xhci_bw_info ep_bw_info[31];
2824 int i;
2825 struct xhci_input_control_ctx *ctrl_ctx;
2826 int old_active_eps = 0;
2827
2828 if (virt_dev->tt_info)
2829 old_active_eps = virt_dev->tt_info->active_eps;
2830
2831 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2832 if (!ctrl_ctx) {
2833 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2834 __func__);
2835 return -ENOMEM;
2836 }
2837
2838 for (i = 0; i < 31; i++) {
2839 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2840 continue;
2841
2842
2843 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2844 sizeof(ep_bw_info[i]));
2845
2846
2847
2848 if (EP_IS_DROPPED(ctrl_ctx, i))
2849 xhci_drop_ep_from_interval_table(xhci,
2850 &virt_dev->eps[i].bw_info,
2851 virt_dev->bw_table,
2852 virt_dev->udev,
2853 &virt_dev->eps[i],
2854 virt_dev->tt_info);
2855 }
2856
2857 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2858 for (i = 0; i < 31; i++) {
2859
2860 if (EP_IS_ADDED(ctrl_ctx, i))
2861 xhci_add_ep_to_interval_table(xhci,
2862 &virt_dev->eps[i].bw_info,
2863 virt_dev->bw_table,
2864 virt_dev->udev,
2865 &virt_dev->eps[i],
2866 virt_dev->tt_info);
2867 }
2868
2869 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2870
2871
2872
2873 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2874 return 0;
2875 }
2876
2877
2878 for (i = 0; i < 31; i++) {
2879 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2880 continue;
2881
2882
2883
2884
2885 if (EP_IS_ADDED(ctrl_ctx, i)) {
2886 xhci_drop_ep_from_interval_table(xhci,
2887 &virt_dev->eps[i].bw_info,
2888 virt_dev->bw_table,
2889 virt_dev->udev,
2890 &virt_dev->eps[i],
2891 virt_dev->tt_info);
2892 }
2893
2894 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2895 sizeof(ep_bw_info[i]));
2896
2897 if (EP_IS_DROPPED(ctrl_ctx, i))
2898 xhci_add_ep_to_interval_table(xhci,
2899 &virt_dev->eps[i].bw_info,
2900 virt_dev->bw_table,
2901 virt_dev->udev,
2902 &virt_dev->eps[i],
2903 virt_dev->tt_info);
2904 }
2905 return -ENOMEM;
2906 }
2907
2908
2909
2910
2911
2912 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2913 struct usb_device *udev,
2914 struct xhci_command *command,
2915 bool ctx_change, bool must_succeed)
2916 {
2917 int ret;
2918 unsigned long flags;
2919 struct xhci_input_control_ctx *ctrl_ctx;
2920 struct xhci_virt_device *virt_dev;
2921 struct xhci_slot_ctx *slot_ctx;
2922
2923 if (!command)
2924 return -EINVAL;
2925
2926 spin_lock_irqsave(&xhci->lock, flags);
2927
2928 if (xhci->xhc_state & XHCI_STATE_DYING) {
2929 spin_unlock_irqrestore(&xhci->lock, flags);
2930 return -ESHUTDOWN;
2931 }
2932
2933 virt_dev = xhci->devs[udev->slot_id];
2934
2935 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2936 if (!ctrl_ctx) {
2937 spin_unlock_irqrestore(&xhci->lock, flags);
2938 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2939 __func__);
2940 return -ENOMEM;
2941 }
2942
2943 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2944 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2945 spin_unlock_irqrestore(&xhci->lock, flags);
2946 xhci_warn(xhci, "Not enough host resources, "
2947 "active endpoint contexts = %u\n",
2948 xhci->num_active_eps);
2949 return -ENOMEM;
2950 }
2951 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2952 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2953 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2954 xhci_free_host_resources(xhci, ctrl_ctx);
2955 spin_unlock_irqrestore(&xhci->lock, flags);
2956 xhci_warn(xhci, "Not enough bandwidth\n");
2957 return -ENOMEM;
2958 }
2959
2960 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2961
2962 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2963 trace_xhci_configure_endpoint(slot_ctx);
2964
2965 if (!ctx_change)
2966 ret = xhci_queue_configure_endpoint(xhci, command,
2967 command->in_ctx->dma,
2968 udev->slot_id, must_succeed);
2969 else
2970 ret = xhci_queue_evaluate_context(xhci, command,
2971 command->in_ctx->dma,
2972 udev->slot_id, must_succeed);
2973 if (ret < 0) {
2974 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2975 xhci_free_host_resources(xhci, ctrl_ctx);
2976 spin_unlock_irqrestore(&xhci->lock, flags);
2977 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2978 "FIXME allocate a new ring segment");
2979 return -ENOMEM;
2980 }
2981 xhci_ring_cmd_db(xhci);
2982 spin_unlock_irqrestore(&xhci->lock, flags);
2983
2984
2985 wait_for_completion(command->completion);
2986
2987 if (!ctx_change)
2988 ret = xhci_configure_endpoint_result(xhci, udev,
2989 &command->status);
2990 else
2991 ret = xhci_evaluate_context_result(xhci, udev,
2992 &command->status);
2993
2994 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2995 spin_lock_irqsave(&xhci->lock, flags);
2996
2997
2998
2999 if (ret)
3000 xhci_free_host_resources(xhci, ctrl_ctx);
3001 else
3002 xhci_finish_resource_reservation(xhci, ctrl_ctx);
3003 spin_unlock_irqrestore(&xhci->lock, flags);
3004 }
3005 return ret;
3006 }
3007
3008 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
3009 struct xhci_virt_device *vdev, int i)
3010 {
3011 struct xhci_virt_ep *ep = &vdev->eps[i];
3012
3013 if (ep->ep_state & EP_HAS_STREAMS) {
3014 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
3015 xhci_get_endpoint_address(i));
3016 xhci_free_stream_info(xhci, ep->stream_info);
3017 ep->stream_info = NULL;
3018 ep->ep_state &= ~EP_HAS_STREAMS;
3019 }
3020 }
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3033 {
3034 int i;
3035 int ret = 0;
3036 struct xhci_hcd *xhci;
3037 struct xhci_virt_device *virt_dev;
3038 struct xhci_input_control_ctx *ctrl_ctx;
3039 struct xhci_slot_ctx *slot_ctx;
3040 struct xhci_command *command;
3041
3042 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3043 if (ret <= 0)
3044 return ret;
3045 xhci = hcd_to_xhci(hcd);
3046 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3047 (xhci->xhc_state & XHCI_STATE_REMOVING))
3048 return -ENODEV;
3049
3050 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3051 virt_dev = xhci->devs[udev->slot_id];
3052
3053 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3054 if (!command)
3055 return -ENOMEM;
3056
3057 command->in_ctx = virt_dev->in_ctx;
3058
3059
3060 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3061 if (!ctrl_ctx) {
3062 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3063 __func__);
3064 ret = -ENOMEM;
3065 goto command_cleanup;
3066 }
3067 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3068 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
3069 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
3070
3071
3072 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
3073 ctrl_ctx->drop_flags == 0) {
3074 ret = 0;
3075 goto command_cleanup;
3076 }
3077
3078 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3079 for (i = 31; i >= 1; i--) {
3080 __le32 le32 = cpu_to_le32(BIT(i));
3081
3082 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
3083 || (ctrl_ctx->add_flags & le32) || i == 1) {
3084 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
3085 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
3086 break;
3087 }
3088 }
3089
3090 ret = xhci_configure_endpoint(xhci, udev, command,
3091 false, false);
3092 if (ret)
3093
3094 goto command_cleanup;
3095
3096
3097 for (i = 1; i < 31; i++) {
3098 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
3099 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
3100 xhci_free_endpoint_ring(xhci, virt_dev, i);
3101 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3102 }
3103 }
3104 xhci_zero_in_ctx(xhci, virt_dev);
3105
3106
3107
3108
3109 for (i = 1; i < 31; i++) {
3110 if (!virt_dev->eps[i].new_ring)
3111 continue;
3112
3113
3114
3115 if (virt_dev->eps[i].ring) {
3116 xhci_free_endpoint_ring(xhci, virt_dev, i);
3117 }
3118 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3119 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
3120 virt_dev->eps[i].new_ring = NULL;
3121 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
3122 }
3123 command_cleanup:
3124 kfree(command->completion);
3125 kfree(command);
3126
3127 return ret;
3128 }
3129 EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
3130
3131 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3132 {
3133 struct xhci_hcd *xhci;
3134 struct xhci_virt_device *virt_dev;
3135 int i, ret;
3136
3137 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3138 if (ret <= 0)
3139 return;
3140 xhci = hcd_to_xhci(hcd);
3141
3142 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3143 virt_dev = xhci->devs[udev->slot_id];
3144
3145 for (i = 0; i < 31; i++) {
3146 if (virt_dev->eps[i].new_ring) {
3147 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3148 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3149 virt_dev->eps[i].new_ring = NULL;
3150 }
3151 }
3152 xhci_zero_in_ctx(xhci, virt_dev);
3153 }
3154 EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3155
3156 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3157 struct xhci_container_ctx *in_ctx,
3158 struct xhci_container_ctx *out_ctx,
3159 struct xhci_input_control_ctx *ctrl_ctx,
3160 u32 add_flags, u32 drop_flags)
3161 {
3162 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
3163 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3164 xhci_slot_copy(xhci, in_ctx, out_ctx);
3165 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3166 }
3167
3168 static void xhci_endpoint_disable(struct usb_hcd *hcd,
3169 struct usb_host_endpoint *host_ep)
3170 {
3171 struct xhci_hcd *xhci;
3172 struct xhci_virt_device *vdev;
3173 struct xhci_virt_ep *ep;
3174 struct usb_device *udev;
3175 unsigned long flags;
3176 unsigned int ep_index;
3177
3178 xhci = hcd_to_xhci(hcd);
3179 rescan:
3180 spin_lock_irqsave(&xhci->lock, flags);
3181
3182 udev = (struct usb_device *)host_ep->hcpriv;
3183 if (!udev || !udev->slot_id)
3184 goto done;
3185
3186 vdev = xhci->devs[udev->slot_id];
3187 if (!vdev)
3188 goto done;
3189
3190 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3191 ep = &vdev->eps[ep_index];
3192
3193
3194 if (ep->ep_state & EP_CLEARING_TT) {
3195 spin_unlock_irqrestore(&xhci->lock, flags);
3196 schedule_timeout_uninterruptible(1);
3197 goto rescan;
3198 }
3199
3200 if (ep->ep_state)
3201 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3202 ep->ep_state);
3203 done:
3204 host_ep->hcpriv = NULL;
3205 spin_unlock_irqrestore(&xhci->lock, flags);
3206 }
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220 static void xhci_endpoint_reset(struct usb_hcd *hcd,
3221 struct usb_host_endpoint *host_ep)
3222 {
3223 struct xhci_hcd *xhci;
3224 struct usb_device *udev;
3225 struct xhci_virt_device *vdev;
3226 struct xhci_virt_ep *ep;
3227 struct xhci_input_control_ctx *ctrl_ctx;
3228 struct xhci_command *stop_cmd, *cfg_cmd;
3229 unsigned int ep_index;
3230 unsigned long flags;
3231 u32 ep_flag;
3232 int err;
3233
3234 xhci = hcd_to_xhci(hcd);
3235 if (!host_ep->hcpriv)
3236 return;
3237 udev = (struct usb_device *) host_ep->hcpriv;
3238 vdev = xhci->devs[udev->slot_id];
3239
3240
3241
3242
3243
3244
3245 if (!udev->slot_id || !vdev)
3246 return;
3247 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3248 ep = &vdev->eps[ep_index];
3249
3250
3251 spin_lock_irqsave(&xhci->lock, flags);
3252 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3253 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3254 spin_unlock_irqrestore(&xhci->lock, flags);
3255 return;
3256 }
3257 spin_unlock_irqrestore(&xhci->lock, flags);
3258
3259 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3260 usb_endpoint_xfer_isoc(&host_ep->desc))
3261 return;
3262
3263 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3264
3265 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3266 return;
3267
3268 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3269 if (!stop_cmd)
3270 return;
3271
3272 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3273 if (!cfg_cmd)
3274 goto cleanup;
3275
3276 spin_lock_irqsave(&xhci->lock, flags);
3277
3278
3279 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3280
3281
3282
3283
3284
3285
3286
3287 if (!list_empty(&ep->ring->td_list)) {
3288 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3289 spin_unlock_irqrestore(&xhci->lock, flags);
3290 xhci_free_command(xhci, cfg_cmd);
3291 goto cleanup;
3292 }
3293
3294 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3295 ep_index, 0);
3296 if (err < 0) {
3297 spin_unlock_irqrestore(&xhci->lock, flags);
3298 xhci_free_command(xhci, cfg_cmd);
3299 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
3300 __func__, err);
3301 goto cleanup;
3302 }
3303
3304 xhci_ring_cmd_db(xhci);
3305 spin_unlock_irqrestore(&xhci->lock, flags);
3306
3307 wait_for_completion(stop_cmd->completion);
3308
3309 spin_lock_irqsave(&xhci->lock, flags);
3310
3311
3312 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3313 if (!ctrl_ctx) {
3314 spin_unlock_irqrestore(&xhci->lock, flags);
3315 xhci_free_command(xhci, cfg_cmd);
3316 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3317 __func__);
3318 goto cleanup;
3319 }
3320
3321 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3322 ctrl_ctx, ep_flag, ep_flag);
3323 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3324
3325 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3326 udev->slot_id, false);
3327 if (err < 0) {
3328 spin_unlock_irqrestore(&xhci->lock, flags);
3329 xhci_free_command(xhci, cfg_cmd);
3330 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
3331 __func__, err);
3332 goto cleanup;
3333 }
3334
3335 xhci_ring_cmd_db(xhci);
3336 spin_unlock_irqrestore(&xhci->lock, flags);
3337
3338 wait_for_completion(cfg_cmd->completion);
3339
3340 xhci_free_command(xhci, cfg_cmd);
3341 cleanup:
3342 xhci_free_command(xhci, stop_cmd);
3343 spin_lock_irqsave(&xhci->lock, flags);
3344 if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
3345 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3346 spin_unlock_irqrestore(&xhci->lock, flags);
3347 }
3348
3349 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3350 struct usb_device *udev, struct usb_host_endpoint *ep,
3351 unsigned int slot_id)
3352 {
3353 int ret;
3354 unsigned int ep_index;
3355 unsigned int ep_state;
3356
3357 if (!ep)
3358 return -EINVAL;
3359 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3360 if (ret <= 0)
3361 return ret ? ret : -EINVAL;
3362 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3363 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3364 " descriptor for ep 0x%x does not support streams\n",
3365 ep->desc.bEndpointAddress);
3366 return -EINVAL;
3367 }
3368
3369 ep_index = xhci_get_endpoint_index(&ep->desc);
3370 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3371 if (ep_state & EP_HAS_STREAMS ||
3372 ep_state & EP_GETTING_STREAMS) {
3373 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3374 "already has streams set up.\n",
3375 ep->desc.bEndpointAddress);
3376 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3377 "dynamic stream context array reallocation.\n");
3378 return -EINVAL;
3379 }
3380 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3381 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3382 "endpoint 0x%x; URBs are pending.\n",
3383 ep->desc.bEndpointAddress);
3384 return -EINVAL;
3385 }
3386 return 0;
3387 }
3388
3389 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3390 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3391 {
3392 unsigned int max_streams;
3393
3394
3395 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3396
3397
3398
3399
3400
3401
3402 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3403 if (*num_stream_ctxs > max_streams) {
3404 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3405 max_streams);
3406 *num_stream_ctxs = max_streams;
3407 *num_streams = max_streams;
3408 }
3409 }
3410
3411
3412
3413
3414
3415 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3416 struct usb_device *udev,
3417 struct usb_host_endpoint **eps, unsigned int num_eps,
3418 unsigned int *num_streams, u32 *changed_ep_bitmask)
3419 {
3420 unsigned int max_streams;
3421 unsigned int endpoint_flag;
3422 int i;
3423 int ret;
3424
3425 for (i = 0; i < num_eps; i++) {
3426 ret = xhci_check_streams_endpoint(xhci, udev,
3427 eps[i], udev->slot_id);
3428 if (ret < 0)
3429 return ret;
3430
3431 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3432 if (max_streams < (*num_streams - 1)) {
3433 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3434 eps[i]->desc.bEndpointAddress,
3435 max_streams);
3436 *num_streams = max_streams+1;
3437 }
3438
3439 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3440 if (*changed_ep_bitmask & endpoint_flag)
3441 return -EINVAL;
3442 *changed_ep_bitmask |= endpoint_flag;
3443 }
3444 return 0;
3445 }
3446
3447 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3448 struct usb_device *udev,
3449 struct usb_host_endpoint **eps, unsigned int num_eps)
3450 {
3451 u32 changed_ep_bitmask = 0;
3452 unsigned int slot_id;
3453 unsigned int ep_index;
3454 unsigned int ep_state;
3455 int i;
3456
3457 slot_id = udev->slot_id;
3458 if (!xhci->devs[slot_id])
3459 return 0;
3460
3461 for (i = 0; i < num_eps; i++) {
3462 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3463 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3464
3465 if (ep_state & EP_GETTING_NO_STREAMS) {
3466 xhci_warn(xhci, "WARN Can't disable streams for "
3467 "endpoint 0x%x, "
3468 "streams are being disabled already\n",
3469 eps[i]->desc.bEndpointAddress);
3470 return 0;
3471 }
3472
3473 if (!(ep_state & EP_HAS_STREAMS) &&
3474 !(ep_state & EP_GETTING_STREAMS)) {
3475 xhci_warn(xhci, "WARN Can't disable streams for "
3476 "endpoint 0x%x, "
3477 "streams are already disabled!\n",
3478 eps[i]->desc.bEndpointAddress);
3479 xhci_warn(xhci, "WARN xhci_free_streams() called "
3480 "with non-streams endpoint\n");
3481 return 0;
3482 }
3483 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3484 }
3485 return changed_ep_bitmask;
3486 }
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3505 struct usb_host_endpoint **eps, unsigned int num_eps,
3506 unsigned int num_streams, gfp_t mem_flags)
3507 {
3508 int i, ret;
3509 struct xhci_hcd *xhci;
3510 struct xhci_virt_device *vdev;
3511 struct xhci_command *config_cmd;
3512 struct xhci_input_control_ctx *ctrl_ctx;
3513 unsigned int ep_index;
3514 unsigned int num_stream_ctxs;
3515 unsigned int max_packet;
3516 unsigned long flags;
3517 u32 changed_ep_bitmask = 0;
3518
3519 if (!eps)
3520 return -EINVAL;
3521
3522
3523
3524
3525 num_streams += 1;
3526 xhci = hcd_to_xhci(hcd);
3527 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3528 num_streams);
3529
3530
3531 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3532 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3533 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3534 return -ENOSYS;
3535 }
3536
3537 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3538 if (!config_cmd)
3539 return -ENOMEM;
3540
3541 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3542 if (!ctrl_ctx) {
3543 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3544 __func__);
3545 xhci_free_command(xhci, config_cmd);
3546 return -ENOMEM;
3547 }
3548
3549
3550
3551
3552
3553 spin_lock_irqsave(&xhci->lock, flags);
3554 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3555 num_eps, &num_streams, &changed_ep_bitmask);
3556 if (ret < 0) {
3557 xhci_free_command(xhci, config_cmd);
3558 spin_unlock_irqrestore(&xhci->lock, flags);
3559 return ret;
3560 }
3561 if (num_streams <= 1) {
3562 xhci_warn(xhci, "WARN: endpoints can't handle "
3563 "more than one stream.\n");
3564 xhci_free_command(xhci, config_cmd);
3565 spin_unlock_irqrestore(&xhci->lock, flags);
3566 return -EINVAL;
3567 }
3568 vdev = xhci->devs[udev->slot_id];
3569
3570
3571
3572 for (i = 0; i < num_eps; i++) {
3573 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3574 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3575 }
3576 spin_unlock_irqrestore(&xhci->lock, flags);
3577
3578
3579
3580
3581
3582 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3583 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3584 num_stream_ctxs, num_streams);
3585
3586 for (i = 0; i < num_eps; i++) {
3587 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3588 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3589 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3590 num_stream_ctxs,
3591 num_streams,
3592 max_packet, mem_flags);
3593 if (!vdev->eps[ep_index].stream_info)
3594 goto cleanup;
3595
3596
3597
3598 }
3599
3600
3601 for (i = 0; i < num_eps; i++) {
3602 struct xhci_ep_ctx *ep_ctx;
3603
3604 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3605 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3606
3607 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3608 vdev->out_ctx, ep_index);
3609 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3610 vdev->eps[ep_index].stream_info);
3611 }
3612
3613
3614
3615 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3616 vdev->out_ctx, ctrl_ctx,
3617 changed_ep_bitmask, changed_ep_bitmask);
3618
3619
3620 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3621 false, false);
3622
3623
3624
3625
3626
3627 if (ret < 0)
3628 goto cleanup;
3629
3630 spin_lock_irqsave(&xhci->lock, flags);
3631 for (i = 0; i < num_eps; i++) {
3632 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3633 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3634 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3635 udev->slot_id, ep_index);
3636 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3637 }
3638 xhci_free_command(xhci, config_cmd);
3639 spin_unlock_irqrestore(&xhci->lock, flags);
3640
3641 for (i = 0; i < num_eps; i++) {
3642 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3643 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3644 }
3645
3646 return num_streams - 1;
3647
3648 cleanup:
3649
3650 for (i = 0; i < num_eps; i++) {
3651 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3652 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3653 vdev->eps[ep_index].stream_info = NULL;
3654
3655
3656
3657 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3658 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3659 xhci_endpoint_zero(xhci, vdev, eps[i]);
3660 }
3661 xhci_free_command(xhci, config_cmd);
3662 return -ENOMEM;
3663 }
3664
3665
3666
3667
3668
3669
3670
3671 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3672 struct usb_host_endpoint **eps, unsigned int num_eps,
3673 gfp_t mem_flags)
3674 {
3675 int i, ret;
3676 struct xhci_hcd *xhci;
3677 struct xhci_virt_device *vdev;
3678 struct xhci_command *command;
3679 struct xhci_input_control_ctx *ctrl_ctx;
3680 unsigned int ep_index;
3681 unsigned long flags;
3682 u32 changed_ep_bitmask;
3683
3684 xhci = hcd_to_xhci(hcd);
3685 vdev = xhci->devs[udev->slot_id];
3686
3687
3688 spin_lock_irqsave(&xhci->lock, flags);
3689 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3690 udev, eps, num_eps);
3691 if (changed_ep_bitmask == 0) {
3692 spin_unlock_irqrestore(&xhci->lock, flags);
3693 return -EINVAL;
3694 }
3695
3696
3697
3698
3699
3700 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3701 command = vdev->eps[ep_index].stream_info->free_streams_command;
3702 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3703 if (!ctrl_ctx) {
3704 spin_unlock_irqrestore(&xhci->lock, flags);
3705 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3706 __func__);
3707 return -EINVAL;
3708 }
3709
3710 for (i = 0; i < num_eps; i++) {
3711 struct xhci_ep_ctx *ep_ctx;
3712
3713 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3714 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3715 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3716 EP_GETTING_NO_STREAMS;
3717
3718 xhci_endpoint_copy(xhci, command->in_ctx,
3719 vdev->out_ctx, ep_index);
3720 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3721 &vdev->eps[ep_index]);
3722 }
3723 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3724 vdev->out_ctx, ctrl_ctx,
3725 changed_ep_bitmask, changed_ep_bitmask);
3726 spin_unlock_irqrestore(&xhci->lock, flags);
3727
3728
3729
3730
3731 ret = xhci_configure_endpoint(xhci, udev, command,
3732 false, true);
3733
3734
3735
3736
3737 if (ret < 0)
3738 return ret;
3739
3740 spin_lock_irqsave(&xhci->lock, flags);
3741 for (i = 0; i < num_eps; i++) {
3742 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3743 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3744 vdev->eps[ep_index].stream_info = NULL;
3745
3746
3747
3748 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3749 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3750 }
3751 spin_unlock_irqrestore(&xhci->lock, flags);
3752
3753 return 0;
3754 }
3755
3756
3757
3758
3759
3760
3761
3762
3763 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3764 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3765 {
3766 int i;
3767 unsigned int num_dropped_eps = 0;
3768 unsigned int drop_flags = 0;
3769
3770 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3771 if (virt_dev->eps[i].ring) {
3772 drop_flags |= 1 << i;
3773 num_dropped_eps++;
3774 }
3775 }
3776 xhci->num_active_eps -= num_dropped_eps;
3777 if (num_dropped_eps)
3778 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3779 "Dropped %u ep ctxs, flags = 0x%x, "
3780 "%u now active.",
3781 num_dropped_eps, drop_flags,
3782 xhci->num_active_eps);
3783 }
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803 static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3804 struct usb_device *udev)
3805 {
3806 int ret, i;
3807 unsigned long flags;
3808 struct xhci_hcd *xhci;
3809 unsigned int slot_id;
3810 struct xhci_virt_device *virt_dev;
3811 struct xhci_command *reset_device_cmd;
3812 struct xhci_slot_ctx *slot_ctx;
3813 int old_active_eps = 0;
3814
3815 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3816 if (ret <= 0)
3817 return ret;
3818 xhci = hcd_to_xhci(hcd);
3819 slot_id = udev->slot_id;
3820 virt_dev = xhci->devs[slot_id];
3821 if (!virt_dev) {
3822 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3823 "not exist. Re-allocate the device\n", slot_id);
3824 ret = xhci_alloc_dev(hcd, udev);
3825 if (ret == 1)
3826 return 0;
3827 else
3828 return -EINVAL;
3829 }
3830
3831 if (virt_dev->tt_info)
3832 old_active_eps = virt_dev->tt_info->active_eps;
3833
3834 if (virt_dev->udev != udev) {
3835
3836
3837
3838
3839 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3840 "not match the udev. Re-allocate the device\n",
3841 slot_id);
3842 ret = xhci_alloc_dev(hcd, udev);
3843 if (ret == 1)
3844 return 0;
3845 else
3846 return -EINVAL;
3847 }
3848
3849
3850 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3851 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3852 SLOT_STATE_DISABLED)
3853 return 0;
3854
3855 trace_xhci_discover_or_reset_device(slot_ctx);
3856
3857 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3858
3859
3860
3861
3862
3863
3864 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3865 if (!reset_device_cmd) {
3866 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3867 return -ENOMEM;
3868 }
3869
3870
3871 spin_lock_irqsave(&xhci->lock, flags);
3872
3873 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3874 if (ret) {
3875 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3876 spin_unlock_irqrestore(&xhci->lock, flags);
3877 goto command_cleanup;
3878 }
3879 xhci_ring_cmd_db(xhci);
3880 spin_unlock_irqrestore(&xhci->lock, flags);
3881
3882
3883 wait_for_completion(reset_device_cmd->completion);
3884
3885
3886
3887
3888
3889 ret = reset_device_cmd->status;
3890 switch (ret) {
3891 case COMP_COMMAND_ABORTED:
3892 case COMP_COMMAND_RING_STOPPED:
3893 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3894 ret = -ETIME;
3895 goto command_cleanup;
3896 case COMP_SLOT_NOT_ENABLED_ERROR:
3897 case COMP_CONTEXT_STATE_ERROR:
3898 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3899 slot_id,
3900 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3901 xhci_dbg(xhci, "Not freeing device rings.\n");
3902
3903 ret = 0;
3904 goto command_cleanup;
3905 case COMP_SUCCESS:
3906 xhci_dbg(xhci, "Successful reset device command.\n");
3907 break;
3908 default:
3909 if (xhci_is_vendor_info_code(xhci, ret))
3910 break;
3911 xhci_warn(xhci, "Unknown completion code %u for "
3912 "reset device command.\n", ret);
3913 ret = -EINVAL;
3914 goto command_cleanup;
3915 }
3916
3917
3918 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3919 spin_lock_irqsave(&xhci->lock, flags);
3920
3921 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3922 spin_unlock_irqrestore(&xhci->lock, flags);
3923 }
3924
3925
3926 for (i = 1; i < 31; i++) {
3927 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3928
3929 if (ep->ep_state & EP_HAS_STREAMS) {
3930 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3931 xhci_get_endpoint_address(i));
3932 xhci_free_stream_info(xhci, ep->stream_info);
3933 ep->stream_info = NULL;
3934 ep->ep_state &= ~EP_HAS_STREAMS;
3935 }
3936
3937 if (ep->ring) {
3938 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3939 xhci_free_endpoint_ring(xhci, virt_dev, i);
3940 }
3941 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3942 xhci_drop_ep_from_interval_table(xhci,
3943 &virt_dev->eps[i].bw_info,
3944 virt_dev->bw_table,
3945 udev,
3946 &virt_dev->eps[i],
3947 virt_dev->tt_info);
3948 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3949 }
3950
3951 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3952 virt_dev->flags = 0;
3953 ret = 0;
3954
3955 command_cleanup:
3956 xhci_free_command(xhci, reset_device_cmd);
3957 return ret;
3958 }
3959
3960
3961
3962
3963
3964
3965 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3966 {
3967 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3968 struct xhci_virt_device *virt_dev;
3969 struct xhci_slot_ctx *slot_ctx;
3970 int i, ret;
3971
3972
3973
3974
3975
3976
3977 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3978 pm_runtime_put_noidle(hcd->self.controller);
3979
3980 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3981
3982
3983
3984 if (ret <= 0 && ret != -ENODEV)
3985 return;
3986
3987 virt_dev = xhci->devs[udev->slot_id];
3988 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3989 trace_xhci_free_dev(slot_ctx);
3990
3991
3992 for (i = 0; i < 31; i++)
3993 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3994 virt_dev->udev = NULL;
3995 xhci_disable_slot(xhci, udev->slot_id);
3996 xhci_free_virt_device(xhci, udev->slot_id);
3997 }
3998
3999 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
4000 {
4001 struct xhci_command *command;
4002 unsigned long flags;
4003 u32 state;
4004 int ret;
4005
4006 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4007 if (!command)
4008 return -ENOMEM;
4009
4010 xhci_debugfs_remove_slot(xhci, slot_id);
4011
4012 spin_lock_irqsave(&xhci->lock, flags);
4013
4014 state = readl(&xhci->op_regs->status);
4015 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
4016 (xhci->xhc_state & XHCI_STATE_HALTED)) {
4017 spin_unlock_irqrestore(&xhci->lock, flags);
4018 kfree(command);
4019 return -ENODEV;
4020 }
4021
4022 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
4023 slot_id);
4024 if (ret) {
4025 spin_unlock_irqrestore(&xhci->lock, flags);
4026 kfree(command);
4027 return ret;
4028 }
4029 xhci_ring_cmd_db(xhci);
4030 spin_unlock_irqrestore(&xhci->lock, flags);
4031
4032 wait_for_completion(command->completion);
4033
4034 if (command->status != COMP_SUCCESS)
4035 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
4036 slot_id, command->status);
4037
4038 xhci_free_command(xhci, command);
4039
4040 return 0;
4041 }
4042
4043
4044
4045
4046
4047
4048
4049 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
4050 {
4051 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
4052 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4053 "Not enough ep ctxs: "
4054 "%u active, need to add 1, limit is %u.",
4055 xhci->num_active_eps, xhci->limit_active_eps);
4056 return -ENOMEM;
4057 }
4058 xhci->num_active_eps += 1;
4059 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4060 "Adding 1 ep ctx, %u now active.",
4061 xhci->num_active_eps);
4062 return 0;
4063 }
4064
4065
4066
4067
4068
4069
4070 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
4071 {
4072 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4073 struct xhci_virt_device *vdev;
4074 struct xhci_slot_ctx *slot_ctx;
4075 unsigned long flags;
4076 int ret, slot_id;
4077 struct xhci_command *command;
4078
4079 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4080 if (!command)
4081 return 0;
4082
4083 spin_lock_irqsave(&xhci->lock, flags);
4084 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
4085 if (ret) {
4086 spin_unlock_irqrestore(&xhci->lock, flags);
4087 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
4088 xhci_free_command(xhci, command);
4089 return 0;
4090 }
4091 xhci_ring_cmd_db(xhci);
4092 spin_unlock_irqrestore(&xhci->lock, flags);
4093
4094 wait_for_completion(command->completion);
4095 slot_id = command->slot_id;
4096
4097 if (!slot_id || command->status != COMP_SUCCESS) {
4098 xhci_err(xhci, "Error while assigning device slot ID\n");
4099 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4100 HCS_MAX_SLOTS(
4101 readl(&xhci->cap_regs->hcs_params1)));
4102 xhci_free_command(xhci, command);
4103 return 0;
4104 }
4105
4106 xhci_free_command(xhci, command);
4107
4108 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4109 spin_lock_irqsave(&xhci->lock, flags);
4110 ret = xhci_reserve_host_control_ep_resources(xhci);
4111 if (ret) {
4112 spin_unlock_irqrestore(&xhci->lock, flags);
4113 xhci_warn(xhci, "Not enough host resources, "
4114 "active endpoint contexts = %u\n",
4115 xhci->num_active_eps);
4116 goto disable_slot;
4117 }
4118 spin_unlock_irqrestore(&xhci->lock, flags);
4119 }
4120
4121
4122
4123
4124 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4125 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4126 goto disable_slot;
4127 }
4128 vdev = xhci->devs[slot_id];
4129 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4130 trace_xhci_alloc_dev(slot_ctx);
4131
4132 udev->slot_id = slot_id;
4133
4134 xhci_debugfs_create_slot(xhci, slot_id);
4135
4136
4137
4138
4139
4140 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4141 pm_runtime_get_noresume(hcd->self.controller);
4142
4143
4144
4145 return 1;
4146
4147 disable_slot:
4148 xhci_disable_slot(xhci, udev->slot_id);
4149 xhci_free_virt_device(xhci, udev->slot_id);
4150
4151 return 0;
4152 }
4153
4154
4155
4156
4157
4158 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4159 enum xhci_setup_dev setup)
4160 {
4161 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4162 unsigned long flags;
4163 struct xhci_virt_device *virt_dev;
4164 int ret = 0;
4165 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4166 struct xhci_slot_ctx *slot_ctx;
4167 struct xhci_input_control_ctx *ctrl_ctx;
4168 u64 temp_64;
4169 struct xhci_command *command = NULL;
4170
4171 mutex_lock(&xhci->mutex);
4172
4173 if (xhci->xhc_state) {
4174 ret = -ESHUTDOWN;
4175 goto out;
4176 }
4177
4178 if (!udev->slot_id) {
4179 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4180 "Bad Slot ID %d", udev->slot_id);
4181 ret = -EINVAL;
4182 goto out;
4183 }
4184
4185 virt_dev = xhci->devs[udev->slot_id];
4186
4187 if (WARN_ON(!virt_dev)) {
4188
4189
4190
4191
4192
4193 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4194 udev->slot_id);
4195 ret = -EINVAL;
4196 goto out;
4197 }
4198 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4199 trace_xhci_setup_device_slot(slot_ctx);
4200
4201 if (setup == SETUP_CONTEXT_ONLY) {
4202 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4203 SLOT_STATE_DEFAULT) {
4204 xhci_dbg(xhci, "Slot already in default state\n");
4205 goto out;
4206 }
4207 }
4208
4209 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4210 if (!command) {
4211 ret = -ENOMEM;
4212 goto out;
4213 }
4214
4215 command->in_ctx = virt_dev->in_ctx;
4216
4217 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4218 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4219 if (!ctrl_ctx) {
4220 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4221 __func__);
4222 ret = -EINVAL;
4223 goto out;
4224 }
4225
4226
4227
4228
4229
4230 if (!slot_ctx->dev_info)
4231 xhci_setup_addressable_virt_dev(xhci, udev);
4232
4233 else
4234 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4235 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4236 ctrl_ctx->drop_flags = 0;
4237
4238 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4239 le32_to_cpu(slot_ctx->dev_info) >> 27);
4240
4241 trace_xhci_address_ctrl_ctx(ctrl_ctx);
4242 spin_lock_irqsave(&xhci->lock, flags);
4243 trace_xhci_setup_device(virt_dev);
4244 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4245 udev->slot_id, setup);
4246 if (ret) {
4247 spin_unlock_irqrestore(&xhci->lock, flags);
4248 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4249 "FIXME: allocate a command ring segment");
4250 goto out;
4251 }
4252 xhci_ring_cmd_db(xhci);
4253 spin_unlock_irqrestore(&xhci->lock, flags);
4254
4255
4256 wait_for_completion(command->completion);
4257
4258
4259
4260
4261
4262 switch (command->status) {
4263 case COMP_COMMAND_ABORTED:
4264 case COMP_COMMAND_RING_STOPPED:
4265 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4266 ret = -ETIME;
4267 break;
4268 case COMP_CONTEXT_STATE_ERROR:
4269 case COMP_SLOT_NOT_ENABLED_ERROR:
4270 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4271 act, udev->slot_id);
4272 ret = -EINVAL;
4273 break;
4274 case COMP_USB_TRANSACTION_ERROR:
4275 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4276
4277 mutex_unlock(&xhci->mutex);
4278 ret = xhci_disable_slot(xhci, udev->slot_id);
4279 xhci_free_virt_device(xhci, udev->slot_id);
4280 if (!ret)
4281 xhci_alloc_dev(hcd, udev);
4282 kfree(command->completion);
4283 kfree(command);
4284 return -EPROTO;
4285 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4286 dev_warn(&udev->dev,
4287 "ERROR: Incompatible device for setup %s command\n", act);
4288 ret = -ENODEV;
4289 break;
4290 case COMP_SUCCESS:
4291 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4292 "Successful setup %s command", act);
4293 break;
4294 default:
4295 xhci_err(xhci,
4296 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4297 act, command->status);
4298 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4299 ret = -EINVAL;
4300 break;
4301 }
4302 if (ret)
4303 goto out;
4304 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4305 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4306 "Op regs DCBAA ptr = %#016llx", temp_64);
4307 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4308 "Slot ID %d dcbaa entry @%p = %#016llx",
4309 udev->slot_id,
4310 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4311 (unsigned long long)
4312 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4313 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4314 "Output Context DMA address = %#08llx",
4315 (unsigned long long)virt_dev->out_ctx->dma);
4316 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4317 le32_to_cpu(slot_ctx->dev_info) >> 27);
4318
4319
4320
4321
4322 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4323 le32_to_cpu(slot_ctx->dev_info) >> 27);
4324
4325 ctrl_ctx->add_flags = 0;
4326 ctrl_ctx->drop_flags = 0;
4327 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4328 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4329
4330 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4331 "Internal device address = %d",
4332 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4333 out:
4334 mutex_unlock(&xhci->mutex);
4335 if (command) {
4336 kfree(command->completion);
4337 kfree(command);
4338 }
4339 return ret;
4340 }
4341
4342 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4343 {
4344 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
4345 }
4346
4347 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4348 {
4349 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
4350 }
4351
4352
4353
4354
4355
4356
4357
4358 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4359 {
4360 struct xhci_hub *rhub;
4361
4362 rhub = xhci_get_rhub(hcd);
4363 return rhub->ports[port1 - 1]->hw_portnum + 1;
4364 }
4365
4366
4367
4368
4369
4370 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4371 struct usb_device *udev, u16 max_exit_latency)
4372 {
4373 struct xhci_virt_device *virt_dev;
4374 struct xhci_command *command;
4375 struct xhci_input_control_ctx *ctrl_ctx;
4376 struct xhci_slot_ctx *slot_ctx;
4377 unsigned long flags;
4378 int ret;
4379
4380 command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4381 if (!command)
4382 return -ENOMEM;
4383
4384 spin_lock_irqsave(&xhci->lock, flags);
4385
4386 virt_dev = xhci->devs[udev->slot_id];
4387
4388
4389
4390
4391
4392
4393
4394 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4395 spin_unlock_irqrestore(&xhci->lock, flags);
4396 return 0;
4397 }
4398
4399
4400 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4401 if (!ctrl_ctx) {
4402 spin_unlock_irqrestore(&xhci->lock, flags);
4403 xhci_free_command(xhci, command);
4404 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4405 __func__);
4406 return -ENOMEM;
4407 }
4408
4409 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4410 spin_unlock_irqrestore(&xhci->lock, flags);
4411
4412 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4413 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4414 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4415 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4416 slot_ctx->dev_state = 0;
4417
4418 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4419 "Set up evaluate context for LPM MEL change.");
4420
4421
4422 ret = xhci_configure_endpoint(xhci, udev, command,
4423 true, true);
4424
4425 if (!ret) {
4426 spin_lock_irqsave(&xhci->lock, flags);
4427 virt_dev->current_mel = max_exit_latency;
4428 spin_unlock_irqrestore(&xhci->lock, flags);
4429 }
4430
4431 xhci_free_command(xhci, command);
4432
4433 return ret;
4434 }
4435
4436 #ifdef CONFIG_PM
4437
4438
4439 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4440 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4441
4442
4443 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4444 struct usb_device *udev)
4445 {
4446 int u2del, besl, besl_host;
4447 int besl_device = 0;
4448 u32 field;
4449
4450 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4451 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4452
4453 if (field & USB_BESL_SUPPORT) {
4454 for (besl_host = 0; besl_host < 16; besl_host++) {
4455 if (xhci_besl_encoding[besl_host] >= u2del)
4456 break;
4457 }
4458
4459 if (field & USB_BESL_BASELINE_VALID)
4460 besl_device = USB_GET_BESL_BASELINE(field);
4461 else if (field & USB_BESL_DEEP_VALID)
4462 besl_device = USB_GET_BESL_DEEP(field);
4463 } else {
4464 if (u2del <= 50)
4465 besl_host = 0;
4466 else
4467 besl_host = (u2del - 51) / 75 + 1;
4468 }
4469
4470 besl = besl_host + besl_device;
4471 if (besl > 15)
4472 besl = 15;
4473
4474 return besl;
4475 }
4476
4477
4478 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4479 {
4480 u32 field;
4481 int l1;
4482 int besld = 0;
4483 int hirdm = 0;
4484
4485 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4486
4487
4488 l1 = udev->l1_params.timeout / 256;
4489
4490
4491 if (field & USB_BESL_DEEP_VALID) {
4492 besld = USB_GET_BESL_DEEP(field);
4493 hirdm = 1;
4494 }
4495
4496 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4497 }
4498
4499 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4500 struct usb_device *udev, int enable)
4501 {
4502 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4503 struct xhci_port **ports;
4504 __le32 __iomem *pm_addr, *hlpm_addr;
4505 u32 pm_val, hlpm_val, field;
4506 unsigned int port_num;
4507 unsigned long flags;
4508 int hird, exit_latency;
4509 int ret;
4510
4511 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4512 return -EPERM;
4513
4514 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4515 !udev->lpm_capable)
4516 return -EPERM;
4517
4518 if (!udev->parent || udev->parent->parent ||
4519 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4520 return -EPERM;
4521
4522 if (udev->usb2_hw_lpm_capable != 1)
4523 return -EPERM;
4524
4525 spin_lock_irqsave(&xhci->lock, flags);
4526
4527 ports = xhci->usb2_rhub.ports;
4528 port_num = udev->portnum - 1;
4529 pm_addr = ports[port_num]->addr + PORTPMSC;
4530 pm_val = readl(pm_addr);
4531 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4532
4533 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4534 enable ? "enable" : "disable", port_num + 1);
4535
4536 if (enable) {
4537
4538 if (udev->usb2_hw_lpm_besl_capable) {
4539
4540
4541
4542
4543 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4544 if ((field & USB_BESL_SUPPORT) &&
4545 (field & USB_BESL_BASELINE_VALID))
4546 hird = USB_GET_BESL_BASELINE(field);
4547 else
4548 hird = udev->l1_params.besl;
4549
4550 exit_latency = xhci_besl_encoding[hird];
4551 spin_unlock_irqrestore(&xhci->lock, flags);
4552
4553 ret = xhci_change_max_exit_latency(xhci, udev,
4554 exit_latency);
4555 if (ret < 0)
4556 return ret;
4557 spin_lock_irqsave(&xhci->lock, flags);
4558
4559 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4560 writel(hlpm_val, hlpm_addr);
4561
4562 readl(hlpm_addr);
4563 } else {
4564 hird = xhci_calculate_hird_besl(xhci, udev);
4565 }
4566
4567 pm_val &= ~PORT_HIRD_MASK;
4568 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4569 writel(pm_val, pm_addr);
4570 pm_val = readl(pm_addr);
4571 pm_val |= PORT_HLE;
4572 writel(pm_val, pm_addr);
4573
4574 readl(pm_addr);
4575 } else {
4576 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4577 writel(pm_val, pm_addr);
4578
4579 readl(pm_addr);
4580 if (udev->usb2_hw_lpm_besl_capable) {
4581 spin_unlock_irqrestore(&xhci->lock, flags);
4582 xhci_change_max_exit_latency(xhci, udev, 0);
4583 readl_poll_timeout(ports[port_num]->addr, pm_val,
4584 (pm_val & PORT_PLS_MASK) == XDEV_U0,
4585 100, 10000);
4586 return 0;
4587 }
4588 }
4589
4590 spin_unlock_irqrestore(&xhci->lock, flags);
4591 return 0;
4592 }
4593
4594
4595
4596
4597
4598 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4599 unsigned capability)
4600 {
4601 u32 port_offset, port_count;
4602 int i;
4603
4604 for (i = 0; i < xhci->num_ext_caps; i++) {
4605 if (xhci->ext_caps[i] & capability) {
4606
4607 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4608 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4609 if (port >= port_offset &&
4610 port < port_offset + port_count)
4611 return 1;
4612 }
4613 }
4614 return 0;
4615 }
4616
4617 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4618 {
4619 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4620 int portnum = udev->portnum - 1;
4621
4622 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4623 return 0;
4624
4625
4626 if (!udev->parent || udev->parent->parent ||
4627 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4628 return 0;
4629
4630 if (xhci->hw_lpm_support == 1 &&
4631 xhci_check_usb2_port_capability(
4632 xhci, portnum, XHCI_HLC)) {
4633 udev->usb2_hw_lpm_capable = 1;
4634 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4635 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4636 if (xhci_check_usb2_port_capability(xhci, portnum,
4637 XHCI_BLC))
4638 udev->usb2_hw_lpm_besl_capable = 1;
4639 }
4640
4641 return 0;
4642 }
4643
4644
4645
4646
4647 static unsigned long long xhci_service_interval_to_ns(
4648 struct usb_endpoint_descriptor *desc)
4649 {
4650 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4651 }
4652
4653 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4654 enum usb3_link_state state)
4655 {
4656 unsigned long long sel;
4657 unsigned long long pel;
4658 unsigned int max_sel_pel;
4659 char *state_name;
4660
4661 switch (state) {
4662 case USB3_LPM_U1:
4663
4664 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4665 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4666 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4667 state_name = "U1";
4668 break;
4669 case USB3_LPM_U2:
4670 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4671 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4672 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4673 state_name = "U2";
4674 break;
4675 default:
4676 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4677 __func__);
4678 return USB3_LPM_DISABLED;
4679 }
4680
4681 if (sel <= max_sel_pel && pel <= max_sel_pel)
4682 return USB3_LPM_DEVICE_INITIATED;
4683
4684 if (sel > max_sel_pel)
4685 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4686 "due to long SEL %llu ms\n",
4687 state_name, sel);
4688 else
4689 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4690 "due to long PEL %llu ms\n",
4691 state_name, pel);
4692 return USB3_LPM_DISABLED;
4693 }
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703 static unsigned long long xhci_calculate_intel_u1_timeout(
4704 struct usb_device *udev,
4705 struct usb_endpoint_descriptor *desc)
4706 {
4707 unsigned long long timeout_ns;
4708 int ep_type;
4709 int intr_type;
4710
4711 ep_type = usb_endpoint_type(desc);
4712 switch (ep_type) {
4713 case USB_ENDPOINT_XFER_CONTROL:
4714 timeout_ns = udev->u1_params.sel * 3;
4715 break;
4716 case USB_ENDPOINT_XFER_BULK:
4717 timeout_ns = udev->u1_params.sel * 5;
4718 break;
4719 case USB_ENDPOINT_XFER_INT:
4720 intr_type = usb_endpoint_interrupt_type(desc);
4721 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4722 timeout_ns = udev->u1_params.sel * 3;
4723 break;
4724 }
4725
4726 fallthrough;
4727 case USB_ENDPOINT_XFER_ISOC:
4728 timeout_ns = xhci_service_interval_to_ns(desc);
4729 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4730 if (timeout_ns < udev->u1_params.sel * 2)
4731 timeout_ns = udev->u1_params.sel * 2;
4732 break;
4733 default:
4734 return 0;
4735 }
4736
4737 return timeout_ns;
4738 }
4739
4740
4741 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4742 struct usb_device *udev,
4743 struct usb_endpoint_descriptor *desc)
4744 {
4745 unsigned long long timeout_ns;
4746
4747
4748 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4749 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4750 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4751 return USB3_LPM_DISABLED;
4752 }
4753 }
4754
4755 if (xhci->quirks & XHCI_INTEL_HOST)
4756 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4757 else
4758 timeout_ns = udev->u1_params.sel;
4759
4760
4761
4762
4763 if (timeout_ns == USB3_LPM_DISABLED)
4764 timeout_ns = 1;
4765 else
4766 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4767
4768
4769
4770
4771 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4772 return timeout_ns;
4773 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4774 "due to long timeout %llu ms\n", timeout_ns);
4775 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4776 }
4777
4778
4779
4780
4781
4782
4783
4784 static unsigned long long xhci_calculate_intel_u2_timeout(
4785 struct usb_device *udev,
4786 struct usb_endpoint_descriptor *desc)
4787 {
4788 unsigned long long timeout_ns;
4789 unsigned long long u2_del_ns;
4790
4791 timeout_ns = 10 * 1000 * 1000;
4792
4793 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4794 (xhci_service_interval_to_ns(desc) > timeout_ns))
4795 timeout_ns = xhci_service_interval_to_ns(desc);
4796
4797 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4798 if (u2_del_ns > timeout_ns)
4799 timeout_ns = u2_del_ns;
4800
4801 return timeout_ns;
4802 }
4803
4804
4805 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4806 struct usb_device *udev,
4807 struct usb_endpoint_descriptor *desc)
4808 {
4809 unsigned long long timeout_ns;
4810
4811
4812 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4813 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4814 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4815 return USB3_LPM_DISABLED;
4816 }
4817 }
4818
4819 if (xhci->quirks & XHCI_INTEL_HOST)
4820 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4821 else
4822 timeout_ns = udev->u2_params.sel;
4823
4824
4825 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4826
4827
4828
4829 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4830 return timeout_ns;
4831 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4832 "due to long timeout %llu ms\n", timeout_ns);
4833 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4834 }
4835
4836 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4837 struct usb_device *udev,
4838 struct usb_endpoint_descriptor *desc,
4839 enum usb3_link_state state,
4840 u16 *timeout)
4841 {
4842 if (state == USB3_LPM_U1)
4843 return xhci_calculate_u1_timeout(xhci, udev, desc);
4844 else if (state == USB3_LPM_U2)
4845 return xhci_calculate_u2_timeout(xhci, udev, desc);
4846
4847 return USB3_LPM_DISABLED;
4848 }
4849
4850 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4851 struct usb_device *udev,
4852 struct usb_endpoint_descriptor *desc,
4853 enum usb3_link_state state,
4854 u16 *timeout)
4855 {
4856 u16 alt_timeout;
4857
4858 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4859 desc, state, timeout);
4860
4861
4862
4863
4864
4865
4866 if (alt_timeout == USB3_LPM_DISABLED) {
4867 *timeout = alt_timeout;
4868 return -E2BIG;
4869 }
4870 if (alt_timeout > *timeout)
4871 *timeout = alt_timeout;
4872 return 0;
4873 }
4874
4875 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4876 struct usb_device *udev,
4877 struct usb_host_interface *alt,
4878 enum usb3_link_state state,
4879 u16 *timeout)
4880 {
4881 int j;
4882
4883 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4884 if (xhci_update_timeout_for_endpoint(xhci, udev,
4885 &alt->endpoint[j].desc, state, timeout))
4886 return -E2BIG;
4887 }
4888 return 0;
4889 }
4890
4891 static int xhci_check_intel_tier_policy(struct usb_device *udev,
4892 enum usb3_link_state state)
4893 {
4894 struct usb_device *parent;
4895 unsigned int num_hubs;
4896
4897
4898 for (parent = udev->parent, num_hubs = 0; parent->parent;
4899 parent = parent->parent)
4900 num_hubs++;
4901
4902 if (num_hubs < 2)
4903 return 0;
4904
4905 dev_dbg(&udev->dev, "Disabling U1/U2 link state for device"
4906 " below second-tier hub.\n");
4907 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4908 "to decrease power consumption.\n");
4909 return -E2BIG;
4910 }
4911
4912 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4913 struct usb_device *udev,
4914 enum usb3_link_state state)
4915 {
4916 if (xhci->quirks & XHCI_INTEL_HOST)
4917 return xhci_check_intel_tier_policy(udev, state);
4918 else
4919 return 0;
4920 }
4921
4922
4923
4924
4925
4926
4927 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4928 struct usb_device *udev, enum usb3_link_state state)
4929 {
4930 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4931 struct usb_host_config *config;
4932 char *state_name;
4933 int i;
4934 u16 timeout = USB3_LPM_DISABLED;
4935
4936 if (state == USB3_LPM_U1)
4937 state_name = "U1";
4938 else if (state == USB3_LPM_U2)
4939 state_name = "U2";
4940 else {
4941 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4942 state);
4943 return timeout;
4944 }
4945
4946
4947
4948
4949 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4950 state, &timeout))
4951 return timeout;
4952
4953 config = udev->actconfig;
4954 if (!config)
4955 return timeout;
4956
4957 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4958 struct usb_driver *driver;
4959 struct usb_interface *intf = config->interface[i];
4960
4961 if (!intf)
4962 continue;
4963
4964
4965
4966
4967 if (intf->dev.driver) {
4968 driver = to_usb_driver(intf->dev.driver);
4969 if (driver && driver->disable_hub_initiated_lpm) {
4970 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
4971 state_name, driver->name);
4972 timeout = xhci_get_timeout_no_hub_lpm(udev,
4973 state);
4974 if (timeout == USB3_LPM_DISABLED)
4975 return timeout;
4976 }
4977 }
4978
4979
4980 if (!intf->cur_altsetting)
4981 continue;
4982
4983 if (xhci_update_timeout_for_interface(xhci, udev,
4984 intf->cur_altsetting,
4985 state, &timeout))
4986 return timeout;
4987 }
4988 return timeout;
4989 }
4990
4991 static int calculate_max_exit_latency(struct usb_device *udev,
4992 enum usb3_link_state state_changed,
4993 u16 hub_encoded_timeout)
4994 {
4995 unsigned long long u1_mel_us = 0;
4996 unsigned long long u2_mel_us = 0;
4997 unsigned long long mel_us = 0;
4998 bool disabling_u1;
4999 bool disabling_u2;
5000 bool enabling_u1;
5001 bool enabling_u2;
5002
5003 disabling_u1 = (state_changed == USB3_LPM_U1 &&
5004 hub_encoded_timeout == USB3_LPM_DISABLED);
5005 disabling_u2 = (state_changed == USB3_LPM_U2 &&
5006 hub_encoded_timeout == USB3_LPM_DISABLED);
5007
5008 enabling_u1 = (state_changed == USB3_LPM_U1 &&
5009 hub_encoded_timeout != USB3_LPM_DISABLED);
5010 enabling_u2 = (state_changed == USB3_LPM_U2 &&
5011 hub_encoded_timeout != USB3_LPM_DISABLED);
5012
5013
5014
5015
5016 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
5017 enabling_u1)
5018 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
5019 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
5020 enabling_u2)
5021 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
5022
5023 mel_us = max(u1_mel_us, u2_mel_us);
5024
5025
5026 if (mel_us > MAX_EXIT) {
5027 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
5028 "is too big.\n", mel_us);
5029 return -E2BIG;
5030 }
5031 return mel_us;
5032 }
5033
5034
5035 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5036 struct usb_device *udev, enum usb3_link_state state)
5037 {
5038 struct xhci_hcd *xhci;
5039 u16 hub_encoded_timeout;
5040 int mel;
5041 int ret;
5042
5043 xhci = hcd_to_xhci(hcd);
5044
5045
5046
5047
5048 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5049 !xhci->devs[udev->slot_id])
5050 return USB3_LPM_DISABLED;
5051
5052 if (xhci_check_tier_policy(xhci, udev, state) < 0)
5053 return USB3_LPM_DISABLED;
5054
5055 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
5056 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
5057 if (mel < 0) {
5058
5059 hub_encoded_timeout = USB3_LPM_DISABLED;
5060 mel = 0;
5061 }
5062
5063 ret = xhci_change_max_exit_latency(xhci, udev, mel);
5064 if (ret)
5065 return ret;
5066 return hub_encoded_timeout;
5067 }
5068
5069 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5070 struct usb_device *udev, enum usb3_link_state state)
5071 {
5072 struct xhci_hcd *xhci;
5073 u16 mel;
5074
5075 xhci = hcd_to_xhci(hcd);
5076 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5077 !xhci->devs[udev->slot_id])
5078 return 0;
5079
5080 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
5081 return xhci_change_max_exit_latency(xhci, udev, mel);
5082 }
5083 #else
5084
5085 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5086 struct usb_device *udev, int enable)
5087 {
5088 return 0;
5089 }
5090
5091 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
5092 {
5093 return 0;
5094 }
5095
5096 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5097 struct usb_device *udev, enum usb3_link_state state)
5098 {
5099 return USB3_LPM_DISABLED;
5100 }
5101
5102 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5103 struct usb_device *udev, enum usb3_link_state state)
5104 {
5105 return 0;
5106 }
5107 #endif
5108
5109
5110
5111
5112
5113
5114 static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5115 struct usb_tt *tt, gfp_t mem_flags)
5116 {
5117 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5118 struct xhci_virt_device *vdev;
5119 struct xhci_command *config_cmd;
5120 struct xhci_input_control_ctx *ctrl_ctx;
5121 struct xhci_slot_ctx *slot_ctx;
5122 unsigned long flags;
5123 unsigned think_time;
5124 int ret;
5125
5126
5127 if (!hdev->parent)
5128 return 0;
5129
5130 vdev = xhci->devs[hdev->slot_id];
5131 if (!vdev) {
5132 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5133 return -EINVAL;
5134 }
5135
5136 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5137 if (!config_cmd)
5138 return -ENOMEM;
5139
5140 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5141 if (!ctrl_ctx) {
5142 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5143 __func__);
5144 xhci_free_command(xhci, config_cmd);
5145 return -ENOMEM;
5146 }
5147
5148 spin_lock_irqsave(&xhci->lock, flags);
5149 if (hdev->speed == USB_SPEED_HIGH &&
5150 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5151 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5152 xhci_free_command(xhci, config_cmd);
5153 spin_unlock_irqrestore(&xhci->lock, flags);
5154 return -ENOMEM;
5155 }
5156
5157 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5158 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5159 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5160 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5161
5162
5163
5164
5165
5166 if (tt->multi)
5167 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5168 else if (hdev->speed == USB_SPEED_FULL)
5169 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5170
5171 if (xhci->hci_version > 0x95) {
5172 xhci_dbg(xhci, "xHCI version %x needs hub "
5173 "TT think time and number of ports\n",
5174 (unsigned int) xhci->hci_version);
5175 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5176
5177
5178
5179
5180
5181
5182
5183 think_time = tt->think_time;
5184 if (think_time != 0)
5185 think_time = (think_time / 666) - 1;
5186 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5187 slot_ctx->tt_info |=
5188 cpu_to_le32(TT_THINK_TIME(think_time));
5189 } else {
5190 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5191 "TT think time or number of ports\n",
5192 (unsigned int) xhci->hci_version);
5193 }
5194 slot_ctx->dev_state = 0;
5195 spin_unlock_irqrestore(&xhci->lock, flags);
5196
5197 xhci_dbg(xhci, "Set up %s for hub device.\n",
5198 (xhci->hci_version > 0x95) ?
5199 "configure endpoint" : "evaluate context");
5200
5201
5202
5203
5204 if (xhci->hci_version > 0x95)
5205 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5206 false, false);
5207 else
5208 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5209 true, false);
5210
5211 xhci_free_command(xhci, config_cmd);
5212 return ret;
5213 }
5214
5215 static int xhci_get_frame(struct usb_hcd *hcd)
5216 {
5217 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5218
5219 return readl(&xhci->run_regs->microframe_index) >> 3;
5220 }
5221
5222 static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5223 {
5224 xhci->usb2_rhub.hcd = hcd;
5225 hcd->speed = HCD_USB2;
5226 hcd->self.root_hub->speed = USB_SPEED_HIGH;
5227
5228
5229
5230
5231
5232 hcd->has_tt = 1;
5233 }
5234
5235 static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5236 {
5237 unsigned int minor_rev;
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248 if (xhci->usb3_rhub.min_rev == 0x1)
5249 minor_rev = 1;
5250 else
5251 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5252
5253 switch (minor_rev) {
5254 case 2:
5255 hcd->speed = HCD_USB32;
5256 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5257 hcd->self.root_hub->rx_lanes = 2;
5258 hcd->self.root_hub->tx_lanes = 2;
5259 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
5260 break;
5261 case 1:
5262 hcd->speed = HCD_USB31;
5263 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5264 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
5265 break;
5266 }
5267 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5268 minor_rev, minor_rev ? "Enhanced " : "");
5269
5270 xhci->usb3_rhub.hcd = hcd;
5271 }
5272
5273 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5274 {
5275 struct xhci_hcd *xhci;
5276
5277
5278
5279
5280 struct device *dev = hcd->self.sysdev;
5281 int retval;
5282
5283
5284 hcd->self.sg_tablesize = ~0;
5285
5286
5287 hcd->self.no_sg_constraint = 1;
5288
5289
5290 hcd->self.no_stop_on_short = 1;
5291
5292 xhci = hcd_to_xhci(hcd);
5293
5294 if (!usb_hcd_is_primary_hcd(hcd)) {
5295 xhci_hcd_init_usb3_data(xhci, hcd);
5296 return 0;
5297 }
5298
5299 mutex_init(&xhci->mutex);
5300 xhci->main_hcd = hcd;
5301 xhci->cap_regs = hcd->regs;
5302 xhci->op_regs = hcd->regs +
5303 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5304 xhci->run_regs = hcd->regs +
5305 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5306
5307 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5308 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5309 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5310 xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
5311 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5312 if (xhci->hci_version > 0x100)
5313 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5314
5315 xhci->quirks |= quirks;
5316
5317 get_quirks(dev, xhci);
5318
5319
5320
5321
5322
5323 if (xhci->hci_version > 0x96)
5324 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5325
5326
5327 retval = xhci_halt(xhci);
5328 if (retval)
5329 return retval;
5330
5331 xhci_zero_64b_regs(xhci);
5332
5333 xhci_dbg(xhci, "Resetting HCD\n");
5334
5335 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
5336 if (retval)
5337 return retval;
5338 xhci_dbg(xhci, "Reset complete\n");
5339
5340
5341
5342
5343
5344
5345
5346
5347 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5348 xhci->hcc_params &= ~BIT(0);
5349
5350
5351
5352 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5353 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
5354 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5355 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
5356 } else {
5357
5358
5359
5360
5361 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
5362 if (retval)
5363 return retval;
5364 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5365 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
5366 }
5367
5368 xhci_dbg(xhci, "Calling HCD init\n");
5369
5370 retval = xhci_init(hcd);
5371 if (retval)
5372 return retval;
5373 xhci_dbg(xhci, "Called HCD init\n");
5374
5375 if (xhci_hcd_is_usb3(hcd))
5376 xhci_hcd_init_usb3_data(xhci, hcd);
5377 else
5378 xhci_hcd_init_usb2_data(xhci, hcd);
5379
5380 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5381 xhci->hcc_params, xhci->hci_version, xhci->quirks);
5382
5383 return 0;
5384 }
5385 EXPORT_SYMBOL_GPL(xhci_gen_setup);
5386
5387 static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5388 struct usb_host_endpoint *ep)
5389 {
5390 struct xhci_hcd *xhci;
5391 struct usb_device *udev;
5392 unsigned int slot_id;
5393 unsigned int ep_index;
5394 unsigned long flags;
5395
5396 xhci = hcd_to_xhci(hcd);
5397
5398 spin_lock_irqsave(&xhci->lock, flags);
5399 udev = (struct usb_device *)ep->hcpriv;
5400 slot_id = udev->slot_id;
5401 ep_index = xhci_get_endpoint_index(&ep->desc);
5402
5403 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5404 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5405 spin_unlock_irqrestore(&xhci->lock, flags);
5406 }
5407
5408 static const struct hc_driver xhci_hc_driver = {
5409 .description = "xhci-hcd",
5410 .product_desc = "xHCI Host Controller",
5411 .hcd_priv_size = sizeof(struct xhci_hcd),
5412
5413
5414
5415
5416 .irq = xhci_irq,
5417 .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
5418 HCD_BH,
5419
5420
5421
5422
5423 .reset = NULL,
5424 .start = xhci_run,
5425 .stop = xhci_stop,
5426 .shutdown = xhci_shutdown,
5427
5428
5429
5430
5431 .map_urb_for_dma = xhci_map_urb_for_dma,
5432 .unmap_urb_for_dma = xhci_unmap_urb_for_dma,
5433 .urb_enqueue = xhci_urb_enqueue,
5434 .urb_dequeue = xhci_urb_dequeue,
5435 .alloc_dev = xhci_alloc_dev,
5436 .free_dev = xhci_free_dev,
5437 .alloc_streams = xhci_alloc_streams,
5438 .free_streams = xhci_free_streams,
5439 .add_endpoint = xhci_add_endpoint,
5440 .drop_endpoint = xhci_drop_endpoint,
5441 .endpoint_disable = xhci_endpoint_disable,
5442 .endpoint_reset = xhci_endpoint_reset,
5443 .check_bandwidth = xhci_check_bandwidth,
5444 .reset_bandwidth = xhci_reset_bandwidth,
5445 .address_device = xhci_address_device,
5446 .enable_device = xhci_enable_device,
5447 .update_hub_device = xhci_update_hub_device,
5448 .reset_device = xhci_discover_or_reset_device,
5449
5450
5451
5452
5453 .get_frame_number = xhci_get_frame,
5454
5455
5456
5457
5458 .hub_control = xhci_hub_control,
5459 .hub_status_data = xhci_hub_status_data,
5460 .bus_suspend = xhci_bus_suspend,
5461 .bus_resume = xhci_bus_resume,
5462 .get_resuming_ports = xhci_get_resuming_ports,
5463
5464
5465
5466
5467 .update_device = xhci_update_device,
5468 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5469 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5470 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5471 .find_raw_port_number = xhci_find_raw_port_number,
5472 .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
5473 };
5474
5475 void xhci_init_driver(struct hc_driver *drv,
5476 const struct xhci_driver_overrides *over)
5477 {
5478 BUG_ON(!over);
5479
5480
5481 *drv = xhci_hc_driver;
5482
5483 if (over) {
5484 drv->hcd_priv_size += over->extra_priv_size;
5485 if (over->reset)
5486 drv->reset = over->reset;
5487 if (over->start)
5488 drv->start = over->start;
5489 if (over->add_endpoint)
5490 drv->add_endpoint = over->add_endpoint;
5491 if (over->drop_endpoint)
5492 drv->drop_endpoint = over->drop_endpoint;
5493 if (over->check_bandwidth)
5494 drv->check_bandwidth = over->check_bandwidth;
5495 if (over->reset_bandwidth)
5496 drv->reset_bandwidth = over->reset_bandwidth;
5497 }
5498 }
5499 EXPORT_SYMBOL_GPL(xhci_init_driver);
5500
5501 MODULE_DESCRIPTION(DRIVER_DESC);
5502 MODULE_AUTHOR(DRIVER_AUTHOR);
5503 MODULE_LICENSE("GPL");
5504
5505 static int __init xhci_hcd_init(void)
5506 {
5507
5508
5509
5510
5511 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5512 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5513 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5514
5515
5516
5517 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5518 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5519 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5520 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5521 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5522
5523 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5524
5525 if (usb_disabled())
5526 return -ENODEV;
5527
5528 xhci_debugfs_create_root();
5529 xhci_dbc_init();
5530
5531 return 0;
5532 }
5533
5534
5535
5536
5537
5538 static void __exit xhci_hcd_fini(void)
5539 {
5540 xhci_debugfs_remove_root();
5541 xhci_dbc_exit();
5542 }
5543
5544 module_init(xhci_hcd_init);
5545 module_exit(xhci_hcd_fini);