0001
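/*
 * Thunderbolt driver - switch/port utility functions
 */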
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/delay.h>
0010 #include <linux/idr.h>
0011 #include <linux/nvmem-provider.h>
0012 #include <linux/pm_runtime.h>
0013 #include <linux/sched/signal.h>
0014 #include <linux/sizes.h>
0015 #include <linux/slab.h>
0016 #include <linux/module.h>
0017
0018 #include "tb.h"
0019
0020
0021
0022 #define NVM_CSS 0x10
0023
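/*
 * NVM authentication status for a switch. Entries live on the global
 * nvm_auth_status_cache list, are keyed by the switch UUID and are
 * protected by nvm_auth_status_lock. The cache lets the status survive
 * the switch remove/re-add cycle that happens during NVM authentication.
 */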
0024 struct nvm_auth_status {
0025 struct list_head list;
0026 uuid_t uuid;
0027 u32 status;
0028 };
0029
0030 static bool clx_enabled = true;
0031 module_param_named(clx, clx_enabled, bool, 0444);
0032 MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
0033
0034
0035
0036
0037
0038
0039 static LIST_HEAD(nvm_auth_status_cache);
0040 static DEFINE_MUTEX(nvm_auth_status_lock);
0041
0042 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
0043 {
0044 struct nvm_auth_status *st;
0045
0046 list_for_each_entry(st, &nvm_auth_status_cache, list) {
0047 if (uuid_equal(&st->uuid, sw->uuid))
0048 return st;
0049 }
0050
0051 return NULL;
0052 }
0053
0054 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
0055 {
0056 struct nvm_auth_status *st;
0057
0058 mutex_lock(&nvm_auth_status_lock);
0059 st = __nvm_get_auth_status(sw);
0060 mutex_unlock(&nvm_auth_status_lock);
0061
0062 *status = st ? st->status : 0;
0063 }
0064
0065 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
0066 {
0067 struct nvm_auth_status *st;
0068
0069 if (WARN_ON(!sw->uuid))
0070 return;
0071
0072 mutex_lock(&nvm_auth_status_lock);
0073 st = __nvm_get_auth_status(sw);
0074
0075 if (!st) {
0076 st = kzalloc(sizeof(*st), GFP_KERNEL);
0077 if (!st)
0078 goto unlock;
0079
0080 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
0081 INIT_LIST_HEAD(&st->list);
0082 list_add_tail(&st->list, &nvm_auth_status_cache);
0083 }
0084
0085 st->status = status;
0086 unlock:
0087 mutex_unlock(&nvm_auth_status_lock);
0088 }
0089
0090 static void nvm_clear_auth_status(const struct tb_switch *sw)
0091 {
0092 struct nvm_auth_status *st;
0093
0094 mutex_lock(&nvm_auth_status_lock);
0095 st = __nvm_get_auth_status(sw);
0096 if (st) {
0097 list_del(&st->list);
0098 kfree(st);
0099 }
0100 mutex_unlock(&nvm_auth_status_lock);
0101 }
0102
0103 static int nvm_validate_and_write(struct tb_switch *sw)
0104 {
0105 unsigned int image_size, hdr_size;
0106 const u8 *buf = sw->nvm->buf;
0107 u16 ds_size;
0108 int ret;
0109
0110 if (!buf)
0111 return -EINVAL;
0112
0113 image_size = sw->nvm->buf_data_size;
0114 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
0115 return -EINVAL;
0116
0117
0118
0119
0120
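/*
 * The low 24 bits of the first dword hold the image header size. The
 * device ID field read below must still fall inside the image.
 */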
0121 hdr_size = (*(u32 *)buf) & 0xffffff;
0122 if (hdr_size + NVM_DEVID + 2 >= image_size)
0123 return -EINVAL;
0124
0125
0126 if (!IS_ALIGNED(hdr_size, SZ_4K))
0127 return -EINVAL;
0128
0129
0130
0131
0132
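/* Digital section size is stored right after the header and must also fit inside the image */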
0133 ds_size = *(u16 *)(buf + hdr_size);
0134 if (ds_size >= image_size)
0135 return -EINVAL;
0136
0137 if (!sw->safe_mode) {
0138 u16 device_id;
0139
0140
0141
0142
0143
0144 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
0145 if (device_id != sw->config.device_id)
0146 return -EINVAL;
0147
0148 if (sw->generation < 3) {
0149
0150 ret = dma_port_flash_write(sw->dma_port,
0151 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
0152 DMA_PORT_CSS_MAX_SIZE);
0153 if (ret)
0154 return ret;
0155 }
0156
0157
0158 buf += hdr_size;
0159 image_size -= hdr_size;
0160 }
0161
0162 if (tb_switch_is_usb4(sw))
0163 ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
0164 else
0165 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
0166 if (!ret)
0167 sw->nvm->flushed = true;
0168 return ret;
0169 }
0170
0171 static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
0172 {
0173 int ret = 0;
0174
0175
0176
0177
0178
0179
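/*
 * In safe mode there are no tunnels to tear down; otherwise disconnect
 * all active paths before starting the authentication.
 */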
0180 if (!sw->safe_mode) {
0181 u32 status;
0182
0183 ret = tb_domain_disconnect_all_paths(sw->tb);
0184 if (ret)
0185 return ret;
0186
0187
0188
0189
0190 ret = dma_port_flash_update_auth(sw->dma_port);
0191 if (!ret || ret == -ETIMEDOUT)
0192 return 0;
0193
0194
0195
0196
0197
0198 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
0199 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
0200 nvm_set_auth_status(sw, status);
0201 }
0202
0203
0204
0205
0206
0207 dma_port_power_cycle(sw->dma_port);
0208 return ret;
0209 }
0210
0211 static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
0212 {
0213 int ret, retries = 10;
0214
0215 ret = dma_port_flash_update_auth(sw->dma_port);
0216 switch (ret) {
0217 case 0:
0218 case -ETIMEDOUT:
0219 case -EACCES:
0220 case -EINVAL:
0221
0222 break;
0223 default:
0224 return ret;
0225 }
0226
0227
0228
0229
0230
0231
0232
0233 do {
0234 u32 status;
0235
0236 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
0237 if (ret < 0 && ret != -ETIMEDOUT)
0238 return ret;
0239 if (ret > 0) {
0240 if (status) {
0241 tb_sw_warn(sw, "failed to authenticate NVM\n");
0242 nvm_set_auth_status(sw, status);
0243 }
0244
0245 tb_sw_info(sw, "power cycling the switch now\n");
0246 dma_port_power_cycle(sw->dma_port);
0247 return 0;
0248 }
0249
0250 msleep(500);
0251 } while (--retries);
0252
0253 return -ETIMEDOUT;
0254 }
0255
0256 static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
0257 {
0258 struct pci_dev *root_port;
0259
0260
0261
0262
0263
0264
0265
0266 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
0267 if (root_port)
0268 pm_runtime_get_noresume(&root_port->dev);
0269 }
0270
0271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
0272 {
0273 struct pci_dev *root_port;
0274
0275 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
0276 if (root_port)
0277 pm_runtime_put(&root_port->dev);
0278 }
0279
0280 static inline bool nvm_readable(struct tb_switch *sw)
0281 {
0282 if (tb_switch_is_usb4(sw)) {
0283
0284
0285
0286
0287
0288
0289 return usb4_switch_nvm_sector_size(sw) > 0;
0290 }
0291
0292
0293 return !!sw->dma_port;
0294 }
0295
0296 static inline bool nvm_upgradeable(struct tb_switch *sw)
0297 {
0298 if (sw->no_nvm_upgrade)
0299 return false;
0300 return nvm_readable(sw);
0301 }
0302
0303 static inline int nvm_read(struct tb_switch *sw, unsigned int address,
0304 void *buf, size_t size)
0305 {
0306 if (tb_switch_is_usb4(sw))
0307 return usb4_switch_nvm_read(sw, address, buf, size);
0308 return dma_port_flash_read(sw->dma_port, address, buf, size);
0309 }
0310
0311 static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
0312 {
0313 int ret;
0314
0315 if (tb_switch_is_usb4(sw)) {
0316 if (auth_only) {
0317 ret = usb4_switch_nvm_set_offset(sw, 0);
0318 if (ret)
0319 return ret;
0320 }
0321 sw->nvm->authenticating = true;
0322 return usb4_switch_nvm_authenticate(sw);
0323 } else if (auth_only) {
0324 return -EOPNOTSUPP;
0325 }
0326
0327 sw->nvm->authenticating = true;
0328 if (!tb_route(sw)) {
0329 nvm_authenticate_start_dma_port(sw);
0330 ret = nvm_authenticate_host_dma_port(sw);
0331 } else {
0332 ret = nvm_authenticate_device_dma_port(sw);
0333 }
0334
0335 return ret;
0336 }
0337
0338 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
0339 size_t bytes)
0340 {
0341 struct tb_nvm *nvm = priv;
0342 struct tb_switch *sw = tb_to_switch(nvm->dev);
0343 int ret;
0344
0345 pm_runtime_get_sync(&sw->dev);
0346
0347 if (!mutex_trylock(&sw->tb->lock)) {
0348 ret = restart_syscall();
0349 goto out;
0350 }
0351
0352 ret = nvm_read(sw, offset, val, bytes);
0353 mutex_unlock(&sw->tb->lock);
0354
0355 out:
0356 pm_runtime_mark_last_busy(&sw->dev);
0357 pm_runtime_put_autosuspend(&sw->dev);
0358
0359 return ret;
0360 }
0361
0362 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
0363 size_t bytes)
0364 {
0365 struct tb_nvm *nvm = priv;
0366 struct tb_switch *sw = tb_to_switch(nvm->dev);
0367 int ret;
0368
0369 if (!mutex_trylock(&sw->tb->lock))
0370 return restart_syscall();
0371
0372
0373
0374
0375
0376
0377
0378 ret = tb_nvm_write_buf(nvm, offset, val, bytes);
0379 mutex_unlock(&sw->tb->lock);
0380
0381 return ret;
0382 }
0383
0384 static int tb_switch_nvm_add(struct tb_switch *sw)
0385 {
0386 struct tb_nvm *nvm;
0387 u32 val;
0388 int ret;
0389
0390 if (!nvm_readable(sw))
0391 return 0;
0392
0393
0394
0395
0396
0397
0398 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
0399 sw->config.vendor_id != 0x8087) {
0400 dev_info(&sw->dev,
0401 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
0402 sw->config.vendor_id);
0403 return 0;
0404 }
0405
0406 nvm = tb_nvm_alloc(&sw->dev);
0407 if (IS_ERR(nvm))
0408 return PTR_ERR(nvm);
0409
0410
0411
0412
0413
0414
0415 if (!sw->safe_mode) {
0416 u32 nvm_size, hdr_size;
0417
0418 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
0419 if (ret)
0420 goto err_nvm;
0421
0422 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
0423 nvm_size = (SZ_1M << (val & 7)) / 8;
0424 nvm_size = (nvm_size - hdr_size) / 2;
0425
0426 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
0427 if (ret)
0428 goto err_nvm;
0429
0430 nvm->major = val >> 16;
0431 nvm->minor = val >> 8;
0432
0433 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
0434 if (ret)
0435 goto err_nvm;
0436 }
0437
0438 if (!sw->no_nvm_upgrade) {
0439 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
0440 tb_switch_nvm_write);
0441 if (ret)
0442 goto err_nvm;
0443 }
0444
0445 sw->nvm = nvm;
0446 return 0;
0447
0448 err_nvm:
0449 tb_nvm_free(nvm);
0450 return ret;
0451 }
0452
0453 static void tb_switch_nvm_remove(struct tb_switch *sw)
0454 {
0455 struct tb_nvm *nvm;
0456
0457 nvm = sw->nvm;
0458 sw->nvm = NULL;
0459
0460 if (!nvm)
0461 return;
0462
0463
0464 if (!nvm->authenticating)
0465 nvm_clear_auth_status(sw);
0466
0467 tb_nvm_free(nvm);
0468 }
0469
0470
0471
0472 static const char *tb_port_type(const struct tb_regs_port_header *port)
0473 {
0474 switch (port->type >> 16) {
0475 case 0:
0476 switch ((u8) port->type) {
0477 case 0:
0478 return "Inactive";
0479 case 1:
0480 return "Port";
0481 case 2:
0482 return "NHI";
0483 default:
0484 return "unknown";
0485 }
0486 case 0x2:
0487 return "Ethernet";
0488 case 0x8:
0489 return "SATA";
0490 case 0xe:
0491 return "DP/HDMI";
0492 case 0x10:
0493 return "PCIe";
0494 case 0x20:
0495 return "USB";
0496 default:
0497 return "unknown";
0498 }
0499 }
0500
0501 static void tb_dump_port(struct tb *tb, const struct tb_port *port)
0502 {
0503 const struct tb_regs_port_header *regs = &port->config;
0504
0505 tb_dbg(tb,
0506 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
0507 regs->port_number, regs->vendor_id, regs->device_id,
0508 regs->revision, regs->thunderbolt_version, tb_port_type(regs),
0509 regs->type);
0510 tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
0511 regs->max_in_hop_id, regs->max_out_hop_id);
0512 tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
0513 tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
0514 tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
0515 port->ctl_credits);
0516 }
0517
0518
0519
0520
0521
0522
0523
0524
0525
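/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a PHY capability (i.e. be a real lane adapter).
 *
 * Return: an enum tb_port_state on success or a negative error code.
 */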
0526 int tb_port_state(struct tb_port *port)
0527 {
0528 struct tb_cap_phy phy;
0529 int res;
0530 if (port->cap_phy == 0) {
0531 tb_port_WARN(port, "does not have a PHY\n");
0532 return -EINVAL;
0533 }
0534 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
0535 if (res)
0536 return res;
0537 return phy.state;
0538 }
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
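/**
 * tb_wait_for_port() - wait for a port to establish its link
 * @port: port to wait for
 * @wait_if_unplugged: also keep waiting while the port reports unplugged
 *
 * Retries for roughly one second (10 x 100 ms) for the port to reach
 * TB_PORT_UP.
 *
 * Return: 1 if the port is connected and up, 0 if it is disabled,
 * unplugged or never came up, or a negative error code.
 */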
0555 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
0556 {
0557 int retries = 10;
0558 int state;
0559 if (!port->cap_phy) {
0560 tb_port_WARN(port, "does not have PHY\n");
0561 return -EINVAL;
0562 }
0563 if (tb_is_upstream_port(port)) {
0564 tb_port_WARN(port, "is the upstream port\n");
0565 return -EINVAL;
0566 }
0567
0568 while (retries--) {
0569 state = tb_port_state(port);
0570 if (state < 0)
0571 return state;
0572 if (state == TB_PORT_DISABLED) {
0573 tb_port_dbg(port, "is disabled (state: 0)\n");
0574 return 0;
0575 }
0576 if (state == TB_PORT_UNPLUGGED) {
0577 if (wait_if_unplugged) {
0578
0579 tb_port_dbg(port,
0580 "is unplugged (state: 7), retrying...\n");
0581 msleep(100);
0582 continue;
0583 }
0584 tb_port_dbg(port, "is unplugged (state: 7)\n");
0585 return 0;
0586 }
0587 if (state == TB_PORT_UP) {
0588 tb_port_dbg(port, "is connected, link is up (state: 2)\n");
0589 return 1;
0590 }
0591
0592
0593
0594
0595
0596 tb_port_dbg(port,
0597 "is connected, link is not up (state: %d), retrying...\n",
0598 state);
0599 msleep(100);
0600 }
0601 tb_port_warn(port,
0602 "failed to reach state TB_PORT_UP. Ignoring port...\n");
0603 return 0;
0604 }
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
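/**
 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
 * @port: port whose NFC credit count to adjust
 * @credits: credits to add (negative values remove credits)
 *
 * Does nothing if the switch is unplugged. For USB4 routers this only
 * applies to lane adapters.
 *
 * Return: 0 on success or a negative error code.
 */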
0616 int tb_port_add_nfc_credits(struct tb_port *port, int credits)
0617 {
0618 u32 nfc_credits;
0619
0620 if (credits == 0 || port->sw->is_unplugged)
0621 return 0;
0622
0623
0624
0625
0626
0627 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
0628 return 0;
0629
0630 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
0631 if (credits < 0)
0632 credits = max_t(int, -nfc_credits, credits);
0633
0634 nfc_credits += credits;
0635
0636 tb_port_dbg(port, "adding %d NFC credits to %lu\n", credits,
0637 port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

0638
0639 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
0640 port->config.nfc_credits |= nfc_credits;
0641
0642 return tb_port_write(port, &port->config.nfc_credits,
0643 TB_CFG_PORT, ADP_CS_4, 1);
0644 }
0645
0646
0647
0648
0649
0650
0651
0652
0653 int tb_port_clear_counter(struct tb_port *port, int counter)
0654 {
0655 u32 zero[3] = { 0, 0, 0 };
0656 tb_port_dbg(port, "clearing counter %d\n", counter);
0657 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
0658 }
0659
0660
0661
0662
0663
0664
0665
0666
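/**
 * tb_port_unlock() - Unlock downstream port
 * @port: lane adapter port to unlock
 *
 * Only needed for software connection manager controlled USB4 routers;
 * returns 0 without doing anything for ICM managed domains and for
 * non-USB4 routers.
 */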
0667 int tb_port_unlock(struct tb_port *port)
0668 {
0669 if (tb_switch_is_icm(port->sw))
0670 return 0;
0671 if (!tb_port_is_null(port))
0672 return -EINVAL;
0673 if (tb_switch_is_usb4(port->sw))
0674 return usb4_port_unlock(port);
0675 return 0;
0676 }
0677
0678 static int __tb_port_enable(struct tb_port *port, bool enable)
0679 {
0680 int ret;
0681 u32 phy;
0682
0683 if (!tb_port_is_null(port))
0684 return -EINVAL;
0685
0686 ret = tb_port_read(port, &phy, TB_CFG_PORT,
0687 port->cap_phy + LANE_ADP_CS_1, 1);
0688 if (ret)
0689 return ret;
0690
0691 if (enable)
0692 phy &= ~LANE_ADP_CS_1_LD;
0693 else
0694 phy |= LANE_ADP_CS_1_LD;
0695
0696
0697 ret = tb_port_write(port, &phy, TB_CFG_PORT,
0698 port->cap_phy + LANE_ADP_CS_1, 1);
0699 if (ret)
0700 return ret;
0701
0702 tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
0703 return 0;
0704 }
0705
0706
0707
0708
0709
0710
0711
0712 int tb_port_enable(struct tb_port *port)
0713 {
0714 return __tb_port_enable(port, true);
0715 }
0716
0717
0718
0719
0720
0721
0722
0723 int tb_port_disable(struct tb_port *port)
0724 {
0725 return __tb_port_enable(port, false);
0726 }
0727
0728
0729
0730
0731
0732
0733
0734
0735
0736 static int tb_init_port(struct tb_port *port)
0737 {
0738 int res;
0739 int cap;
0740
0741 INIT_LIST_HEAD(&port->list);
0742
0743
0744 if (!port->port)
0745 return 0;
0746
0747 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
0748 if (res) {
0749 if (res == -ENODEV) {
0750 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
0751 port->port);
0752 port->disabled = true;
0753 return 0;
0754 }
0755 return res;
0756 }
0757
0758
0759 if (port->config.type == TB_TYPE_PORT) {
0760 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
0761
0762 if (cap > 0)
0763 port->cap_phy = cap;
0764 else
0765 tb_port_WARN(port, "non-switch port without a PHY\n");
0766
0767 cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
0768 if (cap > 0)
0769 port->cap_usb4 = cap;
0770
0771
0772
0773
0774
0775
0776 if (tb_switch_is_usb4(port->sw)) {
0777 struct tb_regs_hop hop;
0778
0779 if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
0780 port->ctl_credits = hop.initial_credits;
0781 }
0782 if (!port->ctl_credits)
0783 port->ctl_credits = 2;
0784
0785 } else {
0786 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
0787 if (cap > 0)
0788 port->cap_adap = cap;
0789 }
0790
0791 port->total_credits =
0792 (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
0793 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
0794
0795 tb_dump_port(port->sw->tb, port);
0796 return 0;
0797 }
0798
0799 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
0800 int max_hopid)
0801 {
0802 int port_max_hopid;
0803 struct ida *ida;
0804
0805 if (in) {
0806 port_max_hopid = port->config.max_in_hop_id;
0807 ida = &port->in_hopids;
0808 } else {
0809 port_max_hopid = port->config.max_out_hop_id;
0810 ida = &port->out_hopids;
0811 }
0812
0813
0814
0815
0816
0817 if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
0818 min_hopid = TB_PATH_MIN_HOPID;
0819
0820 if (max_hopid < 0 || max_hopid > port_max_hopid)
0821 max_hopid = port_max_hopid;
0822
0823 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
0824 }
0825
0826
0827
0828
0829
0830
0831
0832
0833
0834
0835 int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
0836 {
0837 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
0838 }
0839
0840
0841
0842
0843
0844
0845
0846
0847
0848
0849 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
0850 {
0851 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
0852 }
0853
0854
0855
0856
0857
0858
0859 void tb_port_release_in_hopid(struct tb_port *port, int hopid)
0860 {
0861 ida_simple_remove(&port->in_hopids, hopid);
0862 }
0863
0864
0865
0866
0867
0868
0869 void tb_port_release_out_hopid(struct tb_port *port, int hopid)
0870 {
0871 ida_simple_remove(&port->out_hopids, hopid);
0872 }
0873
0874 static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
0875 const struct tb_switch *sw)
0876 {
0877 u64 mask = (1ULL << parent->config.depth * 8) - 1;
0878 return (tb_route(parent) & mask) == (tb_route(sw) & mask);
0879 }
0880
0881
0882
0883
0884
0885
0886
0887
0888
0889
0890
0891
0892
0893
0894
0895
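/**
 * tb_next_port_on_path() - return next port for given port on a path
 * @start: start port of the walk
 * @end: end port of the walk
 * @prev: previously returned port (%NULL to begin the walk)
 *
 * Walks the ports, switch by switch, from @start towards @end. Pass the
 * value returned by the previous call as @prev to get the next hop.
 *
 * Return: the next port on the path or %NULL when @end has been reached.
 */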
0896 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
0897 struct tb_port *prev)
0898 {
0899 struct tb_port *next;
0900
0901 if (!prev)
0902 return start;
0903
0904 if (prev->sw == end->sw) {
0905 if (prev == end)
0906 return NULL;
0907 return end;
0908 }
0909
0910 if (tb_switch_is_reachable(prev->sw, end->sw)) {
0911 next = tb_port_at(tb_route(end->sw), prev->sw);
0912
0913 if (prev->remote &&
0914 (next == prev || next->dual_link_port == prev))
0915 next = prev->remote;
0916 } else {
0917 if (tb_is_upstream_port(prev)) {
0918 next = prev->remote;
0919 } else {
0920 next = tb_upstream_port(prev->sw);
0921
0922
0923
0924
0925 if (next->dual_link_port &&
0926 next->link_nr != prev->link_nr) {
0927 next = next->dual_link_port;
0928 }
0929 }
0930 }
0931
0932 return next != prev ? next : NULL;
0933 }
0934
0935
0936
0937
0938
0939
0940
0941 int tb_port_get_link_speed(struct tb_port *port)
0942 {
0943 u32 val, speed;
0944 int ret;
0945
0946 if (!port->cap_phy)
0947 return -EINVAL;
0948
0949 ret = tb_port_read(port, &val, TB_CFG_PORT,
0950 port->cap_phy + LANE_ADP_CS_1, 1);
0951 if (ret)
0952 return ret;
0953
0954 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
0955 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
0956 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
0957 }
0958
0959
0960
0961
0962
0963
0964
0965
0966 int tb_port_get_link_width(struct tb_port *port)
0967 {
0968 u32 val;
0969 int ret;
0970
0971 if (!port->cap_phy)
0972 return -EINVAL;
0973
0974 ret = tb_port_read(port, &val, TB_CFG_PORT,
0975 port->cap_phy + LANE_ADP_CS_1, 1);
0976 if (ret)
0977 return ret;
0978
0979 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
0980 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
0981 }
0982
0983 static bool tb_port_is_width_supported(struct tb_port *port, int width)
0984 {
0985 u32 phy, widths;
0986 int ret;
0987
0988 if (!port->cap_phy)
0989 return false;
0990
0991 ret = tb_port_read(port, &phy, TB_CFG_PORT,
0992 port->cap_phy + LANE_ADP_CS_0, 1);
0993 if (ret)
0994 return false;
0995
0996 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
0997 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
0998
0999 return !!(widths & width);
1000 }
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
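/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: lane adapter
 * @width: target width (1 for single lane, 2 for dual lane)
 *
 * Only writes the target link width field; it does not wait for the
 * width to actually change.
 *
 * Return: 0 on success or a negative error code.
 */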
1012 int tb_port_set_link_width(struct tb_port *port, unsigned int width)
1013 {
1014 u32 val;
1015 int ret;
1016
1017 if (!port->cap_phy)
1018 return -EINVAL;
1019
1020 ret = tb_port_read(port, &val, TB_CFG_PORT,
1021 port->cap_phy + LANE_ADP_CS_1, 1);
1022 if (ret)
1023 return ret;
1024
1025 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
1026 switch (width) {
1027 case 1:
1028 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
1029 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1030 break;
1031 case 2:
1032 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
1033 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1034 break;
1035 default:
1036 return -EINVAL;
1037 }
1038
1039 return tb_port_write(port, &val, TB_CFG_PORT,
1040 port->cap_phy + LANE_ADP_CS_1, 1);
1041 }
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
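/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding on a lane adapter
 * @port: lane adapter
 * @bonding: enable/disable bonding
 *
 * Sets or clears the lane bonding bit and records the new state in both
 * lanes of the dual-link pair. Does not wait for the link to change
 * width (see tb_port_wait_for_link_width()).
 *
 * Return: 0 on success or a negative error code.
 */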
1058 int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
1059 {
1060 u32 val;
1061 int ret;
1062
1063 if (!port->cap_phy)
1064 return -EINVAL;
1065
1066 ret = tb_port_read(port, &val, TB_CFG_PORT,
1067 port->cap_phy + LANE_ADP_CS_1, 1);
1068 if (ret)
1069 return ret;
1070
1071 if (bonding)
1072 val |= LANE_ADP_CS_1_LB;
1073 else
1074 val &= ~LANE_ADP_CS_1_LB;
1075
1076 ret = tb_port_write(port, &val, TB_CFG_PORT,
1077 port->cap_phy + LANE_ADP_CS_1, 1);
1078 if (ret)
1079 return ret;
1080
1081
1082
1083
1084
1085 port->bonded = bonding;
1086 port->dual_link_port->bonded = bonding;
1087
1088 return 0;
1089 }
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
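/**
 * tb_port_lane_bonding_enable() - Enable bonding on a port
 * @port: primary lane adapter
 *
 * Sets the target link width of both lanes to dual and then enables
 * lane bonding. On failure the target widths are restored back to
 * single lane.
 *
 * Return: 0 on success or a negative error code.
 */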
1103 int tb_port_lane_bonding_enable(struct tb_port *port)
1104 {
1105 int ret;
1106
1107
1108
1109
1110
1111 ret = tb_port_get_link_width(port);
1112 if (ret == 1) {
1113 ret = tb_port_set_link_width(port, 2);
1114 if (ret)
1115 goto err_lane0;
1116 }
1117
1118 ret = tb_port_get_link_width(port->dual_link_port);
1119 if (ret == 1) {
1120 ret = tb_port_set_link_width(port->dual_link_port, 2);
1121 if (ret)
1122 goto err_lane0;
1123 }
1124
1125 ret = tb_port_set_lane_bonding(port, true);
1126 if (ret)
1127 goto err_lane1;
1128
1129 return 0;
1130
1131 err_lane1:
1132 tb_port_set_link_width(port->dual_link_port, 1);
1133 err_lane0:
1134 tb_port_set_link_width(port, 1);
1135 return ret;
1136 }
1137
1138
1139
1140
1141
1142
1143
1144
1145 void tb_port_lane_bonding_disable(struct tb_port *port)
1146 {
1147 tb_port_set_lane_bonding(port, false);
1148 tb_port_set_link_width(port->dual_link_port, 1);
1149 tb_port_set_link_width(port, 1);
1150 }
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
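/**
 * tb_port_wait_for_link_width() - Wait until the link reaches a given width
 * @port: lane adapter to poll
 * @width: expected link width (in lanes)
 * @timeout_msec: timeout in milliseconds
 *
 * Polls the current link width until it matches @width or the timeout
 * expires. -EACCES (port locked) reads are ignored and retried.
 *
 * Return: 0 when the width matched, -ETIMEDOUT on timeout, or another
 * negative error code on read failure.
 */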
1163 int tb_port_wait_for_link_width(struct tb_port *port, int width,
1164 int timeout_msec)
1165 {
1166 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1167 int ret;
1168
1169 do {
1170 ret = tb_port_get_link_width(port);
1171 if (ret < 0) {
1172
1173
1174
1175
1176
1177 if (ret != -EACCES)
1178 return ret;
1179 } else if (ret == width) {
1180 return 0;
1181 }
1182
1183 usleep_range(1000, 2000);
1184 } while (ktime_before(ktime_get(), timeout));
1185
1186 return -ETIMEDOUT;
1187 }
1188
1189 static int tb_port_do_update_credits(struct tb_port *port)
1190 {
1191 u32 nfc_credits;
1192 int ret;
1193
1194 ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
1195 if (ret)
1196 return ret;
1197
1198 if (nfc_credits != port->config.nfc_credits) {
1199 u32 total;
1200
1201 total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
1202 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
1203
1204 tb_port_dbg(port, "total credits changed %u -> %u\n",
1205 port->total_credits, total);
1206
1207 port->config.nfc_credits = nfc_credits;
1208 port->total_credits = total;
1209 }
1210
1211 return 0;
1212 }
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222 int tb_port_update_credits(struct tb_port *port)
1223 {
1224 int ret;
1225
1226 ret = tb_port_do_update_credits(port);
1227 if (ret)
1228 return ret;
1229 return tb_port_do_update_credits(port->dual_link_port);
1230 }
1231
1232 static int tb_port_start_lane_initialization(struct tb_port *port)
1233 {
1234 int ret;
1235
1236 if (tb_switch_is_usb4(port->sw))
1237 return 0;
1238
1239 ret = tb_lc_start_lane_initialization(port);
1240 return ret == -EINVAL ? 0 : ret;
1241 }
1242
1243
1244
1245
1246
1247 static bool tb_port_resume(struct tb_port *port)
1248 {
1249 bool has_remote = tb_port_has_remote(port);
1250
1251 if (port->usb4) {
1252 usb4_port_device_resume(port->usb4);
1253 } else if (!has_remote) {
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263 if (!tb_is_upstream_port(port) || port->xdomain)
1264 tb_port_start_lane_initialization(port);
1265 }
1266
1267 return has_remote || port->xdomain;
1268 }
1269
1270
1271
1272
1273
1274 bool tb_port_is_enabled(struct tb_port *port)
1275 {
1276 switch (port->config.type) {
1277 case TB_TYPE_PCIE_UP:
1278 case TB_TYPE_PCIE_DOWN:
1279 return tb_pci_port_is_enabled(port);
1280
1281 case TB_TYPE_DP_HDMI_IN:
1282 case TB_TYPE_DP_HDMI_OUT:
1283 return tb_dp_port_is_enabled(port);
1284
1285 case TB_TYPE_USB3_UP:
1286 case TB_TYPE_USB3_DOWN:
1287 return tb_usb3_port_is_enabled(port);
1288
1289 default:
1290 return false;
1291 }
1292 }
1293
1294
1295
1296
1297
1298 bool tb_usb3_port_is_enabled(struct tb_port *port)
1299 {
1300 u32 data;
1301
1302 if (tb_port_read(port, &data, TB_CFG_PORT,
1303 port->cap_adap + ADP_USB3_CS_0, 1))
1304 return false;
1305
1306 return !!(data & ADP_USB3_CS_0_PE);
1307 }
1308
1309
1310
1311
1312
1313
1314 int tb_usb3_port_enable(struct tb_port *port, bool enable)
1315 {
1316 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1317 : ADP_USB3_CS_0_V;
1318
1319 if (!port->cap_adap)
1320 return -ENXIO;
1321 return tb_port_write(port, &word, TB_CFG_PORT,
1322 port->cap_adap + ADP_USB3_CS_0, 1);
1323 }
1324
1325
1326
1327
1328
1329 bool tb_pci_port_is_enabled(struct tb_port *port)
1330 {
1331 u32 data;
1332
1333 if (tb_port_read(port, &data, TB_CFG_PORT,
1334 port->cap_adap + ADP_PCIE_CS_0, 1))
1335 return false;
1336
1337 return !!(data & ADP_PCIE_CS_0_PE);
1338 }
1339
1340
1341
1342
1343
1344
1345 int tb_pci_port_enable(struct tb_port *port, bool enable)
1346 {
1347 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
1348 if (!port->cap_adap)
1349 return -ENXIO;
1350 return tb_port_write(port, &word, TB_CFG_PORT,
1351 port->cap_adap + ADP_PCIE_CS_0, 1);
1352 }
1353
1354
1355
1356
1357
1358
1359
1360 int tb_dp_port_hpd_is_active(struct tb_port *port)
1361 {
1362 u32 data;
1363 int ret;
1364
1365 ret = tb_port_read(port, &data, TB_CFG_PORT,
1366 port->cap_adap + ADP_DP_CS_2, 1);
1367 if (ret)
1368 return ret;
1369
1370 return !!(data & ADP_DP_CS_2_HDP);
1371 }
1372
1373
1374
1375
1376
1377
1378
1379 int tb_dp_port_hpd_clear(struct tb_port *port)
1380 {
1381 u32 data;
1382 int ret;
1383
1384 ret = tb_port_read(port, &data, TB_CFG_PORT,
1385 port->cap_adap + ADP_DP_CS_3, 1);
1386 if (ret)
1387 return ret;
1388
1389 data |= ADP_DP_CS_3_HDPC;
1390 return tb_port_write(port, &data, TB_CFG_PORT,
1391 port->cap_adap + ADP_DP_CS_3, 1);
1392 }
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1406 unsigned int aux_tx, unsigned int aux_rx)
1407 {
1408 u32 data[2];
1409 int ret;
1410
1411 if (tb_switch_is_usb4(port->sw))
1412 return 0;
1413
1414 ret = tb_port_read(port, data, TB_CFG_PORT,
1415 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1416 if (ret)
1417 return ret;
1418
1419 data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
1420 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1421 data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1422
1423 data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1424 ADP_DP_CS_0_VIDEO_HOPID_MASK;
1425 data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1426 data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1427 ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1428
1429 return tb_port_write(port, data, TB_CFG_PORT,
1430 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1431 }
1432
1433
1434
1435
1436
1437 bool tb_dp_port_is_enabled(struct tb_port *port)
1438 {
1439 u32 data[2];
1440
1441 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1442 ARRAY_SIZE(data)))
1443 return false;
1444
1445 return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1446 }
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456 int tb_dp_port_enable(struct tb_port *port, bool enable)
1457 {
1458 u32 data[2];
1459 int ret;
1460
1461 ret = tb_port_read(port, data, TB_CFG_PORT,
1462 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1463 if (ret)
1464 return ret;
1465
1466 if (enable)
1467 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1468 else
1469 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1470
1471 return tb_port_write(port, data, TB_CFG_PORT,
1472 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1473 }
1474
1475
1476
1477 static const char *tb_switch_generation_name(const struct tb_switch *sw)
1478 {
1479 switch (sw->generation) {
1480 case 1:
1481 return "Thunderbolt 1";
1482 case 2:
1483 return "Thunderbolt 2";
1484 case 3:
1485 return "Thunderbolt 3";
1486 case 4:
1487 return "USB4";
1488 default:
1489 return "Unknown";
1490 }
1491 }
1492
1493 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1494 {
1495 const struct tb_regs_switch_header *regs = &sw->config;
1496
1497 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1498 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1499 regs->revision, regs->thunderbolt_version);
1500 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
1501 tb_dbg(tb, " Config:\n");
1502 tb_dbg(tb,
1503 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1504 regs->upstream_port_number, regs->depth,
1505 (((u64) regs->route_hi) << 32) | regs->route_lo,
1506 regs->enabled, regs->plug_events_delay);
1507 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
1508 regs->__unknown1, regs->__unknown4);
1509 }
1510
1511
1512
1513
1514
1515
1516
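/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: switch to reset
 *
 * Only performed on generation 1 switches; later generations return 0
 * without doing anything.
 *
 * Return: 0 on success or a negative error code.
 */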
1517 int tb_switch_reset(struct tb_switch *sw)
1518 {
1519 struct tb_cfg_result res;
1520
1521 if (sw->generation > 1)
1522 return 0;
1523
1524 tb_sw_dbg(sw, "resetting switch\n");
1525
1526 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1527 TB_CFG_SWITCH, 2, 2);
1528 if (res.err)
1529 return res.err;
1530 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
1531 if (res.err > 0)
1532 return -EIO;
1533 return res.err;
1534 }
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
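/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: switch to read from
 * @offset: config space offset to poll
 * @bit: mask of the bits to wait for
 * @value: expected value of the masked bits
 * @timeout_msec: timeout in milliseconds
 *
 * Polls the offset until the masked bits equal @value or the timeout
 * expires.
 *
 * Return: 0 when the bits matched, -ETIMEDOUT on timeout, or a negative
 * error code on read failure.
 */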
1548 int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
1549 u32 value, int timeout_msec)
1550 {
1551 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1552
1553 do {
1554 u32 val;
1555 int ret;
1556
1557 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
1558 if (ret)
1559 return ret;
1560
1561 if ((val & bit) == value)
1562 return 0;
1563
1564 usleep_range(50, 100);
1565 } while (ktime_before(ktime_get(), timeout));
1566
1567 return -ETIMEDOUT;
1568 }
1569
1570
1571
1572
1573
1574
1575
1576
1577 static int tb_plug_events_active(struct tb_switch *sw, bool active)
1578 {
1579 u32 data;
1580 int res;
1581
1582 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1583 return 0;
1584
1585 sw->config.plug_events_delay = 0xff;
1586 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1587 if (res)
1588 return res;
1589
1590 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1591 if (res)
1592 return res;
1593
1594 if (active) {
1595 data = data & 0xFFFFFF83;
1596 switch (sw->config.device_id) {
1597 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1598 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1599 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1600 break;
1601 default:
1602
1603
1604
1605
1606
1607 if (!tb_switch_is_alpine_ridge(sw))
1608 data |= TB_PLUG_EVENTS_USB_DISABLE;
1609 }
1610 } else {
1611 data = data | 0x7c;
1612 }
1613 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1614 sw->cap_plug_events + 1, 1);
1615 }
1616
1617 static ssize_t authorized_show(struct device *dev,
1618 struct device_attribute *attr,
1619 char *buf)
1620 {
1621 struct tb_switch *sw = tb_to_switch(dev);
1622
1623 return sprintf(buf, "%u\n", sw->authorized);
1624 }
1625
1626 static int disapprove_switch(struct device *dev, void *not_used)
1627 {
1628 char *envp[] = { "AUTHORIZED=0", NULL };
1629 struct tb_switch *sw;
1630
1631 sw = tb_to_switch(dev);
1632 if (sw && sw->authorized) {
1633 int ret;
1634
1635
1636 ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1637 if (ret)
1638 return ret;
1639
1640 ret = tb_domain_disapprove_switch(sw->tb, sw);
1641 if (ret)
1642 return ret;
1643
1644 sw->authorized = 0;
1645 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1646 }
1647
1648 return 0;
1649 }
1650
1651 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1652 {
1653 char envp_string[13];
1654 int ret = -EINVAL;
1655 char *envp[] = { envp_string, NULL };
1656
1657 if (!mutex_trylock(&sw->tb->lock))
1658 return restart_syscall();
1659
1660 if (!!sw->authorized == !!val)
1661 goto unlock;
1662
1663 switch (val) {
1664
1665 case 0:
1666 if (tb_route(sw)) {
1667 ret = disapprove_switch(&sw->dev, NULL);
1668 goto unlock;
1669 }
1670 break;
1671
1672
1673 case 1:
1674 if (sw->key)
1675 ret = tb_domain_approve_switch_key(sw->tb, sw);
1676 else
1677 ret = tb_domain_approve_switch(sw->tb, sw);
1678 break;
1679
1680
1681 case 2:
1682 if (sw->key)
1683 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1684 break;
1685
1686 default:
1687 break;
1688 }
1689
1690 if (!ret) {
1691 sw->authorized = val;
1692
1693
1694
1695
1696 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1697 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1698 }
1699
1700 unlock:
1701 mutex_unlock(&sw->tb->lock);
1702 return ret;
1703 }
1704
1705 static ssize_t authorized_store(struct device *dev,
1706 struct device_attribute *attr,
1707 const char *buf, size_t count)
1708 {
1709 struct tb_switch *sw = tb_to_switch(dev);
1710 unsigned int val;
1711 ssize_t ret;
1712
1713 ret = kstrtouint(buf, 0, &val);
1714 if (ret)
1715 return ret;
1716 if (val > 2)
1717 return -EINVAL;
1718
1719 pm_runtime_get_sync(&sw->dev);
1720 ret = tb_switch_set_authorized(sw, val);
1721 pm_runtime_mark_last_busy(&sw->dev);
1722 pm_runtime_put_autosuspend(&sw->dev);
1723
1724 return ret ? ret : count;
1725 }
1726 static DEVICE_ATTR_RW(authorized);
1727
1728 static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1729 char *buf)
1730 {
1731 struct tb_switch *sw = tb_to_switch(dev);
1732
1733 return sprintf(buf, "%u\n", sw->boot);
1734 }
1735 static DEVICE_ATTR_RO(boot);
1736
1737 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1738 char *buf)
1739 {
1740 struct tb_switch *sw = tb_to_switch(dev);
1741
1742 return sprintf(buf, "%#x\n", sw->device);
1743 }
1744 static DEVICE_ATTR_RO(device);
1745
1746 static ssize_t
1747 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1748 {
1749 struct tb_switch *sw = tb_to_switch(dev);
1750
1751 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1752 }
1753 static DEVICE_ATTR_RO(device_name);
1754
1755 static ssize_t
1756 generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1757 {
1758 struct tb_switch *sw = tb_to_switch(dev);
1759
1760 return sprintf(buf, "%u\n", sw->generation);
1761 }
1762 static DEVICE_ATTR_RO(generation);
1763
1764 static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1765 char *buf)
1766 {
1767 struct tb_switch *sw = tb_to_switch(dev);
1768 ssize_t ret;
1769
1770 if (!mutex_trylock(&sw->tb->lock))
1771 return restart_syscall();
1772
1773 if (sw->key)
1774 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1775 else
1776 ret = sprintf(buf, "\n");
1777
1778 mutex_unlock(&sw->tb->lock);
1779 return ret;
1780 }
1781
1782 static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1783 const char *buf, size_t count)
1784 {
1785 struct tb_switch *sw = tb_to_switch(dev);
1786 u8 key[TB_SWITCH_KEY_SIZE];
1787 ssize_t ret = count;
1788 bool clear = false;
1789
1790 if (!strcmp(buf, "\n"))
1791 clear = true;
1792 else if (hex2bin(key, buf, sizeof(key)))
1793 return -EINVAL;
1794
1795 if (!mutex_trylock(&sw->tb->lock))
1796 return restart_syscall();
1797
1798 if (sw->authorized) {
1799 ret = -EBUSY;
1800 } else {
1801 kfree(sw->key);
1802 if (clear) {
1803 sw->key = NULL;
1804 } else {
1805 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1806 if (!sw->key)
1807 ret = -ENOMEM;
1808 }
1809 }
1810
1811 mutex_unlock(&sw->tb->lock);
1812 return ret;
1813 }
1814 static DEVICE_ATTR(key, 0600, key_show, key_store);
1815
1816 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1817 char *buf)
1818 {
1819 struct tb_switch *sw = tb_to_switch(dev);
1820
1821 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1822 }
1823
1824
1825
1826
1827
1828 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1829 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1830
1831 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1832 char *buf)
1833 {
1834 struct tb_switch *sw = tb_to_switch(dev);
1835
1836 return sprintf(buf, "%u\n", sw->link_width);
1837 }
1838
1839
1840
1841
1842
1843 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1844 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1845
1846 static ssize_t nvm_authenticate_show(struct device *dev,
1847 struct device_attribute *attr, char *buf)
1848 {
1849 struct tb_switch *sw = tb_to_switch(dev);
1850 u32 status;
1851
1852 nvm_get_auth_status(sw, &status);
1853 return sprintf(buf, "%#x\n", status);
1854 }
1855
1856 static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1857 bool disconnect)
1858 {
1859 struct tb_switch *sw = tb_to_switch(dev);
1860 int val, ret;
1861
1862 pm_runtime_get_sync(&sw->dev);
1863
1864 if (!mutex_trylock(&sw->tb->lock)) {
1865 ret = restart_syscall();
1866 goto exit_rpm;
1867 }
1868
1869
1870 if (!sw->nvm) {
1871 ret = -EAGAIN;
1872 goto exit_unlock;
1873 }
1874
1875 ret = kstrtoint(buf, 10, &val);
1876 if (ret)
1877 goto exit_unlock;
1878
1879
1880 nvm_clear_auth_status(sw);
1881
1882 if (val > 0) {
1883 if (val == AUTHENTICATE_ONLY) {
1884 if (disconnect)
1885 ret = -EINVAL;
1886 else
1887 ret = nvm_authenticate(sw, true);
1888 } else {
1889 if (!sw->nvm->flushed) {
1890 if (!sw->nvm->buf) {
1891 ret = -EINVAL;
1892 goto exit_unlock;
1893 }
1894
1895 ret = nvm_validate_and_write(sw);
1896 if (ret || val == WRITE_ONLY)
1897 goto exit_unlock;
1898 }
1899 if (val == WRITE_AND_AUTHENTICATE) {
1900 if (disconnect)
1901 ret = tb_lc_force_power(sw);
1902 else
1903 ret = nvm_authenticate(sw, false);
1904 }
1905 }
1906 }
1907
1908 exit_unlock:
1909 mutex_unlock(&sw->tb->lock);
1910 exit_rpm:
1911 pm_runtime_mark_last_busy(&sw->dev);
1912 pm_runtime_put_autosuspend(&sw->dev);
1913
1914 return ret;
1915 }
1916
1917 static ssize_t nvm_authenticate_store(struct device *dev,
1918 struct device_attribute *attr, const char *buf, size_t count)
1919 {
1920 int ret = nvm_authenticate_sysfs(dev, buf, false);
1921 if (ret)
1922 return ret;
1923 return count;
1924 }
1925 static DEVICE_ATTR_RW(nvm_authenticate);
1926
1927 static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1928 struct device_attribute *attr, char *buf)
1929 {
1930 return nvm_authenticate_show(dev, attr, buf);
1931 }
1932
1933 static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1934 struct device_attribute *attr, const char *buf, size_t count)
1935 {
1936 int ret;
1937
1938 ret = nvm_authenticate_sysfs(dev, buf, true);
1939 return ret ? ret : count;
1940 }
1941 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1942
1943 static ssize_t nvm_version_show(struct device *dev,
1944 struct device_attribute *attr, char *buf)
1945 {
1946 struct tb_switch *sw = tb_to_switch(dev);
1947 int ret;
1948
1949 if (!mutex_trylock(&sw->tb->lock))
1950 return restart_syscall();
1951
1952 if (sw->safe_mode)
1953 ret = -ENODATA;
1954 else if (!sw->nvm)
1955 ret = -EAGAIN;
1956 else
1957 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1958
1959 mutex_unlock(&sw->tb->lock);
1960
1961 return ret;
1962 }
1963 static DEVICE_ATTR_RO(nvm_version);
1964
1965 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1966 char *buf)
1967 {
1968 struct tb_switch *sw = tb_to_switch(dev);
1969
1970 return sprintf(buf, "%#x\n", sw->vendor);
1971 }
1972 static DEVICE_ATTR_RO(vendor);
1973
1974 static ssize_t
1975 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1976 {
1977 struct tb_switch *sw = tb_to_switch(dev);
1978
1979 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1980 }
1981 static DEVICE_ATTR_RO(vendor_name);
1982
1983 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1984 char *buf)
1985 {
1986 struct tb_switch *sw = tb_to_switch(dev);
1987
1988 return sprintf(buf, "%pUb\n", sw->uuid);
1989 }
1990 static DEVICE_ATTR_RO(unique_id);
1991
1992 static struct attribute *switch_attrs[] = {
1993 &dev_attr_authorized.attr,
1994 &dev_attr_boot.attr,
1995 &dev_attr_device.attr,
1996 &dev_attr_device_name.attr,
1997 &dev_attr_generation.attr,
1998 &dev_attr_key.attr,
1999 &dev_attr_nvm_authenticate.attr,
2000 &dev_attr_nvm_authenticate_on_disconnect.attr,
2001 &dev_attr_nvm_version.attr,
2002 &dev_attr_rx_speed.attr,
2003 &dev_attr_rx_lanes.attr,
2004 &dev_attr_tx_speed.attr,
2005 &dev_attr_tx_lanes.attr,
2006 &dev_attr_vendor.attr,
2007 &dev_attr_vendor_name.attr,
2008 &dev_attr_unique_id.attr,
2009 NULL,
2010 };
2011
2012 static umode_t switch_attr_is_visible(struct kobject *kobj,
2013 struct attribute *attr, int n)
2014 {
2015 struct device *dev = kobj_to_dev(kobj);
2016 struct tb_switch *sw = tb_to_switch(dev);
2017
2018 if (attr == &dev_attr_authorized.attr) {
2019 if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
2020 sw->tb->security_level == TB_SECURITY_DPONLY)
2021 return 0;
2022 } else if (attr == &dev_attr_device.attr) {
2023 if (!sw->device)
2024 return 0;
2025 } else if (attr == &dev_attr_device_name.attr) {
2026 if (!sw->device_name)
2027 return 0;
2028 } else if (attr == &dev_attr_vendor.attr) {
2029 if (!sw->vendor)
2030 return 0;
2031 } else if (attr == &dev_attr_vendor_name.attr) {
2032 if (!sw->vendor_name)
2033 return 0;
2034 } else if (attr == &dev_attr_key.attr) {
2035 if (tb_route(sw) &&
2036 sw->tb->security_level == TB_SECURITY_SECURE &&
2037 sw->security_level == TB_SECURITY_SECURE)
2038 return attr->mode;
2039 return 0;
2040 } else if (attr == &dev_attr_rx_speed.attr ||
2041 attr == &dev_attr_rx_lanes.attr ||
2042 attr == &dev_attr_tx_speed.attr ||
2043 attr == &dev_attr_tx_lanes.attr) {
2044 if (tb_route(sw))
2045 return attr->mode;
2046 return 0;
2047 } else if (attr == &dev_attr_nvm_authenticate.attr) {
2048 if (nvm_upgradeable(sw))
2049 return attr->mode;
2050 return 0;
2051 } else if (attr == &dev_attr_nvm_version.attr) {
2052 if (nvm_readable(sw))
2053 return attr->mode;
2054 return 0;
2055 } else if (attr == &dev_attr_boot.attr) {
2056 if (tb_route(sw))
2057 return attr->mode;
2058 return 0;
2059 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
2060 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
2061 return attr->mode;
2062 return 0;
2063 }
2064
2065 return sw->safe_mode ? 0 : attr->mode;
2066 }
2067
2068 static const struct attribute_group switch_group = {
2069 .is_visible = switch_attr_is_visible,
2070 .attrs = switch_attrs,
2071 };
2072
2073 static const struct attribute_group *switch_groups[] = {
2074 &switch_group,
2075 NULL,
2076 };
2077
2078 static void tb_switch_release(struct device *dev)
2079 {
2080 struct tb_switch *sw = tb_to_switch(dev);
2081 struct tb_port *port;
2082
2083 dma_port_free(sw->dma_port);
2084
2085 tb_switch_for_each_port(sw, port) {
2086 ida_destroy(&port->in_hopids);
2087 ida_destroy(&port->out_hopids);
2088 }
2089
2090 kfree(sw->uuid);
2091 kfree(sw->device_name);
2092 kfree(sw->vendor_name);
2093 kfree(sw->ports);
2094 kfree(sw->drom);
2095 kfree(sw->key);
2096 kfree(sw);
2097 }
2098
2099 static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2100 {
2101 struct tb_switch *sw = tb_to_switch(dev);
2102 const char *type;
2103
2104 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
2105 if (add_uevent_var(env, "USB4_VERSION=1.0"))
2106 return -ENOMEM;
2107 }
2108
2109 if (!tb_route(sw)) {
2110 type = "host";
2111 } else {
2112 const struct tb_port *port;
2113 bool hub = false;
2114
2115
2116 tb_switch_for_each_port(sw, port) {
2117 if (!port->disabled && !tb_is_upstream_port(port) &&
2118 tb_port_is_null(port)) {
2119 hub = true;
2120 break;
2121 }
2122 }
2123
2124 type = hub ? "hub" : "device";
2125 }
2126
2127 if (add_uevent_var(env, "USB4_TYPE=%s", type))
2128 return -ENOMEM;
2129 return 0;
2130 }
2131
2132
2133
2134
2135
2136 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2137 {
2138 struct tb_switch *sw = tb_to_switch(dev);
2139 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2140
2141 if (cm_ops->runtime_suspend_switch)
2142 return cm_ops->runtime_suspend_switch(sw);
2143
2144 return 0;
2145 }
2146
2147 static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2148 {
2149 struct tb_switch *sw = tb_to_switch(dev);
2150 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2151
2152 if (cm_ops->runtime_resume_switch)
2153 return cm_ops->runtime_resume_switch(sw);
2154 return 0;
2155 }
2156
2157 static const struct dev_pm_ops tb_switch_pm_ops = {
2158 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2159 NULL)
2160 };
2161
2162 struct device_type tb_switch_type = {
2163 .name = "thunderbolt_device",
2164 .release = tb_switch_release,
2165 .uevent = tb_switch_uevent,
2166 .pm = &tb_switch_pm_ops,
2167 };
2168
2169 static int tb_switch_get_generation(struct tb_switch *sw)
2170 {
2171 switch (sw->config.device_id) {
2172 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2173 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2174 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2175 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2176 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2177 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2178 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2179 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2180 return 1;
2181
2182 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2183 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2184 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2185 return 2;
2186
2187 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2188 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2189 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2190 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2191 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
2192 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2193 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2194 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
2195 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2196 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2197 return 3;
2198
2199 default:
2200 if (tb_switch_is_usb4(sw))
2201 return 4;
2202
2203
2204
2205
2206
2207 tb_sw_warn(sw, "unsupported switch device id %#x\n",
2208 sw->config.device_id);
2209 return 1;
2210 }
2211 }
2212
2213 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2214 {
2215 int max_depth;
2216
2217 if (tb_switch_is_usb4(sw) ||
2218 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2219 max_depth = USB4_SWITCH_MAX_DEPTH;
2220 else
2221 max_depth = TB_SWITCH_MAX_DEPTH;
2222
2223 return depth > max_depth;
2224 }
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
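/**
 * tb_switch_alloc() - allocate a switch
 * @tb: pointer to the owning domain
 * @parent: parent device for this switch
 * @route: route string for this switch
 *
 * Allocates and initializes a switch. Does not upload the configuration
 * to the switch; use tb_switch_configure() and tb_switch_add() for that.
 *
 * Return: pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */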
2240 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2241 u64 route)
2242 {
2243 struct tb_switch *sw;
2244 int upstream_port;
2245 int i, ret, depth;
2246
2247
2248 if (route) {
2249 struct tb_switch *parent_sw = tb_to_switch(parent);
2250 struct tb_port *down;
2251
2252 down = tb_port_at(route, parent_sw);
2253 tb_port_unlock(down);
2254 }
2255
2256 depth = tb_route_length(route);
2257
2258 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
2259 if (upstream_port < 0)
2260 return ERR_PTR(upstream_port);
2261
2262 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2263 if (!sw)
2264 return ERR_PTR(-ENOMEM);
2265
2266 sw->tb = tb;
2267 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2268 if (ret)
2269 goto err_free_sw_ports;
2270
2271 sw->generation = tb_switch_get_generation(sw);
2272
2273 tb_dbg(tb, "current switch config:\n");
2274 tb_dump_switch(tb, sw);
2275
2276
2277 sw->config.upstream_port_number = upstream_port;
2278 sw->config.depth = depth;
2279 sw->config.route_hi = upper_32_bits(route);
2280 sw->config.route_lo = lower_32_bits(route);
2281 sw->config.enabled = 0;
2282
2283
2284 if (tb_switch_exceeds_max_depth(sw, depth)) {
2285 ret = -EADDRNOTAVAIL;
2286 goto err_free_sw_ports;
2287 }
2288
2289
2290 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2291 GFP_KERNEL);
2292 if (!sw->ports) {
2293 ret = -ENOMEM;
2294 goto err_free_sw_ports;
2295 }
2296
2297 for (i = 0; i <= sw->config.max_port_number; i++) {
2298
2299 sw->ports[i].sw = sw;
2300 sw->ports[i].port = i;
2301
2302
2303 if (i) {
2304 ida_init(&sw->ports[i].in_hopids);
2305 ida_init(&sw->ports[i].out_hopids);
2306 }
2307 }
2308
2309 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2310 if (ret > 0)
2311 sw->cap_plug_events = ret;
2312
2313 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2314 if (ret > 0)
2315 sw->cap_vsec_tmu = ret;
2316
2317 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2318 if (ret > 0)
2319 sw->cap_lc = ret;
2320
2321 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2322 if (ret > 0)
2323 sw->cap_lp = ret;
2324
2325
2326 if (!route)
2327 sw->authorized = true;
2328
2329 device_initialize(&sw->dev);
2330 sw->dev.parent = parent;
2331 sw->dev.bus = &tb_bus_type;
2332 sw->dev.type = &tb_switch_type;
2333 sw->dev.groups = switch_groups;
2334 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2335
2336 return sw;
2337
2338 err_free_sw_ports:
2339 kfree(sw->ports);
2340 kfree(sw);
2341
2342 return ERR_PTR(ret);
2343 }
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359 struct tb_switch *
2360 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2361 {
2362 struct tb_switch *sw;
2363
2364 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2365 if (!sw)
2366 return ERR_PTR(-ENOMEM);
2367
2368 sw->tb = tb;
2369 sw->config.depth = tb_route_length(route);
2370 sw->config.route_hi = upper_32_bits(route);
2371 sw->config.route_lo = lower_32_bits(route);
2372 sw->safe_mode = true;
2373
2374 device_initialize(&sw->dev);
2375 sw->dev.parent = parent;
2376 sw->dev.bus = &tb_bus_type;
2377 sw->dev.type = &tb_switch_type;
2378 sw->dev.groups = switch_groups;
2379 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2380
2381 return sw;
2382 }
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
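/**
 * tb_switch_configure() - Upload configuration to the switch
 * @sw: switch to configure
 *
 * Call this function before the switch is added to the system. It
 * uploads the switch local configuration (route string, enabled flag,
 * plug events delay) and enables plug events.
 *
 * Return: 0 in case of success and negative errno in case of failure.
 */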
2395 int tb_switch_configure(struct tb_switch *sw)
2396 {
2397 struct tb *tb = sw->tb;
2398 u64 route;
2399 int ret;
2400
2401 route = tb_route(sw);
2402
2403 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2404 sw->config.enabled ? "restoring" : "initializing", route,
2405 tb_route_length(route), sw->config.upstream_port_number);
2406
2407 sw->config.enabled = 1;
2408
2409 if (tb_switch_is_usb4(sw)) {
2410
2411
2412
2413
2414
2415 sw->config.cmuv = USB4_VERSION_1_0;
2416 sw->config.plug_events_delay = 0xa;
2417
2418
2419 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2420 ROUTER_CS_1, 4);
2421 if (ret)
2422 return ret;
2423
2424 ret = usb4_switch_setup(sw);
2425 } else {
2426 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2427 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2428 sw->config.vendor_id);
2429
2430 if (!sw->cap_plug_events) {
2431 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
2432 return -ENODEV;
2433 }
2434
2435
2436 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2437 ROUTER_CS_1, 3);
2438 }
2439 if (ret)
2440 return ret;
2441
2442 return tb_plug_events_active(sw, true);
2443 }
2444
2445 static int tb_switch_set_uuid(struct tb_switch *sw)
2446 {
2447 bool uid = false;
2448 u32 uuid[4];
2449 int ret;
2450
2451 if (sw->uuid)
2452 return 0;
2453
2454 if (tb_switch_is_usb4(sw)) {
2455 ret = usb4_switch_read_uid(sw, &sw->uid);
2456 if (ret)
2457 return ret;
2458 uid = true;
2459 } else {
2460
2461
2462
2463
2464 ret = tb_lc_read_uuid(sw, uuid);
2465 if (ret) {
2466 if (ret != -EINVAL)
2467 return ret;
2468 uid = true;
2469 }
2470 }
2471
2472 if (uid) {
2473
2474
2475
2476
2477
2478
2479 uuid[0] = sw->uid & 0xffffffff;
2480 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2481 uuid[2] = 0xffffffff;
2482 uuid[3] = 0xffffffff;
2483 }
2484
2485 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2486 if (!sw->uuid)
2487 return -ENOMEM;
2488 return 0;
2489 }
2490
2491 static int tb_switch_add_dma_port(struct tb_switch *sw)
2492 {
2493 u32 status;
2494 int ret;
2495
2496 switch (sw->generation) {
2497 case 2:
2498
2499 if (tb_route(sw))
2500 return 0;
2501
2502 fallthrough;
2503 case 3:
2504 case 4:
2505 ret = tb_switch_set_uuid(sw);
2506 if (ret)
2507 return ret;
2508 break;
2509
2510 default:
2511
2512
2513
2514
2515 if (!sw->safe_mode)
2516 return 0;
2517 break;
2518 }
2519
2520 if (sw->no_nvm_upgrade)
2521 return 0;
2522
2523 if (tb_switch_is_usb4(sw)) {
2524 ret = usb4_switch_nvm_authenticate_status(sw, &status);
2525 if (ret)
2526 return ret;
2527
2528 if (status) {
2529 tb_sw_info(sw, "switch flash authentication failed\n");
2530 nvm_set_auth_status(sw, status);
2531 }
2532
2533 return 0;
2534 }
2535
2536
2537 if (!tb_route(sw) && !tb_switch_is_icm(sw))
2538 return 0;
2539
2540 sw->dma_port = dma_port_alloc(sw);
2541 if (!sw->dma_port)
2542 return 0;
2543
2544
2545
2546
2547
2548
2549
2550 nvm_get_auth_status(sw, &status);
2551 if (status) {
2552 if (!tb_route(sw))
2553 nvm_authenticate_complete_dma_port(sw);
2554 return 0;
2555 }
2556
2557
2558
2559
2560
2561
2562 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2563 if (ret <= 0)
2564 return ret;
2565
2566
2567 if (!tb_route(sw))
2568 nvm_authenticate_complete_dma_port(sw);
2569
2570 if (status) {
2571 tb_sw_info(sw, "switch flash authentication failed\n");
2572 nvm_set_auth_status(sw, status);
2573 }
2574
2575 tb_sw_info(sw, "power cycling the switch now\n");
2576 dma_port_power_cycle(sw->dma_port);
2577
2578
2579
2580
2581
2582 return -ESHUTDOWN;
2583 }
2584
2585 static void tb_switch_default_link_ports(struct tb_switch *sw)
2586 {
2587 int i;
2588
2589 for (i = 1; i <= sw->config.max_port_number; i++) {
2590 struct tb_port *port = &sw->ports[i];
2591 struct tb_port *subordinate;
2592
2593 if (!tb_port_is_null(port))
2594 continue;
2595
2596
2597 if (i == sw->config.max_port_number ||
2598 !tb_port_is_null(&sw->ports[i + 1]))
2599 continue;
2600
2601
2602 subordinate = &sw->ports[i + 1];
2603 if (!port->dual_link_port && !subordinate->dual_link_port) {
2604 port->link_nr = 0;
2605 port->dual_link_port = subordinate;
2606 subordinate->link_nr = 1;
2607 subordinate->dual_link_port = port;
2608
2609 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2610 port->port, subordinate->port);
2611 }
2612 }
2613 }
2614
2615 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2616 {
2617 const struct tb_port *up = tb_upstream_port(sw);
2618
2619 if (!up->dual_link_port || !up->dual_link_port->remote)
2620 return false;
2621
2622 if (tb_switch_is_usb4(sw))
2623 return usb4_switch_lane_bonding_possible(sw);
2624 return tb_lc_lane_bonding_possible(sw);
2625 }
2626
2627 static int tb_switch_update_link_attributes(struct tb_switch *sw)
2628 {
2629 struct tb_port *up;
2630 bool change = false;
2631 int ret;
2632
2633 if (!tb_route(sw) || tb_switch_is_icm(sw))
2634 return 0;
2635
2636 up = tb_upstream_port(sw);
2637
2638 ret = tb_port_get_link_speed(up);
2639 if (ret < 0)
2640 return ret;
2641 if (sw->link_speed != ret)
2642 change = true;
2643 sw->link_speed = ret;
2644
2645 ret = tb_port_get_link_width(up);
2646 if (ret < 0)
2647 return ret;
2648 if (sw->link_width != ret)
2649 change = true;
2650 sw->link_width = ret;
2651
2652
2653 if (device_is_registered(&sw->dev) && change)
2654 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2655
2656 return 0;
2657 }
2658
2659
2660
2661
2662
2663
2664
2665
2666
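/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: switch whose upstream link to bond
 *
 * Connection manager can call this to bond the upstream link of @sw. If
 * the switch, the parent downstream port and the link partners all
 * support dual-lane operation, the link is turned into a bonded one and
 * the cached link attributes are updated.
 *
 * Return: 0 on success (including when bonding is not possible) or a
 * negative error code.
 */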
2667 int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2668 {
2669 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2670 struct tb_port *up, *down;
2671 u64 route = tb_route(sw);
2672 int ret;
2673
2674 if (!route)
2675 return 0;
2676
2677 if (!tb_switch_lane_bonding_possible(sw))
2678 return 0;
2679
2680 up = tb_upstream_port(sw);
2681 down = tb_port_at(route, parent);
2682
2683 if (!tb_port_is_width_supported(up, 2) ||
2684 !tb_port_is_width_supported(down, 2))
2685 return 0;
2686
2687 ret = tb_port_lane_bonding_enable(up);
2688 if (ret) {
2689 tb_port_warn(up, "failed to enable lane bonding\n");
2690 return ret;
2691 }
2692
2693 ret = tb_port_lane_bonding_enable(down);
2694 if (ret) {
2695 tb_port_warn(down, "failed to enable lane bonding\n");
2696 tb_port_lane_bonding_disable(up);
2697 return ret;
2698 }
2699
2700 ret = tb_port_wait_for_link_width(down, 2, 100);
2701 if (ret) {
2702 tb_port_warn(down, "timeout enabling lane bonding\n");
2703 return ret;
2704 }
2705
2706 tb_port_update_credits(down);
2707 tb_port_update_credits(up);
2708 tb_switch_update_link_attributes(sw);
2709
2710 tb_sw_dbg(sw, "lane bonding enabled\n");
2711 return ret;
2712 }
2713
2714 /**
2715 * tb_switch_lane_bonding_disable() - Disable lane bonding
2716 * @sw: Switch whose lane bonding to disable
2717 *
2718 * Disables lane bonding between @sw and its parent. This can be called
2719 * even if lanes were not bonded originally.
2720 */
2721 void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2722 {
2723 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2724 struct tb_port *up, *down;
2725
2726 if (!tb_route(sw))
2727 return;
2728
2729 up = tb_upstream_port(sw);
2730 if (!up->bonded)
2731 return;
2732
2733 down = tb_port_at(tb_route(sw), parent);
2734
2735 tb_port_lane_bonding_disable(up);
2736 tb_port_lane_bonding_disable(down);
2737
2738 /*
2739 * It is fine if we get other errors as the router might have
2740 * been unplugged.
2741 */
2742 if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2743 tb_sw_warn(sw, "timeout disabling lane bonding\n");
2744
2745 tb_port_update_credits(down);
2746 tb_port_update_credits(up);
2747 tb_switch_update_link_attributes(sw);
2748
2749 tb_sw_dbg(sw, "lane bonding disabled\n");
2750 }
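/*
 * Illustrative sketch (not part of the driver): a connection manager
 * would typically bond the lanes right after a device router is
 * discovered and keep the link configured across sleep. The helper
 * name handle_new_router() below is hypothetical; the tb_switch_*
 * calls are the ones defined in this file.
 *
 *	static void handle_new_router(struct tb_switch *sw)
 *	{
 *		if (tb_switch_lane_bonding_enable(sw))
 *			tb_sw_warn(sw, "failed to enable lane bonding\n");
 *
 *		tb_switch_configure_link(sw);
 *	}
 *
 * On removal the opposite order is used: tb_switch_unconfigure_link()
 * followed by tb_switch_lane_bonding_disable().
 */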
2751
2752 /**
2753 * tb_switch_configure_link() - Set link configured
2754 * @sw: Switch whose link is configured
2755 *
2756 * Sets the link upstream from @sw configured (from both ends) so that
2757 * it will not be disconnected when the domain exits sleep. Can be
2758 * called for any switch.
2759 *
2760 * It is recommended that this is called after lane bonding is enabled.
2761 *
2762 * Returns %0 on success and negative errno in case of error.
2763 */
2764 int tb_switch_configure_link(struct tb_switch *sw)
2765 {
2766 struct tb_port *up, *down;
2767 int ret;
2768
2769 if (!tb_route(sw) || tb_switch_is_icm(sw))
2770 return 0;
2771
2772 up = tb_upstream_port(sw);
2773 if (tb_switch_is_usb4(up->sw))
2774 ret = usb4_port_configure(up);
2775 else
2776 ret = tb_lc_configure_port(up);
2777 if (ret)
2778 return ret;
2779
2780 down = up->remote;
2781 if (tb_switch_is_usb4(down->sw))
2782 return usb4_port_configure(down);
2783 return tb_lc_configure_port(down);
2784 }
2785
2786 /**
2787 * tb_switch_unconfigure_link() - Unconfigure link
2788 * @sw: Switch whose link is unconfigured
2789 *
2790 * Sets the link unconfigured so that @sw will be disconnected if the
2791 * domain exits sleep.
2792 */
2793 void tb_switch_unconfigure_link(struct tb_switch *sw)
2794 {
2795 struct tb_port *up, *down;
2796
2797 if (sw->is_unplugged)
2798 return;
2799 if (!tb_route(sw) || tb_switch_is_icm(sw))
2800 return;
2801
2802 up = tb_upstream_port(sw);
2803 if (tb_switch_is_usb4(up->sw))
2804 usb4_port_unconfigure(up);
2805 else
2806 tb_lc_unconfigure_port(up);
2807
2808 down = up->remote;
2809 if (tb_switch_is_usb4(down->sw))
2810 usb4_port_unconfigure(down);
2811 else
2812 tb_lc_unconfigure_port(down);
2813 }
2814
2815 static void tb_switch_credits_init(struct tb_switch *sw)
2816 {
2817 if (tb_switch_is_icm(sw))
2818 return;
2819 if (!tb_switch_is_usb4(sw))
2820 return;
2821 if (usb4_switch_credits_init(sw))
2822 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2823 }
2824
2825 /**
2826 * tb_switch_add() - Add a switch to the domain
2827 * @sw: Switch to add
2828 *
2829 * This is the last step in adding a switch to the domain. It reads
2830 * identification information from the DROM and initializes the ports
2831 * so that they can be used to connect other switches. The switch is
2832 * exposed to userspace when this function returns successfully. To
2833 * remove and release the switch, call tb_switch_remove().
2834 *
2835 * Return: %0 in case of success and negative errno in case of failure
2836 */
2837 int tb_switch_add(struct tb_switch *sw)
2838 {
2839 int i, ret;
2840
2841 /*
2842 * Initialize DMA control port now before we read DROM. Recent
2843 * host controllers have more complete DROM on NVM that includes
2844 * vendor and model identification strings which we then expose
2845 * to userspace. NVM can be accessed through DMA configuration
2846 * based mailbox.
2847 */
2848 ret = tb_switch_add_dma_port(sw);
2849 if (ret) {
2850 dev_err(&sw->dev, "failed to add DMA port\n");
2851 return ret;
2852 }
2853
2854 if (!sw->safe_mode) {
2855 tb_switch_credits_init(sw);
2856
2857
2858 ret = tb_drom_read(sw);
2859 if (ret)
2860 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
2861 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2862
2863 tb_check_quirks(sw);
2864
2865 ret = tb_switch_set_uuid(sw);
2866 if (ret) {
2867 dev_err(&sw->dev, "failed to set UUID\n");
2868 return ret;
2869 }
2870
2871 for (i = 0; i <= sw->config.max_port_number; i++) {
2872 if (sw->ports[i].disabled) {
2873 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2874 continue;
2875 }
2876 ret = tb_init_port(&sw->ports[i]);
2877 if (ret) {
2878 dev_err(&sw->dev, "failed to initialize port %d\n", i);
2879 return ret;
2880 }
2881 }
2882
2883 tb_switch_default_link_ports(sw);
2884
2885 ret = tb_switch_update_link_attributes(sw);
2886 if (ret)
2887 return ret;
2888
2889 ret = tb_switch_tmu_init(sw);
2890 if (ret)
2891 return ret;
2892 }
2893
2894 ret = device_add(&sw->dev);
2895 if (ret) {
2896 dev_err(&sw->dev, "failed to add device: %d\n", ret);
2897 return ret;
2898 }
2899
2900 if (tb_route(sw)) {
2901 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2902 sw->vendor, sw->device);
2903 if (sw->vendor_name && sw->device_name)
2904 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2905 sw->device_name);
2906 }
2907
2908 ret = usb4_switch_add_ports(sw);
2909 if (ret) {
2910 dev_err(&sw->dev, "failed to add USB4 ports\n");
2911 goto err_del;
2912 }
2913
2914 ret = tb_switch_nvm_add(sw);
2915 if (ret) {
2916 dev_err(&sw->dev, "failed to add NVM devices\n");
2917 goto err_ports;
2918 }
2919
2920 /*
2921 * Thunderbolt routers do not generate wakeups themselves but
2922 * they forward wakeups from tunneled protocols, so enable it
2923 * here.
2924 */
2925 device_init_wakeup(&sw->dev, true);
2926
2927 pm_runtime_set_active(&sw->dev);
2928 if (sw->rpm) {
2929 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2930 pm_runtime_use_autosuspend(&sw->dev);
2931 pm_runtime_mark_last_busy(&sw->dev);
2932 pm_runtime_enable(&sw->dev);
2933 pm_request_autosuspend(&sw->dev);
2934 }
2935
2936 tb_switch_debugfs_init(sw);
2937 return 0;
2938
2939 err_ports:
2940 usb4_switch_remove_ports(sw);
2941 err_del:
2942 device_del(&sw->dev);
2943
2944 return ret;
2945 }
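/*
 * Illustrative sketch (not part of the driver): the usual lifecycle a
 * connection manager follows when a new router appears. Error handling
 * is simplified and the surrounding context is hypothetical; the
 * tb_switch_alloc()/tb_switch_configure()/tb_switch_put() helpers are
 * the existing ones in this file.
 *
 *	sw = tb_switch_alloc(tb, parent_dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	ret = tb_switch_configure(sw);
 *	if (!ret)
 *		ret = tb_switch_add(sw);
 *	if (ret)
 *		tb_switch_put(sw);
 *
 * Once added, the router is visible in sysfs and is later torn down
 * with tb_switch_remove().
 */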
2946
2947 /**
2948 * tb_switch_remove() - Remove and release a switch
2949 * @sw: Switch to remove
2950 *
2951 * This removes the switch from the domain and releases it after the
2952 * last reference to it drops to zero. If there are switches connected
2953 * below this switch, they are removed as well.
2954 */
2955 void tb_switch_remove(struct tb_switch *sw)
2956 {
2957 struct tb_port *port;
2958
2959 tb_switch_debugfs_remove(sw);
2960
2961 if (sw->rpm) {
2962 pm_runtime_get_sync(&sw->dev);
2963 pm_runtime_disable(&sw->dev);
2964 }
2965
2966 /* port 0 is the switch itself and never has a remote */
2967 tb_switch_for_each_port(sw, port) {
2968 if (tb_port_has_remote(port)) {
2969 tb_switch_remove(port->remote->sw);
2970 port->remote = NULL;
2971 } else if (port->xdomain) {
2972 tb_xdomain_remove(port->xdomain);
2973 port->xdomain = NULL;
2974 }
2975
2976 /* Remove any downstream retimers */
2977 tb_retimer_remove_all(port);
2978 }
2979
2980 if (!sw->is_unplugged)
2981 tb_plug_events_active(sw, false);
2982
2983 tb_switch_nvm_remove(sw);
2984 usb4_switch_remove_ports(sw);
2985
2986 if (tb_route(sw))
2987 dev_info(&sw->dev, "device disconnected\n");
2988 device_unregister(&sw->dev);
2989 }
2990
2991 /**
2992 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
2993 * @sw: Router to mark as unplugged
2994 */
2995 void tb_sw_set_unplugged(struct tb_switch *sw)
2996 {
2997 struct tb_port *port;
2998
2999 if (sw == sw->tb->root_switch) {
3000 tb_sw_WARN(sw, "cannot unplug root switch\n");
3001 return;
3002 }
3003 if (sw->is_unplugged) {
3004 tb_sw_WARN(sw, "is_unplugged already set\n");
3005 return;
3006 }
3007 sw->is_unplugged = true;
3008 tb_switch_for_each_port(sw, port) {
3009 if (tb_port_has_remote(port))
3010 tb_sw_set_unplugged(port->remote->sw);
3011 else if (port->xdomain)
3012 port->xdomain->is_unplugged = true;
3013 }
3014 }
3015
3016 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
3017 {
3018 if (flags)
3019 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
3020 else
3021 tb_sw_dbg(sw, "disabling wakeup\n");
3022
3023 if (tb_switch_is_usb4(sw))
3024 return usb4_switch_set_wake(sw, flags);
3025 return tb_lc_set_wake(sw, flags);
3026 }
3027
3028 int tb_switch_resume(struct tb_switch *sw)
3029 {
3030 struct tb_port *port;
3031 int err;
3032
3033 tb_sw_dbg(sw, "resuming switch\n");
3034
3035 /*
3036 * Check for UID of the connected switches except for root
3037 * switch which we assume cannot be removed.
3038 */
3039 if (tb_route(sw)) {
3040 u64 uid;
3041
3042 /*
3043 * Check first that we can still read the switch config
3044 * space. It may be that there is now another domain
3045 * connected.
3046 */
3047 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
3048 if (err < 0) {
3049 tb_sw_info(sw, "switch not present anymore\n");
3050 return err;
3051 }
3052
3053 /* We don't have any way to confirm this was the same device */
3054 if (!sw->uid)
3055 return -ENODEV;
3056
3057 if (tb_switch_is_usb4(sw))
3058 err = usb4_switch_read_uid(sw, &uid);
3059 else
3060 err = tb_drom_read_uid_only(sw, &uid);
3061 if (err) {
3062 tb_sw_warn(sw, "uid read failed\n");
3063 return err;
3064 }
3065 if (sw->uid != uid) {
3066 tb_sw_info(sw,
3067 "changed while suspended (uid %#llx -> %#llx)\n",
3068 sw->uid, uid);
3069 return -ENODEV;
3070 }
3071 }
3072
3073 err = tb_switch_configure(sw);
3074 if (err)
3075 return err;
3076
3077 /* Disable wakes */
3078 tb_switch_set_wake(sw, 0);
3079
3080 err = tb_switch_tmu_init(sw);
3081 if (err)
3082 return err;
3083
3084 /* Check for surviving downstream switches */
3085 tb_switch_for_each_port(sw, port) {
3086 if (!tb_port_is_null(port))
3087 continue;
3088
3089 if (!tb_port_resume(port))
3090 continue;
3091
3092 if (tb_wait_for_port(port, true) <= 0) {
3093 tb_port_warn(port,
3094 "lost during suspend, disconnecting\n");
3095 if (tb_port_has_remote(port))
3096 tb_sw_set_unplugged(port->remote->sw);
3097 else if (port->xdomain)
3098 port->xdomain->is_unplugged = true;
3099 } else {
3100 /*
3101 * Always unlock the port so the downstream
3102 * switch/domain is accessible.
3103 */
3104 if (tb_port_unlock(port))
3105 tb_port_warn(port, "failed to unlock port\n");
3106 if (port->remote && tb_switch_resume(port->remote->sw)) {
3107 tb_port_warn(port,
3108 "lost during suspend, disconnecting\n");
3109 tb_sw_set_unplugged(port->remote->sw);
3110 }
3111 }
3112 }
3113 return 0;
3114 }
3115
3116 /**
3117 * tb_switch_suspend() - Put a switch to sleep
3118 * @sw: Switch to suspend
3119 * @runtime: Is this runtime suspend or system sleep
3120 *
3121 * Suspends the router and all its children. Enables wakes according to
3122 * the value of @runtime and then sets the sleep bit for the router. If
3123 * @sw is the host router the domain is ready to go to sleep once this
3124 * function returns.
3125 */
3126 void tb_switch_suspend(struct tb_switch *sw, bool runtime)
3127 {
3128 unsigned int flags = 0;
3129 struct tb_port *port;
3130 int err;
3131
3132 tb_sw_dbg(sw, "suspending switch\n");
3133
3134 /*
3135 * Actually only needed for Titan Ridge but for simplicity can be
3136 * done for USB4 devices too as CLx is re-enabled at resume.
3137 * CL0s and CL1 are enabled and supported together.
3138 */
3139 if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
3140 if (tb_switch_disable_clx(sw, TB_CL1))
3141 tb_sw_warn(sw, "failed to disable %s on upstream port\n",
3142 tb_switch_clx_name(TB_CL1));
3143 }
3144
3145 err = tb_plug_events_active(sw, false);
3146 if (err)
3147 return;
3148
3149 tb_switch_for_each_port(sw, port) {
3150 if (tb_port_has_remote(port))
3151 tb_switch_suspend(port->remote->sw, runtime);
3152 }
3153
3154 if (runtime) {
3155 /* Trigger wake when something is plugged in/out */
3156 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
3157 flags |= TB_WAKE_ON_USB4;
3158 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
3159 } else if (device_may_wakeup(&sw->dev)) {
3160 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3161 }
3162
3163 tb_switch_set_wake(sw, flags);
3164
3165 if (tb_switch_is_usb4(sw))
3166 usb4_switch_set_sleep(sw);
3167 else
3168 tb_lc_set_sleep(sw);
3169 }
3170
3171 /**
3172 * tb_switch_query_dp_resource() - Query availability of DP resource
3173 * @sw: Switch whose DP resource is queried
3174 * @in: DP IN port
3175 *
3176 * Queries availability of DP resource for DP tunneling using switch
3177 * specific means. Returns %true if the resource is available.
3178 */
3179 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3180 {
3181 if (tb_switch_is_usb4(sw))
3182 return usb4_switch_query_dp_resource(sw, in);
3183 return tb_lc_dp_sink_query(sw, in);
3184 }
3185
3186 /**
3187 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3188 * @sw: Switch whose DP resource is allocated
3189 * @in: DP IN port
3190 *
3191 * Allocates DP resource for DP tunneling. The resource must be
3192 * available for this to succeed (see tb_switch_query_dp_resource()).
3193 * Returns %0 on success and negative errno otherwise.
3194 */
3195 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3196 {
3197 int ret;
3198
3199 if (tb_switch_is_usb4(sw))
3200 ret = usb4_switch_alloc_dp_resource(sw, in);
3201 else
3202 ret = tb_lc_dp_sink_alloc(sw, in);
3203
3204 if (ret)
3205 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3206 in->port);
3207 else
3208 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3209
3210 return ret;
3211 }
3212
3213 /**
3214 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3215 * @sw: Switch whose DP resource is de-allocated
3216 * @in: DP IN port
3217 *
3218 * De-allocates DP resource that was previously allocated for DP
3219 * tunneling.
3220 */
3221 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3222 {
3223 int ret;
3224
3225 if (tb_switch_is_usb4(sw))
3226 ret = usb4_switch_dealloc_dp_resource(sw, in);
3227 else
3228 ret = tb_lc_dp_sink_dealloc(sw, in);
3229
3230 if (ret)
3231 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3232 in->port);
3233 else
3234 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
3235 }
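/*
 * Illustrative sketch (not part of the driver): how the three DP
 * resource helpers above fit together when setting up a DisplayPort
 * tunnel from DP IN adapter @in. The function name is hypothetical and
 * the tunnel establishment itself is elided.
 *
 *	static int example_dp_setup(struct tb_switch *sw, struct tb_port *in)
 *	{
 *		int ret;
 *
 *		if (!tb_switch_query_dp_resource(sw, in))
 *			return -EBUSY;
 *
 *		ret = tb_switch_alloc_dp_resource(sw, in);
 *		if (ret)
 *			return ret;
 *
 *		// ... establish the tunnel; on teardown or failure call
 *		// tb_switch_dealloc_dp_resource(sw, in);
 *		return 0;
 *	}
 */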
3236
3237 struct tb_sw_lookup {
3238 struct tb *tb;
3239 u8 link;
3240 u8 depth;
3241 const uuid_t *uuid;
3242 u64 route;
3243 };
3244
3245 static int tb_switch_match(struct device *dev, const void *data)
3246 {
3247 struct tb_switch *sw = tb_to_switch(dev);
3248 const struct tb_sw_lookup *lookup = data;
3249
3250 if (!sw)
3251 return 0;
3252 if (sw->tb != lookup->tb)
3253 return 0;
3254
3255 if (lookup->uuid)
3256 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3257
3258 if (lookup->route) {
3259 return sw->config.route_lo == lower_32_bits(lookup->route) &&
3260 sw->config.route_hi == upper_32_bits(lookup->route);
3261 }
3262
3263 /* Root switch is matched only by depth */
3264 if (!lookup->depth)
3265 return !sw->depth;
3266
3267 return sw->link == lookup->link && sw->depth == lookup->depth;
3268 }
3269
3270 /**
3271 * tb_switch_find_by_link_depth() - Find switch by link and depth
3272 * @tb: Domain the switch belongs to
3273 * @link: Link number the switch is connected to
3274 * @depth: Depth of the switch in the link
3275 *
3276 * Returned switch has its reference count increased so the caller needs
3277 * to call tb_switch_put() when done with the switch.
3278 */
3279 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3280 {
3281 struct tb_sw_lookup lookup;
3282 struct device *dev;
3283
3284 memset(&lookup, 0, sizeof(lookup));
3285 lookup.tb = tb;
3286 lookup.link = link;
3287 lookup.depth = depth;
3288
3289 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3290 if (dev)
3291 return tb_to_switch(dev);
3292
3293 return NULL;
3294 }
3295
3296 /**
3297 * tb_switch_find_by_uuid() - Find switch by UUID
3298 * @tb: Domain the switch belongs to
3299 * @uuid: UUID to look for
3300 *
3301 * Returned switch has its reference count increased so the caller needs
3302 * to call tb_switch_put() when done with the switch.
3303 */
3304 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
3305 {
3306 struct tb_sw_lookup lookup;
3307 struct device *dev;
3308
3309 memset(&lookup, 0, sizeof(lookup));
3310 lookup.tb = tb;
3311 lookup.uuid = uuid;
3312
3313 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3314 if (dev)
3315 return tb_to_switch(dev);
3316
3317 return NULL;
3318 }
3319
3320 /**
3321 * tb_switch_find_by_route() - Find switch by route string
3322 * @tb: Domain the switch belongs to
3323 * @route: Route string to look for
3324 *
3325 * Returned switch has its reference count increased so the caller needs
3326 * to call tb_switch_put() when done with the switch.
3327 */
3328 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3329 {
3330 struct tb_sw_lookup lookup;
3331 struct device *dev;
3332
3333 if (!route)
3334 return tb_switch_get(tb->root_switch);
3335
3336 memset(&lookup, 0, sizeof(lookup));
3337 lookup.tb = tb;
3338 lookup.route = route;
3339
3340 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3341 if (dev)
3342 return tb_to_switch(dev);
3343
3344 return NULL;
3345 }
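/*
 * Illustrative sketch (not part of the driver): the find helpers above
 * return the router with its reference count raised, so every
 * successful lookup must be balanced with tb_switch_put():
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		// ... use sw ...
 *		tb_switch_put(sw);
 *	}
 */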
3346
3347 /**
3348 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3349 * @sw: Switch to find the port from
3350 * @type: Port type to look for
3351 */
3352 struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3353 enum tb_port_type type)
3354 {
3355 struct tb_port *port;
3356
3357 tb_switch_for_each_port(sw, port) {
3358 if (port->config.type == type)
3359 return port;
3360 }
3361
3362 return NULL;
3363 }
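/*
 * Illustrative sketch (not part of the driver): tb_switch_find_port()
 * is handy for locating a protocol adapter on a router, for example the
 * first PCIe downstream adapter:
 *
 *	struct tb_port *down;
 *
 *	down = tb_switch_find_port(sw, TB_TYPE_PCIE_DOWN);
 *	if (!down)
 *		return -ENODEV;
 */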
3364
3365 static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
3366 {
3367 u32 phy;
3368 int ret;
3369
3370 ret = tb_port_read(port, &phy, TB_CFG_PORT,
3371 port->cap_phy + LANE_ADP_CS_1, 1);
3372 if (ret)
3373 return ret;
3374
3375 if (secondary)
3376 phy |= LANE_ADP_CS_1_PMS;
3377 else
3378 phy &= ~LANE_ADP_CS_1_PMS;
3379
3380 return tb_port_write(port, &phy, TB_CFG_PORT,
3381 port->cap_phy + LANE_ADP_CS_1, 1);
3382 }
3383
3384 static int tb_port_pm_secondary_enable(struct tb_port *port)
3385 {
3386 return __tb_port_pm_secondary_set(port, true);
3387 }
3388
3389 static int tb_port_pm_secondary_disable(struct tb_port *port)
3390 {
3391 return __tb_port_pm_secondary_set(port, false);
3392 }
3393
3394 static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
3395 {
3396 struct tb_switch *parent = tb_switch_parent(sw);
3397 struct tb_port *up, *down;
3398 int ret;
3399
3400 if (!tb_route(sw))
3401 return 0;
3402
3403 up = tb_upstream_port(sw);
3404 down = tb_port_at(tb_route(sw), parent);
3405 ret = tb_port_pm_secondary_enable(up);
3406 if (ret)
3407 return ret;
3408
3409 return tb_port_pm_secondary_disable(down);
3410 }
3411
3412 /* Called for USB4 or Titan Ridge routers only */
3413 static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
3414 {
3415 u32 mask, val;
3416 bool ret;
3417
3418 /* Don't enable CLx in case of two single-lane links */
3419 if (!port->bonded && port->dual_link_port)
3420 return false;
3421
3422 /* Don't enable CLx in case of inter-domain link */
3423 if (port->xdomain)
3424 return false;
3425
3426 if (tb_switch_is_usb4(port->sw)) {
3427 if (!usb4_port_clx_supported(port))
3428 return false;
3429 } else if (!tb_lc_is_clx_supported(port)) {
3430 return false;
3431 }
3432
3433 switch (clx) {
3434 case TB_CL1:
3435 /* CL0s and CL1 are enabled and supported together */
3436 mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
3437 break;
3438
3439 /* For now we support only CL0s and CL1, not CL2 */
3440 case TB_CL2:
3441 default:
3442 return false;
3443 }
3444
3445 ret = tb_port_read(port, &val, TB_CFG_PORT,
3446 port->cap_phy + LANE_ADP_CS_0, 1);
3447 if (ret)
3448 return false;
3449
3450 return !!(val & mask);
3451 }
3452
3453 static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
3454 {
3455 u32 phy, mask;
3456 int ret;
3457
3458 /* CL0s and CL1 are enabled and supported together */
3459 if (clx == TB_CL1)
3460 mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
3461 else
3462 /* For now we support only CL0s and CL1, not CL2 */
3463 return -EOPNOTSUPP;
3464
3465 ret = tb_port_read(port, &phy, TB_CFG_PORT,
3466 port->cap_phy + LANE_ADP_CS_1, 1);
3467 if (ret)
3468 return ret;
3469
3470 if (enable)
3471 phy |= mask;
3472 else
3473 phy &= ~mask;
3474
3475 return tb_port_write(port, &phy, TB_CFG_PORT,
3476 port->cap_phy + LANE_ADP_CS_1, 1);
3477 }
3478
3479 static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
3480 {
3481 return __tb_port_clx_set(port, clx, false);
3482 }
3483
3484 static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
3485 {
3486 return __tb_port_clx_set(port, clx, true);
3487 }
3488
3489 static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
3490 {
3491 struct tb_switch *parent = tb_switch_parent(sw);
3492 bool up_clx_support, down_clx_support;
3493 struct tb_port *up, *down;
3494 int ret;
3495
3496 if (!tb_switch_is_clx_supported(sw))
3497 return 0;
3498
3499 /*
3500 * CLx for the host router's downstream port is enabled as part
3501 * of enabling the downstream (device) router, so skip it here.
3502 */
3503 if (!tb_route(sw))
3504 return 0;
3505
3506 /* Enable CLx only for first hop router (depth = 1) */
3507 if (tb_route(parent))
3508 return 0;
3509
3510 ret = tb_switch_pm_secondary_resolve(sw);
3511 if (ret)
3512 return ret;
3513
3514 up = tb_upstream_port(sw);
3515 down = tb_port_at(tb_route(sw), parent);
3516
3517 up_clx_support = tb_port_clx_supported(up, clx);
3518 down_clx_support = tb_port_clx_supported(down, clx);
3519
3520 tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
3521 up_clx_support ? "" : "not ");
3522 tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
3523 down_clx_support ? "" : "not ");
3524
3525 if (!up_clx_support || !down_clx_support)
3526 return -EOPNOTSUPP;
3527
3528 ret = tb_port_clx_enable(up, clx);
3529 if (ret)
3530 return ret;
3531
3532 ret = tb_port_clx_enable(down, clx);
3533 if (ret) {
3534 tb_port_clx_disable(up, clx);
3535 return ret;
3536 }
3537
3538 ret = tb_switch_mask_clx_objections(sw);
3539 if (ret) {
3540 tb_port_clx_disable(up, clx);
3541 tb_port_clx_disable(down, clx);
3542 return ret;
3543 }
3544
3545 sw->clx = clx;
3546
3547 tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
3548 return 0;
3549 }
3550
3551 /**
3552 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
3553 * @sw: Router to enable CLx for
3554 * @clx: The CLx state to enable
3555 *
3556 * Enables the CLx state only for the first hop router. That is the most
3557 * common use-case, is intended for better thermal management, and helps
3558 * to improve performance. CLx is enabled only if both sides of the link
3559 * support CLx, the two sides are not configured as two single-lane
3560 * links, and the link is not an inter-domain link. The complete set of
3561 * conditions is described in CM Guide 1.0 section 8.1.
3562 *
3563 * Return: %0 on success or an error code on failure.
3564 */
3565 int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
3566 {
3567 struct tb_switch *root_sw = sw->tb->root_switch;
3568
3569 if (!clx_enabled)
3570 return 0;
3571
3572 /*
3573 * CLx is not enabled and validated on Intel USB4 platforms
3574 * before Alder Lake.
3575 */
3576 if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
3577 return 0;
3578
3579 switch (clx) {
3580 case TB_CL1:
3581 /* CL0s and CL1 are enabled and supported together */
3582 return __tb_switch_enable_clx(sw, clx);
3583
3584 default:
3585 return -EOPNOTSUPP;
3586 }
3587 }
3588
3589 static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
3590 {
3591 struct tb_switch *parent = tb_switch_parent(sw);
3592 struct tb_port *up, *down;
3593 int ret;
3594
3595 if (!tb_switch_is_clx_supported(sw))
3596 return 0;
3597
3598 /*
3599 * CLx for the host router's downstream port is disabled as part
3600 * of disabling the downstream (device) router, so skip it here.
3601 */
3602 if (!tb_route(sw))
3603 return 0;
3604
3605 /* Disable CLx only for first hop router (depth = 1) */
3606 if (tb_route(parent))
3607 return 0;
3608
3609 up = tb_upstream_port(sw);
3610 down = tb_port_at(tb_route(sw), parent);
3611 ret = tb_port_clx_disable(up, clx);
3612 if (ret)
3613 return ret;
3614
3615 ret = tb_port_clx_disable(down, clx);
3616 if (ret)
3617 return ret;
3618
3619 sw->clx = TB_CLX_DISABLE;
3620
3621 tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
3622 return 0;
3623 }
3624
3625 /**
3626 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
3627 * @sw: Router to disable CLx for
3628 * @clx: The CLx state to disable
3629 *
3630 * Return: %0 on success or an error code on failure.
3631 */
3632 int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
3633 {
3634 if (!clx_enabled)
3635 return 0;
3636
3637 switch (clx) {
3638 case TB_CL1:
3639 /* CL0s and CL1 are enabled and supported together */
3640 return __tb_switch_disable_clx(sw, clx);
3641
3642 default:
3643 return -EOPNOTSUPP;
3644 }
3645 }
3646
3647 /**
3648 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
3649 * @sw: Router to mask objections for
3650 *
3651 * Masks the objections coming from the second depth routers in order to
3652 * stop these objections from interfering with the CLx states of the
3653 * first depth link.
3654 */
3655 int tb_switch_mask_clx_objections(struct tb_switch *sw)
3656 {
3657 int up_port = sw->config.upstream_port_number;
3658 u32 offset, val[2], mask_obj, unmask_obj;
3659 int ret, i;
3660
3661 /* Only Titan Ridge of pre-USB4 generation devices supports CLx states */
3662 if (!tb_switch_is_titan_ridge(sw))
3663 return 0;
3664
3665 if (!tb_route(sw))
3666 return 0;
3667
3668 /*
3669 * In Titan Ridge there are only two dual-lane Thunderbolt ports:
3670 * Port A consists of lane adapters 1,2 and
3671 * Port B consists of lane adapters 3,4.
3672 * If the upstream port is A (lanes 1,2), we mask objections from
3673 * port B (lanes 3,4) and unmask objections from port A, and vice versa.
3674 */
3675 if (up_port == 1) {
3676 mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3677 unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3678 offset = TB_LOW_PWR_C1_CL1;
3679 } else {
3680 mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3681 unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3682 offset = TB_LOW_PWR_C3_CL1;
3683 }
3684
3685 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
3686 sw->cap_lp + offset, ARRAY_SIZE(val));
3687 if (ret)
3688 return ret;
3689
3690 for (i = 0; i < ARRAY_SIZE(val); i++) {
3691 val[i] |= mask_obj;
3692 val[i] &= ~unmask_obj;
3693 }
3694
3695 return tb_sw_write(sw, &val, TB_CFG_SWITCH,
3696 sw->cap_lp + offset, ARRAY_SIZE(val));
3697 }
3698
3699 /*
3700 * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3
3701 * device. For now used only for Titan Ridge.
3702 */
3703 static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
3704 unsigned int pcie_offset, u32 value)
3705 {
3706 u32 offset, command, val;
3707 int ret;
3708
3709 if (sw->generation != 3)
3710 return -EOPNOTSUPP;
3711
3712 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
3713 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
3714 if (ret)
3715 return ret;
3716
3717 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
3718 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
3719 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
3720 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
3721 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
3722 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
3723
3724 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
3725
3726 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
3727 if (ret)
3728 return ret;
3729
3730 ret = tb_switch_wait_for_bit(sw, offset,
3731 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
3732 if (ret)
3733 return ret;
3734
3735 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
3736 if (ret)
3737 return ret;
3738
3739 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
3740 return -ETIMEDOUT;
3741
3742 return 0;
3743 }
3744
3745 /**
3746 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
3747 * @sw: Router to enable PCIe L1 for
3748 *
3749 * For a Titan Ridge switch to enter the CLx state, its PCIe bridges
3750 * shall enable entry to the PCIe L1 state. Shall be called after the
3751 * upstream PCIe tunnel was configured. Due to Intel platform
3752 * limitations, shall be called only for the first hop switch.
3753 */
3754 int tb_switch_pcie_l1_enable(struct tb_switch *sw)
3755 {
3756 struct tb_switch *parent = tb_switch_parent(sw);
3757 int ret;
3758
3759 if (!tb_route(sw))
3760 return 0;
3761
3762 if (!tb_switch_is_titan_ridge(sw))
3763 return 0;
3764
3765 /* Enable PCIe L1 only for first hop router (depth = 1) */
3766 if (tb_route(parent))
3767 return 0;
3768
3769 /* Write to downstream PCIe bridge #5 aka Dn4 */
3770 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
3771 if (ret)
3772 return ret;
3773
3774 /* Write to Upstream PCIe bridge #0 aka Up0 */
3775 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
3776 }
3777
3778 /**
3779 * tb_switch_xhci_connect() - Connect internal xHCI
3780 * @sw: Router whose xHCI to connect
3781 *
3782 * Can be called for any router. For Alpine Ridge and Titan Ridge it
3783 * performs the special flows that make the xHCI functional for any
3784 * device connected to the Type-C port. Call only after the PCIe tunnel
3785 * has been established. The function only does the connect if not done
3786 * already, so it can be called several times for the same router.
3787 */
3788 int tb_switch_xhci_connect(struct tb_switch *sw)
3789 {
3790 struct tb_port *port1, *port3;
3791 int ret;
3792
3793 if (sw->generation != 3)
3794 return 0;
3795
3796 port1 = &sw->ports[1];
3797 port3 = &sw->ports[3];
3798
3799 if (tb_switch_is_alpine_ridge(sw)) {
3800 bool usb_port1, usb_port3, xhci_port1, xhci_port3;
3801
3802 usb_port1 = tb_lc_is_usb_plugged(port1);
3803 usb_port3 = tb_lc_is_usb_plugged(port3);
3804 xhci_port1 = tb_lc_is_xhci_connected(port1);
3805 xhci_port3 = tb_lc_is_xhci_connected(port3);
3806
3807 /* Figure out correct USB port to connect */
3808 if (usb_port1 && !xhci_port1) {
3809 ret = tb_lc_xhci_connect(port1);
3810 if (ret)
3811 return ret;
3812 }
3813 if (usb_port3 && !xhci_port3)
3814 return tb_lc_xhci_connect(port3);
3815 } else if (tb_switch_is_titan_ridge(sw)) {
3816 ret = tb_lc_xhci_connect(port1);
3817 if (ret)
3818 return ret;
3819 return tb_lc_xhci_connect(port3);
3820 }
3821
3822 return 0;
3823 }
3824
3825 /**
3826 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
3827 * @sw: Router whose xHCI to disconnect
3828 *
3829 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
3830 * ports of the router.
3831 */
3832 void tb_switch_xhci_disconnect(struct tb_switch *sw)
3833 {
3834 if (sw->generation == 3) {
3835 struct tb_port *port1 = &sw->ports[1];
3836 struct tb_port *port3 = &sw->ports[3];
3837
3838 tb_lc_xhci_disconnect(port1);
3839 tb_port_dbg(port1, "disconnected xHCI\n");
3840 tb_lc_xhci_disconnect(port3);
3841 tb_port_dbg(port3, "disconnected xHCI\n");
3842 }
3843 }