0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Thunderbolt driver - switch/port utility functions
0004  *
0005  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
0006  * Copyright (C) 2018, Intel Corporation
0007  */
0008 
0009 #include <linux/delay.h>
0010 #include <linux/idr.h>
0011 #include <linux/nvmem-provider.h>
0012 #include <linux/pm_runtime.h>
0013 #include <linux/sched/signal.h>
0014 #include <linux/sizes.h>
0015 #include <linux/slab.h>
0016 #include <linux/module.h>
0017 
0018 #include "tb.h"
0019 
0020 /* Switch NVM support */
0021 
0022 #define NVM_CSS         0x10
0023 
0024 struct nvm_auth_status {
0025     struct list_head list;
0026     uuid_t uuid;
0027     u32 status;
0028 };
0029 
0030 static bool clx_enabled = true;
0031 module_param_named(clx, clx_enabled, bool, 0444);
0032 MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
0033 
0034 /*
0035  * Hold NVM authentication failure status per switch. This information
0036  * needs to stay around even when the switch gets power cycled so we
0037  * keep it separately.
0038  */
0039 static LIST_HEAD(nvm_auth_status_cache);
0040 static DEFINE_MUTEX(nvm_auth_status_lock);
0041 
0042 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
0043 {
0044     struct nvm_auth_status *st;
0045 
0046     list_for_each_entry(st, &nvm_auth_status_cache, list) {
0047         if (uuid_equal(&st->uuid, sw->uuid))
0048             return st;
0049     }
0050 
0051     return NULL;
0052 }
0053 
0054 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
0055 {
0056     struct nvm_auth_status *st;
0057 
0058     mutex_lock(&nvm_auth_status_lock);
0059     st = __nvm_get_auth_status(sw);
0060     mutex_unlock(&nvm_auth_status_lock);
0061 
0062     *status = st ? st->status : 0;
0063 }
0064 
0065 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
0066 {
0067     struct nvm_auth_status *st;
0068 
0069     if (WARN_ON(!sw->uuid))
0070         return;
0071 
0072     mutex_lock(&nvm_auth_status_lock);
0073     st = __nvm_get_auth_status(sw);
0074 
0075     if (!st) {
0076         st = kzalloc(sizeof(*st), GFP_KERNEL);
0077         if (!st)
0078             goto unlock;
0079 
0080         memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
0081         INIT_LIST_HEAD(&st->list);
0082         list_add_tail(&st->list, &nvm_auth_status_cache);
0083     }
0084 
0085     st->status = status;
0086 unlock:
0087     mutex_unlock(&nvm_auth_status_lock);
0088 }
0089 
0090 static void nvm_clear_auth_status(const struct tb_switch *sw)
0091 {
0092     struct nvm_auth_status *st;
0093 
0094     mutex_lock(&nvm_auth_status_lock);
0095     st = __nvm_get_auth_status(sw);
0096     if (st) {
0097         list_del(&st->list);
0098         kfree(st);
0099     }
0100     mutex_unlock(&nvm_auth_status_lock);
0101 }
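
/*
 * Example (illustrative only): the cache above is what lets a failed
 * upgrade be reported even after the router was power cycled. A
 * hypothetical flow, mirroring the DMA port helpers below and the
 * nvm_authenticate sysfs attribute further down in this file:
 *
 *     if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
 *             nvm_set_auth_status(sw, status);
 *     ...
 *     nvm_get_auth_status(sw, &status);       // later, e.g. from sysfs
 *     if (status)
 *             tb_sw_warn(sw, "NVM authentication failed: %#x\n", status);
 */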
0102 
0103 static int nvm_validate_and_write(struct tb_switch *sw)
0104 {
0105     unsigned int image_size, hdr_size;
0106     const u8 *buf = sw->nvm->buf;
0107     u16 ds_size;
0108     int ret;
0109 
0110     if (!buf)
0111         return -EINVAL;
0112 
0113     image_size = sw->nvm->buf_data_size;
0114     if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
0115         return -EINVAL;
0116 
0117     /*
0118      * FARB pointer must point inside the image and must at least
0119      * contain parts of the digital section we will be reading here.
0120      */
0121     hdr_size = (*(u32 *)buf) & 0xffffff;
0122     if (hdr_size + NVM_DEVID + 2 >= image_size)
0123         return -EINVAL;
0124 
0125     /* Digital section start should be aligned to 4k page */
0126     if (!IS_ALIGNED(hdr_size, SZ_4K))
0127         return -EINVAL;
0128 
0129     /*
0130      * Read digital section size and check that it also fits inside
0131      * the image.
0132      */
0133     ds_size = *(u16 *)(buf + hdr_size);
0134     if (ds_size >= image_size)
0135         return -EINVAL;
0136 
0137     if (!sw->safe_mode) {
0138         u16 device_id;
0139 
0140         /*
0141          * Make sure the device ID in the image matches the one
0142          * we read from the switch config space.
0143          */
0144         device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
0145         if (device_id != sw->config.device_id)
0146             return -EINVAL;
0147 
0148         if (sw->generation < 3) {
0149             /* Write CSS headers first */
0150             ret = dma_port_flash_write(sw->dma_port,
0151                 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
0152                 DMA_PORT_CSS_MAX_SIZE);
0153             if (ret)
0154                 return ret;
0155         }
0156 
0157         /* Skip headers in the image */
0158         buf += hdr_size;
0159         image_size -= hdr_size;
0160     }
0161 
0162     if (tb_switch_is_usb4(sw))
0163         ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
0164     else
0165         ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
0166     if (!ret)
0167         sw->nvm->flushed = true;
0168     return ret;
0169 }
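
/*
 * Image layout assumed by the checks above (offsets as used in this
 * file): the low 24 bits of the first dword give hdr_size, the digital
 * section starts 4k aligned at hdr_size and its 16-bit size and device
 * ID live inside it.
 *
 *     0                 hdr_size (4k aligned)
 *     +-----------------+--------------------------------------+
 *     | FARB/headers    | digital section, ds_size bytes       |
 *     | (CSS at 0x10)   | (device ID at hdr_size + NVM_DEVID)  |
 *     +-----------------+--------------------------------------+
 *
 * For pre-gen 3 hardware the CSS headers are written separately through
 * the DMA port before the headers are stripped from the image.
 */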
0170 
0171 static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
0172 {
0173     int ret = 0;
0174 
0175     /*
0176      * Root switch NVM upgrade requires that we disconnect the
0177      * existing paths first (in case it is not in safe mode
0178      * already).
0179      */
0180     if (!sw->safe_mode) {
0181         u32 status;
0182 
0183         ret = tb_domain_disconnect_all_paths(sw->tb);
0184         if (ret)
0185             return ret;
0186         /*
0187          * The host controller goes away pretty soon after this if
0188          * everything goes well, so getting a timeout is expected.
0189          */
0190         ret = dma_port_flash_update_auth(sw->dma_port);
0191         if (!ret || ret == -ETIMEDOUT)
0192             return 0;
0193 
0194         /*
0195          * Any error from update auth operation requires power
0196          * cycling of the host router.
0197          */
0198         tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
0199         if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
0200             nvm_set_auth_status(sw, status);
0201     }
0202 
0203     /*
0204      * From safe mode we can get out by just power cycling the
0205      * switch.
0206      */
0207     dma_port_power_cycle(sw->dma_port);
0208     return ret;
0209 }
0210 
0211 static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
0212 {
0213     int ret, retries = 10;
0214 
0215     ret = dma_port_flash_update_auth(sw->dma_port);
0216     switch (ret) {
0217     case 0:
0218     case -ETIMEDOUT:
0219     case -EACCES:
0220     case -EINVAL:
0221         /* Power cycle is required */
0222         break;
0223     default:
0224         return ret;
0225     }
0226 
0227     /*
0228      * Poll here for the authentication status. It takes some time
0229      * for the device to respond (we get timeout for a while). Once
0230      * we get a response the device needs to be power cycled for the
0231      * new NVM to be taken into use.
0232      */
0233     do {
0234         u32 status;
0235 
0236         ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
0237         if (ret < 0 && ret != -ETIMEDOUT)
0238             return ret;
0239         if (ret > 0) {
0240             if (status) {
0241                 tb_sw_warn(sw, "failed to authenticate NVM\n");
0242                 nvm_set_auth_status(sw, status);
0243             }
0244 
0245             tb_sw_info(sw, "power cycling the switch now\n");
0246             dma_port_power_cycle(sw->dma_port);
0247             return 0;
0248         }
0249 
0250         msleep(500);
0251     } while (--retries);
0252 
0253     return -ETIMEDOUT;
0254 }
0255 
0256 static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
0257 {
0258     struct pci_dev *root_port;
0259 
0260     /*
0261      * During host router NVM upgrade we should not allow the root
0262      * port to go into D3cold because some root ports cannot trigger
0263      * PME themselves. To be on the safe side keep the root port in
0264      * D0 during the whole upgrade process.
0265      */
0266     root_port = pcie_find_root_port(sw->tb->nhi->pdev);
0267     if (root_port)
0268         pm_runtime_get_noresume(&root_port->dev);
0269 }
0270 
0271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
0272 {
0273     struct pci_dev *root_port;
0274 
0275     root_port = pcie_find_root_port(sw->tb->nhi->pdev);
0276     if (root_port)
0277         pm_runtime_put(&root_port->dev);
0278 }
0279 
0280 static inline bool nvm_readable(struct tb_switch *sw)
0281 {
0282     if (tb_switch_is_usb4(sw)) {
0283         /*
0284          * USB4 devices must support NVM operations but they are
0285          * optional for hosts. Therefore we query the NVM sector
0286          * size here and if it is supported, assume NVM
0287          * operations are implemented.
0288          */
0289         return usb4_switch_nvm_sector_size(sw) > 0;
0290     }
0291 
0292     /* Thunderbolt 2 and 3 devices support NVM through DMA port */
0293     return !!sw->dma_port;
0294 }
0295 
0296 static inline bool nvm_upgradeable(struct tb_switch *sw)
0297 {
0298     if (sw->no_nvm_upgrade)
0299         return false;
0300     return nvm_readable(sw);
0301 }
0302 
0303 static inline int nvm_read(struct tb_switch *sw, unsigned int address,
0304                void *buf, size_t size)
0305 {
0306     if (tb_switch_is_usb4(sw))
0307         return usb4_switch_nvm_read(sw, address, buf, size);
0308     return dma_port_flash_read(sw->dma_port, address, buf, size);
0309 }
0310 
0311 static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
0312 {
0313     int ret;
0314 
0315     if (tb_switch_is_usb4(sw)) {
0316         if (auth_only) {
0317             ret = usb4_switch_nvm_set_offset(sw, 0);
0318             if (ret)
0319                 return ret;
0320         }
0321         sw->nvm->authenticating = true;
0322         return usb4_switch_nvm_authenticate(sw);
0323     } else if (auth_only) {
0324         return -EOPNOTSUPP;
0325     }
0326 
0327     sw->nvm->authenticating = true;
0328     if (!tb_route(sw)) {
0329         nvm_authenticate_start_dma_port(sw);
0330         ret = nvm_authenticate_host_dma_port(sw);
0331     } else {
0332         ret = nvm_authenticate_device_dma_port(sw);
0333     }
0334 
0335     return ret;
0336 }
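
/*
 * A sketch of how the helpers above combine (the real entry point is
 * nvm_authenticate_sysfs() further down in this file):
 *
 *     ret = nvm_validate_and_write(sw);          // flush cached image
 *     if (!ret)
 *             ret = nvm_authenticate(sw, false); // then authenticate it
 */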
0337 
0338 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
0339                   size_t bytes)
0340 {
0341     struct tb_nvm *nvm = priv;
0342     struct tb_switch *sw = tb_to_switch(nvm->dev);
0343     int ret;
0344 
0345     pm_runtime_get_sync(&sw->dev);
0346 
0347     if (!mutex_trylock(&sw->tb->lock)) {
0348         ret = restart_syscall();
0349         goto out;
0350     }
0351 
0352     ret = nvm_read(sw, offset, val, bytes);
0353     mutex_unlock(&sw->tb->lock);
0354 
0355 out:
0356     pm_runtime_mark_last_busy(&sw->dev);
0357     pm_runtime_put_autosuspend(&sw->dev);
0358 
0359     return ret;
0360 }
0361 
0362 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
0363                    size_t bytes)
0364 {
0365     struct tb_nvm *nvm = priv;
0366     struct tb_switch *sw = tb_to_switch(nvm->dev);
0367     int ret;
0368 
0369     if (!mutex_trylock(&sw->tb->lock))
0370         return restart_syscall();
0371 
0372     /*
0373      * Since writing the NVM image might require some special steps,
0374      * for example when CSS headers are written, we cache the image
0375      * locally here and handle the special cases when the user asks
0376      * us to authenticate the image.
0377      */
0378     ret = tb_nvm_write_buf(nvm, offset, val, bytes);
0379     mutex_unlock(&sw->tb->lock);
0380 
0381     return ret;
0382 }
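
/*
 * The two callbacks above back the NVMem devices registered in
 * tb_switch_nvm_add() below: reads go straight to the hardware while
 * writes only fill nvm->buf, which is flushed to the flash when
 * userspace asks for authentication (see nvm_authenticate_sysfs()).
 */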
0383 
0384 static int tb_switch_nvm_add(struct tb_switch *sw)
0385 {
0386     struct tb_nvm *nvm;
0387     u32 val;
0388     int ret;
0389 
0390     if (!nvm_readable(sw))
0391         return 0;
0392 
0393     /*
0394      * The NVM format of non-Intel hardware is not known so we
0395      * currently restrict NVM upgrade to Intel hardware only. We may
0396      * relax this in the future when we learn other NVM formats.
0397      */
0398     if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
0399         sw->config.vendor_id != 0x8087) {
0400         dev_info(&sw->dev,
0401              "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
0402              sw->config.vendor_id);
0403         return 0;
0404     }
0405 
0406     nvm = tb_nvm_alloc(&sw->dev);
0407     if (IS_ERR(nvm))
0408         return PTR_ERR(nvm);
0409 
0410     /*
0411      * If the switch is in safe-mode the only accessible portion of
0412      * the NVM is the non-active one where userspace is expected to
0413      * write new functional NVM.
0414      */
0415     if (!sw->safe_mode) {
0416         u32 nvm_size, hdr_size;
0417 
0418         ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
0419         if (ret)
0420             goto err_nvm;
0421 
0422         hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
0423         nvm_size = (SZ_1M << (val & 7)) / 8;
0424         nvm_size = (nvm_size - hdr_size) / 2;
0425 
0426         ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
0427         if (ret)
0428             goto err_nvm;
0429 
0430         nvm->major = val >> 16;
0431         nvm->minor = val >> 8;
0432 
0433         ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
0434         if (ret)
0435             goto err_nvm;
0436     }
0437 
0438     if (!sw->no_nvm_upgrade) {
0439         ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
0440                         tb_switch_nvm_write);
0441         if (ret)
0442             goto err_nvm;
0443     }
0444 
0445     sw->nvm = nvm;
0446     return 0;
0447 
0448 err_nvm:
0449     tb_nvm_free(nvm);
0450     return ret;
0451 }
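
/*
 * Worked example for the size math above: if the three size bits of
 * NVM_FLASH_SIZE read back as 1, the total flash size is
 * (SZ_1M << 1) / 8 = 256 KiB. On a gen 3 router hdr_size is SZ_16K,
 * so each of the two NVM regions (active/non-active) gets
 * (256 KiB - 16 KiB) / 2 = 120 KiB.
 */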
0452 
0453 static void tb_switch_nvm_remove(struct tb_switch *sw)
0454 {
0455     struct tb_nvm *nvm;
0456 
0457     nvm = sw->nvm;
0458     sw->nvm = NULL;
0459 
0460     if (!nvm)
0461         return;
0462 
0463     /* Remove authentication status in case the switch is unplugged */
0464     if (!nvm->authenticating)
0465         nvm_clear_auth_status(sw);
0466 
0467     tb_nvm_free(nvm);
0468 }
0469 
0470 /* port utility functions */
0471 
0472 static const char *tb_port_type(const struct tb_regs_port_header *port)
0473 {
0474     switch (port->type >> 16) {
0475     case 0:
0476         switch ((u8) port->type) {
0477         case 0:
0478             return "Inactive";
0479         case 1:
0480             return "Port";
0481         case 2:
0482             return "NHI";
0483         default:
0484             return "unknown";
0485         }
0486     case 0x2:
0487         return "Ethernet";
0488     case 0x8:
0489         return "SATA";
0490     case 0xe:
0491         return "DP/HDMI";
0492     case 0x10:
0493         return "PCIe";
0494     case 0x20:
0495         return "USB";
0496     default:
0497         return "unknown";
0498     }
0499 }
0500 
0501 static void tb_dump_port(struct tb *tb, const struct tb_port *port)
0502 {
0503     const struct tb_regs_port_header *regs = &port->config;
0504 
0505     tb_dbg(tb,
0506            " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
0507            regs->port_number, regs->vendor_id, regs->device_id,
0508            regs->revision, regs->thunderbolt_version, tb_port_type(regs),
0509            regs->type);
0510     tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
0511            regs->max_in_hop_id, regs->max_out_hop_id);
0512     tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
0513     tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
0514     tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
0515            port->ctl_credits);
0516 }
0517 
0518 /**
0519  * tb_port_state() - get connectedness state of a port
0520  * @port: the port to check
0521  *
0522  * The port must have a TB_CAP_PHY (i.e. it should be a real port).
0523  *
0524  * Return: Returns an enum tb_port_state on success or an error code on failure.
0525  */
0526 int tb_port_state(struct tb_port *port)
0527 {
0528     struct tb_cap_phy phy;
0529     int res;
0530     if (port->cap_phy == 0) {
0531         tb_port_WARN(port, "does not have a PHY\n");
0532         return -EINVAL;
0533     }
0534     res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
0535     if (res)
0536         return res;
0537     return phy.state;
0538 }
0539 
0540 /**
0541  * tb_wait_for_port() - wait for a port to become ready
0542  * @port: Port to wait
0543  * @wait_if_unplugged: Wait also when port is unplugged
0544  *
0545  * Wait up to 1 second for a port to reach state TB_PORT_UP. If
0546  * wait_if_unplugged is set then we also wait if the port is in state
0547  * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
0548  * switch resume). Otherwise we only wait if a device is registered but the link
0549  * has not yet been established.
0550  *
0551  * Return: Returns an error code on failure. Returns 0 if the port is not
0552  * connected or failed to reach state TB_PORT_UP within one second. Returns 1
0553  * if the port is connected and in state TB_PORT_UP.
0554  */
0555 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
0556 {
0557     int retries = 10;
0558     int state;
0559     if (!port->cap_phy) {
0560         tb_port_WARN(port, "does not have PHY\n");
0561         return -EINVAL;
0562     }
0563     if (tb_is_upstream_port(port)) {
0564         tb_port_WARN(port, "is the upstream port\n");
0565         return -EINVAL;
0566     }
0567 
0568     while (retries--) {
0569         state = tb_port_state(port);
0570         if (state < 0)
0571             return state;
0572         if (state == TB_PORT_DISABLED) {
0573             tb_port_dbg(port, "is disabled (state: 0)\n");
0574             return 0;
0575         }
0576         if (state == TB_PORT_UNPLUGGED) {
0577             if (wait_if_unplugged) {
0578                 /* used during resume */
0579                 tb_port_dbg(port,
0580                         "is unplugged (state: 7), retrying...\n");
0581                 msleep(100);
0582                 continue;
0583             }
0584             tb_port_dbg(port, "is unplugged (state: 7)\n");
0585             return 0;
0586         }
0587         if (state == TB_PORT_UP) {
0588             tb_port_dbg(port, "is connected, link is up (state: 2)\n");
0589             return 1;
0590         }
0591 
0592         /*
0593          * After plug-in the state is TB_PORT_CONNECTING. Give it some
0594          * time.
0595          */
0596         tb_port_dbg(port,
0597                 "is connected, link is not up (state: %d), retrying...\n",
0598                 state);
0599         msleep(100);
0600     }
0601     tb_port_warn(port,
0602              "failed to reach state TB_PORT_UP. Ignoring port...\n");
0603     return 0;
0604 }
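
/*
 * Example (illustrative): a hypothetical resume path could use this to
 * ride out the TB_PORT_UNPLUGGED window before trusting the link:
 *
 *     if (tb_wait_for_port(port, true) <= 0)
 *             continue;       // nothing usable behind this port
 */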
0605 
0606 /**
0607  * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
0608  * @port: Port to add/remove NFC credits
0609  * @credits: Credits to add/remove
0610  *
0611  * Change the number of NFC credits allocated to @port by @credits. To remove
0612  * NFC credits pass a negative amount of credits.
0613  *
0614  * Return: Returns 0 on success or an error code on failure.
0615  */
0616 int tb_port_add_nfc_credits(struct tb_port *port, int credits)
0617 {
0618     u32 nfc_credits;
0619 
0620     if (credits == 0 || port->sw->is_unplugged)
0621         return 0;
0622 
0623     /*
0624      * USB4 restricts programming NFC buffers to lane adapters only
0625      * so skip other ports.
0626      */
0627     if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
0628         return 0;
0629 
0630     nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
0631     if (credits < 0)
0632         credits = max_t(int, -nfc_credits, credits);
0633 
0634     nfc_credits += credits;
0635 
0636     tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
0637             port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
0638 
0639     port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
0640     port->config.nfc_credits |= nfc_credits;
0641 
0642     return tb_port_write(port, &port->config.nfc_credits,
0643                  TB_CFG_PORT, ADP_CS_4, 1);
0644 }
0645 
0646 /**
0647  * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
0648  * @port: Port whose counters to clear
0649  * @counter: Counter index to clear
0650  *
0651  * Return: Returns 0 on success or an error code on failure.
0652  */
0653 int tb_port_clear_counter(struct tb_port *port, int counter)
0654 {
0655     u32 zero[3] = { 0, 0, 0 };
0656     tb_port_dbg(port, "clearing counter %d\n", counter);
0657     return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
0658 }
0659 
0660 /**
0661  * tb_port_unlock() - Unlock downstream port
0662  * @port: Port to unlock
0663  *
0664  * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
0665  * downstream router accessible for CM.
0666  */
0667 int tb_port_unlock(struct tb_port *port)
0668 {
0669     if (tb_switch_is_icm(port->sw))
0670         return 0;
0671     if (!tb_port_is_null(port))
0672         return -EINVAL;
0673     if (tb_switch_is_usb4(port->sw))
0674         return usb4_port_unlock(port);
0675     return 0;
0676 }
0677 
0678 static int __tb_port_enable(struct tb_port *port, bool enable)
0679 {
0680     int ret;
0681     u32 phy;
0682 
0683     if (!tb_port_is_null(port))
0684         return -EINVAL;
0685 
0686     ret = tb_port_read(port, &phy, TB_CFG_PORT,
0687                port->cap_phy + LANE_ADP_CS_1, 1);
0688     if (ret)
0689         return ret;
0690 
0691     if (enable)
0692         phy &= ~LANE_ADP_CS_1_LD;
0693     else
0694         phy |= LANE_ADP_CS_1_LD;
0695 
0696 
0697     ret = tb_port_write(port, &phy, TB_CFG_PORT,
0698                 port->cap_phy + LANE_ADP_CS_1, 1);
0699     if (ret)
0700         return ret;
0701 
0702     tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
0703     return 0;
0704 }
0705 
0706 /**
0707  * tb_port_enable() - Enable lane adapter
0708  * @port: Port to enable (can be %NULL)
0709  *
0710  * This is used to enable lane 0 and 1 adapters.
0711  */
0712 int tb_port_enable(struct tb_port *port)
0713 {
0714     return __tb_port_enable(port, true);
0715 }
0716 
0717 /**
0718  * tb_port_disable() - Disable lane adapter
0719  * @port: Port to disable (can be %NULL)
0720  *
0721  * This is used to disable lane 0 and 1 adapters.
0722  */
0723 int tb_port_disable(struct tb_port *port)
0724 {
0725     return __tb_port_enable(port, false);
0726 }
0727 
0728 /*
0729  * tb_init_port() - initialize a port
0730  *
0731  * This is a helper method for tb_switch_alloc. Does not check or initialize
0732  * any downstream switches.
0733  *
0734  * Return: Returns 0 on success or an error code on failure.
0735  */
0736 static int tb_init_port(struct tb_port *port)
0737 {
0738     int res;
0739     int cap;
0740 
0741     INIT_LIST_HEAD(&port->list);
0742 
0743     /* Control adapter does not have configuration space */
0744     if (!port->port)
0745         return 0;
0746 
0747     res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
0748     if (res) {
0749         if (res == -ENODEV) {
0750             tb_dbg(port->sw->tb, " Port %d: not implemented\n",
0751                    port->port);
0752             port->disabled = true;
0753             return 0;
0754         }
0755         return res;
0756     }
0757 
0758     /* Port 0 is the switch itself and has no PHY. */
0759     if (port->config.type == TB_TYPE_PORT) {
0760         cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
0761 
0762         if (cap > 0)
0763             port->cap_phy = cap;
0764         else
0765             tb_port_WARN(port, "non switch port without a PHY\n");
0766 
0767         cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
0768         if (cap > 0)
0769             port->cap_usb4 = cap;
0770 
0771         /*
0772          * For USB4 ports the buffers allocated for the control
0773          * path can be read from the path config space. For legacy
0774          * devices we use a hard-coded value.
0775          */
0776         if (tb_switch_is_usb4(port->sw)) {
0777             struct tb_regs_hop hop;
0778 
0779             if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
0780                 port->ctl_credits = hop.initial_credits;
0781         }
0782         if (!port->ctl_credits)
0783             port->ctl_credits = 2;
0784 
0785     } else {
0786         cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
0787         if (cap > 0)
0788             port->cap_adap = cap;
0789     }
0790 
0791     port->total_credits =
0792         (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
0793         ADP_CS_4_TOTAL_BUFFERS_SHIFT;
0794 
0795     tb_dump_port(port->sw->tb, port);
0796     return 0;
0797 }
0798 
0799 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
0800                    int max_hopid)
0801 {
0802     int port_max_hopid;
0803     struct ida *ida;
0804 
0805     if (in) {
0806         port_max_hopid = port->config.max_in_hop_id;
0807         ida = &port->in_hopids;
0808     } else {
0809         port_max_hopid = port->config.max_out_hop_id;
0810         ida = &port->out_hopids;
0811     }
0812 
0813     /*
0814      * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7
0815      * are reserved.
0816      */
0817     if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
0818         min_hopid = TB_PATH_MIN_HOPID;
0819 
0820     if (max_hopid < 0 || max_hopid > port_max_hopid)
0821         max_hopid = port_max_hopid;
0822 
0823     return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
0824 }
0825 
0826 /**
0827  * tb_port_alloc_in_hopid() - Allocate input HopID from port
0828  * @port: Port to allocate HopID for
0829  * @min_hopid: Minimum acceptable input HopID
0830  * @max_hopid: Maximum acceptable input HopID
0831  *
0832  * Return: HopID between @min_hopid and @max_hopid or negative errno in
0833  * case of error.
0834  */
0835 int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
0836 {
0837     return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
0838 }
0839 
0840 /**
0841  * tb_port_alloc_out_hopid() - Allocate output HopID from port
0842  * @port: Port to allocate HopID for
0843  * @min_hopid: Minimum acceptable output HopID
0844  * @max_hopid: Maximum acceptable output HopID
0845  *
0846  * Return: HopID between @min_hopid and @max_hopid or negative errno in
0847  * case of error.
0848  */
0849 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
0850 {
0851     return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
0852 }
0853 
0854 /**
0855  * tb_port_release_in_hopid() - Release allocated input HopID from port
0856  * @port: Port whose HopID to release
0857  * @hopid: HopID to release
0858  */
0859 void tb_port_release_in_hopid(struct tb_port *port, int hopid)
0860 {
0861     ida_simple_remove(&port->in_hopids, hopid);
0862 }
0863 
0864 /**
0865  * tb_port_release_out_hopid() - Release allocated output HopID from port
0866  * @port: Port whose HopID to release
0867  * @hopid: HopID to release
0868  */
0869 void tb_port_release_out_hopid(struct tb_port *port, int hopid)
0870 {
0871     ida_simple_remove(&port->out_hopids, hopid);
0872 }
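
/*
 * Example (illustrative): grabbing any free input HopID in the default
 * range and releasing it again. Passing a negative @max_hopid means
 * "up to whatever the adapter supports":
 *
 *     hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *     if (hopid >= 0)
 *             tb_port_release_in_hopid(port, hopid);
 */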
0873 
0874 static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
0875                       const struct tb_switch *sw)
0876 {
0877     u64 mask = (1ULL << parent->config.depth * 8) - 1;
0878     return (tb_route(parent) & mask) == (tb_route(sw) & mask);
0879 }
0880 
0881 /**
0882  * tb_next_port_on_path() - Return next port for given port on a path
0883  * @start: Start port of the walk
0884  * @end: End port of the walk
0885  * @prev: Previous port (%NULL if this is the first)
0886  *
0887  * This function can be used to walk from one port to another if they
0888  * are connected through zero or more switches. If the @prev is dual
0889  * link port, the function follows that link and returns another end on
0890  * that same link.
0891  *
0892  * If the @end port has been reached, return %NULL.
0893  *
0894  * Domain tb->lock must be held when this function is called.
0895  */
0896 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
0897                      struct tb_port *prev)
0898 {
0899     struct tb_port *next;
0900 
0901     if (!prev)
0902         return start;
0903 
0904     if (prev->sw == end->sw) {
0905         if (prev == end)
0906             return NULL;
0907         return end;
0908     }
0909 
0910     if (tb_switch_is_reachable(prev->sw, end->sw)) {
0911         next = tb_port_at(tb_route(end->sw), prev->sw);
0912         /* Walk down the topology if next == prev */
0913         if (prev->remote &&
0914             (next == prev || next->dual_link_port == prev))
0915             next = prev->remote;
0916     } else {
0917         if (tb_is_upstream_port(prev)) {
0918             next = prev->remote;
0919         } else {
0920             next = tb_upstream_port(prev->sw);
0921             /*
0922              * Keep the same link if prev and next are both
0923              * dual link ports.
0924              */
0925             if (next->dual_link_port &&
0926                 next->link_nr != prev->link_nr) {
0927                 next = next->dual_link_port;
0928             }
0929         }
0930     }
0931 
0932     return next != prev ? next : NULL;
0933 }
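
/*
 * Example (illustrative): visiting every port between the two ends of
 * a path, in the style of the tb_for_each_port_on_path() helper:
 *
 *     struct tb_port *p = NULL;
 *
 *     while ((p = tb_next_port_on_path(src, dst, p)))
 *             tb_port_dbg(p, "on path\n");
 */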
0934 
0935 /**
0936  * tb_port_get_link_speed() - Get current link speed
0937  * @port: Port to check (USB4 or CIO)
0938  *
0939  * Returns link speed in Gb/s or negative errno in case of failure.
0940  */
0941 int tb_port_get_link_speed(struct tb_port *port)
0942 {
0943     u32 val, speed;
0944     int ret;
0945 
0946     if (!port->cap_phy)
0947         return -EINVAL;
0948 
0949     ret = tb_port_read(port, &val, TB_CFG_PORT,
0950                port->cap_phy + LANE_ADP_CS_1, 1);
0951     if (ret)
0952         return ret;
0953 
0954     speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
0955         LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
0956     return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
0957 }
0958 
0959 /**
0960  * tb_port_get_link_width() - Get current link width
0961  * @port: Port to check (USB4 or CIO)
0962  *
0963  * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
0964  * or negative errno in case of failure.
0965  */
0966 int tb_port_get_link_width(struct tb_port *port)
0967 {
0968     u32 val;
0969     int ret;
0970 
0971     if (!port->cap_phy)
0972         return -EINVAL;
0973 
0974     ret = tb_port_read(port, &val, TB_CFG_PORT,
0975                port->cap_phy + LANE_ADP_CS_1, 1);
0976     if (ret)
0977         return ret;
0978 
0979     return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
0980         LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
0981 }
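
/*
 * Example (illustrative): a hypothetical caller deriving link
 * bandwidth from the two helpers above:
 *
 *     int speed = tb_port_get_link_speed(port);  // 10 or 20 Gb/s
 *     int width = tb_port_get_link_width(port);  // 1 or 2 lanes
 *
 *     if (speed > 0 && width > 0)
 *             bw = speed * width;     // e.g. 40 Gb/s for bonded Gen 3
 */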
0982 
0983 static bool tb_port_is_width_supported(struct tb_port *port, int width)
0984 {
0985     u32 phy, widths;
0986     int ret;
0987 
0988     if (!port->cap_phy)
0989         return false;
0990 
0991     ret = tb_port_read(port, &phy, TB_CFG_PORT,
0992                port->cap_phy + LANE_ADP_CS_0, 1);
0993     if (ret)
0994         return false;
0995 
0996     widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
0997         LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
0998 
0999     return !!(widths & width);
1000 }
1001 
1002 /**
1003  * tb_port_set_link_width() - Set target link width of the lane adapter
1004  * @port: Lane adapter
1005  * @width: Target link width (%1 or %2)
1006  *
1007  * Sets the target link width of the lane adapter to @width. Does not
1008  * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
1009  *
1010  * Return: %0 in case of success and negative errno in case of error
1011  */
1012 int tb_port_set_link_width(struct tb_port *port, unsigned int width)
1013 {
1014     u32 val;
1015     int ret;
1016 
1017     if (!port->cap_phy)
1018         return -EINVAL;
1019 
1020     ret = tb_port_read(port, &val, TB_CFG_PORT,
1021                port->cap_phy + LANE_ADP_CS_1, 1);
1022     if (ret)
1023         return ret;
1024 
1025     val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
1026     switch (width) {
1027     case 1:
1028         val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
1029             LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1030         break;
1031     case 2:
1032         val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
1033             LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1034         break;
1035     default:
1036         return -EINVAL;
1037     }
1038 
1039     return tb_port_write(port, &val, TB_CFG_PORT,
1040                  port->cap_phy + LANE_ADP_CS_1, 1);
1041 }
1042 
1043 /**
1044  * tb_port_set_lane_bonding() - Enable/disable lane bonding
1045  * @port: Lane adapter
1046  * @bonding: enable/disable bonding
1047  *
1048  * Enables or disables lane bonding. This should be called after target
1049  * link width has been set (tb_port_set_link_width()). Note in most
1050  * cases one should use tb_port_lane_bonding_enable() instead to enable
1051  * lane bonding.
1052  *
1053  * As a side effect sets @port->bonding accordingly (and does the same
1054  * for lane 1 too).
1055  *
1056  * Return: %0 in case of success and negative errno in case of error
1057  */
1058 int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
1059 {
1060     u32 val;
1061     int ret;
1062 
1063     if (!port->cap_phy)
1064         return -EINVAL;
1065 
1066     ret = tb_port_read(port, &val, TB_CFG_PORT,
1067                port->cap_phy + LANE_ADP_CS_1, 1);
1068     if (ret)
1069         return ret;
1070 
1071     if (bonding)
1072         val |= LANE_ADP_CS_1_LB;
1073     else
1074         val &= ~LANE_ADP_CS_1_LB;
1075 
1076     ret = tb_port_write(port, &val, TB_CFG_PORT,
1077                 port->cap_phy + LANE_ADP_CS_1, 1);
1078     if (ret)
1079         return ret;
1080 
1081     /*
1082      * When lane 0 bonding is set it will affect lane 1 too so
1083      * update both.
1084      */
1085     port->bonded = bonding;
1086     port->dual_link_port->bonded = bonding;
1087 
1088     return 0;
1089 }
1090 
1091 /**
1092  * tb_port_lane_bonding_enable() - Enable bonding on port
1093  * @port: port to enable
1094  *
1095  * Enable bonding by setting the link width of the port and the other
1096  * port in case of dual link port. Does not wait for the link to
1097  * actually reach the bonded state so caller needs to call
1098  * tb_port_wait_for_link_width() before enabling any paths through the
1099  * link to make sure the link is in expected state.
1100  *
1101  * Return: %0 in case of success and negative errno in case of error
1102  */
1103 int tb_port_lane_bonding_enable(struct tb_port *port)
1104 {
1105     int ret;
1106 
1107     /*
1108      * Enable lane bonding for both links if not already enabled
1109      * by, for example, the boot firmware.
1110      */
1111     ret = tb_port_get_link_width(port);
1112     if (ret == 1) {
1113         ret = tb_port_set_link_width(port, 2);
1114         if (ret)
1115             goto err_lane0;
1116     }
1117 
1118     ret = tb_port_get_link_width(port->dual_link_port);
1119     if (ret == 1) {
1120         ret = tb_port_set_link_width(port->dual_link_port, 2);
1121         if (ret)
1122             goto err_lane0;
1123     }
1124 
1125     ret = tb_port_set_lane_bonding(port, true);
1126     if (ret)
1127         goto err_lane1;
1128 
1129     return 0;
1130 
1131 err_lane1:
1132     tb_port_set_link_width(port->dual_link_port, 1);
1133 err_lane0:
1134     tb_port_set_link_width(port, 1);
1135     return ret;
1136 }
1137 
1138 /**
1139  * tb_port_lane_bonding_disable() - Disable bonding on port
1140  * @port: port to disable
1141  *
1142  * Disable bonding by setting the link width of the port and the
1143  * other port in case of dual link port.
1144  */
1145 void tb_port_lane_bonding_disable(struct tb_port *port)
1146 {
1147     tb_port_set_lane_bonding(port, false);
1148     tb_port_set_link_width(port->dual_link_port, 1);
1149     tb_port_set_link_width(port, 1);
1150 }
1151 
1152 /**
1153  * tb_port_wait_for_link_width() - Wait until link reaches specific width
1154  * @port: Port to wait for
1155  * @width: Expected link width (%1 or %2)
1156  * @timeout_msec: Timeout in ms how long to wait
1157  *
1158  * Should be used after both ends of the link have been bonded (or
1159  * bonding has been disabled) to wait until the link actually reaches
1160  * the expected state. Returns %-ETIMEDOUT if the @width was not reached
1161  * within the given timeout, %0 if it did.
1162  */
1163 int tb_port_wait_for_link_width(struct tb_port *port, int width,
1164                 int timeout_msec)
1165 {
1166     ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1167     int ret;
1168 
1169     do {
1170         ret = tb_port_get_link_width(port);
1171         if (ret < 0) {
1172             /*
1173              * Sometimes we get a port locked error when
1174              * polling the lanes so we can ignore it and
1175              * retry.
1176              */
1177             if (ret != -EACCES)
1178                 return ret;
1179         } else if (ret == width) {
1180             return 0;
1181         }
1182 
1183         usleep_range(1000, 2000);
1184     } while (ktime_before(ktime_get(), timeout));
1185 
1186     return -ETIMEDOUT;
1187 }
1188 
1189 static int tb_port_do_update_credits(struct tb_port *port)
1190 {
1191     u32 nfc_credits;
1192     int ret;
1193 
1194     ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
1195     if (ret)
1196         return ret;
1197 
1198     if (nfc_credits != port->config.nfc_credits) {
1199         u32 total;
1200 
1201         total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
1202             ADP_CS_4_TOTAL_BUFFERS_SHIFT;
1203 
1204         tb_port_dbg(port, "total credits changed %u -> %u\n",
1205                 port->total_credits, total);
1206 
1207         port->config.nfc_credits = nfc_credits;
1208         port->total_credits = total;
1209     }
1210 
1211     return 0;
1212 }
1213 
1214 /**
1215  * tb_port_update_credits() - Re-read port total credits
1216  * @port: Port to update
1217  *
1218  * After the link is bonded (or bonding was disabled) the port total
1219  * credits may change, so this function needs to be called to re-read
1220  * the credits. Updates also the second lane adapter.
1221  */
1222 int tb_port_update_credits(struct tb_port *port)
1223 {
1224     int ret;
1225 
1226     ret = tb_port_do_update_credits(port);
1227     if (ret)
1228         return ret;
1229     return tb_port_do_update_credits(port->dual_link_port);
1230 }
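
/*
 * A sketch of how bonding is typically brought up with the three
 * helpers above (a hypothetical caller; the 100 ms timeout is an
 * assumption and error unwinding is omitted):
 *
 *     ret = tb_port_lane_bonding_enable(down);
 *     if (!ret)
 *             ret = tb_port_wait_for_link_width(down, 2, 100);
 *     if (!ret)
 *             ret = tb_port_update_credits(down);
 */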
1231 
1232 static int tb_port_start_lane_initialization(struct tb_port *port)
1233 {
1234     int ret;
1235 
1236     if (tb_switch_is_usb4(port->sw))
1237         return 0;
1238 
1239     ret = tb_lc_start_lane_initialization(port);
1240     return ret == -EINVAL ? 0 : ret;
1241 }
1242 
1243 /*
1244  * Returns true if the port had something (router, XDomain) connected
1245  * before suspend.
1246  */
1247 static bool tb_port_resume(struct tb_port *port)
1248 {
1249     bool has_remote = tb_port_has_remote(port);
1250 
1251     if (port->usb4) {
1252         usb4_port_device_resume(port->usb4);
1253     } else if (!has_remote) {
1254         /*
1255          * For disconnected downstream lane adapters start lane
1256          * initialization now so we detect future connects.
1257          *
1258          * For XDomain start the lane initialization now so the
1259          * link gets re-established.
1260          *
1261          * This is only needed for non-USB4 ports.
1262          */
1263         if (!tb_is_upstream_port(port) || port->xdomain)
1264             tb_port_start_lane_initialization(port);
1265     }
1266 
1267     return has_remote || port->xdomain;
1268 }
1269 
1270 /**
1271  * tb_port_is_enabled() - Is the adapter port enabled
1272  * @port: Port to check
1273  */
1274 bool tb_port_is_enabled(struct tb_port *port)
1275 {
1276     switch (port->config.type) {
1277     case TB_TYPE_PCIE_UP:
1278     case TB_TYPE_PCIE_DOWN:
1279         return tb_pci_port_is_enabled(port);
1280 
1281     case TB_TYPE_DP_HDMI_IN:
1282     case TB_TYPE_DP_HDMI_OUT:
1283         return tb_dp_port_is_enabled(port);
1284 
1285     case TB_TYPE_USB3_UP:
1286     case TB_TYPE_USB3_DOWN:
1287         return tb_usb3_port_is_enabled(port);
1288 
1289     default:
1290         return false;
1291     }
1292 }
1293 
1294 /**
1295  * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1296  * @port: USB3 adapter port to check
1297  */
1298 bool tb_usb3_port_is_enabled(struct tb_port *port)
1299 {
1300     u32 data;
1301 
1302     if (tb_port_read(port, &data, TB_CFG_PORT,
1303              port->cap_adap + ADP_USB3_CS_0, 1))
1304         return false;
1305 
1306     return !!(data & ADP_USB3_CS_0_PE);
1307 }
1308 
1309 /**
1310  * tb_usb3_port_enable() - Enable USB3 adapter port
1311  * @port: USB3 adapter port to enable
1312  * @enable: Enable/disable the USB3 adapter
1313  */
1314 int tb_usb3_port_enable(struct tb_port *port, bool enable)
1315 {
1316     u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1317               : ADP_USB3_CS_0_V;
1318 
1319     if (!port->cap_adap)
1320         return -ENXIO;
1321     return tb_port_write(port, &word, TB_CFG_PORT,
1322                  port->cap_adap + ADP_USB3_CS_0, 1);
1323 }
1324 
1325 /**
1326  * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1327  * @port: PCIe port to check
1328  */
1329 bool tb_pci_port_is_enabled(struct tb_port *port)
1330 {
1331     u32 data;
1332 
1333     if (tb_port_read(port, &data, TB_CFG_PORT,
1334              port->cap_adap + ADP_PCIE_CS_0, 1))
1335         return false;
1336 
1337     return !!(data & ADP_PCIE_CS_0_PE);
1338 }
1339 
1340 /**
1341  * tb_pci_port_enable() - Enable PCIe adapter port
1342  * @port: PCIe port to enable
1343  * @enable: Enable/disable the PCIe adapter
1344  */
1345 int tb_pci_port_enable(struct tb_port *port, bool enable)
1346 {
1347     u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
1348     if (!port->cap_adap)
1349         return -ENXIO;
1350     return tb_port_write(port, &word, TB_CFG_PORT,
1351                  port->cap_adap + ADP_PCIE_CS_0, 1);
1352 }
1353 
1354 /**
1355  * tb_dp_port_hpd_is_active() - Is HPD already active
1356  * @port: DP out port to check
1357  *
1358  * Checks if the DP OUT adapter port has the HDP bit already set.
1359  */
1360 int tb_dp_port_hpd_is_active(struct tb_port *port)
1361 {
1362     u32 data;
1363     int ret;
1364 
1365     ret = tb_port_read(port, &data, TB_CFG_PORT,
1366                port->cap_adap + ADP_DP_CS_2, 1);
1367     if (ret)
1368         return ret;
1369 
1370     return !!(data & ADP_DP_CS_2_HDP);
1371 }
1372 
1373 /**
1374  * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1375  * @port: Port to clear HPD
1376  *
1377  * If the DP IN port has HDP set, this function can be used to clear it.
1378  */
1379 int tb_dp_port_hpd_clear(struct tb_port *port)
1380 {
1381     u32 data;
1382     int ret;
1383 
1384     ret = tb_port_read(port, &data, TB_CFG_PORT,
1385                port->cap_adap + ADP_DP_CS_3, 1);
1386     if (ret)
1387         return ret;
1388 
1389     data |= ADP_DP_CS_3_HDPC;
1390     return tb_port_write(port, &data, TB_CFG_PORT,
1391                  port->cap_adap + ADP_DP_CS_3, 1);
1392 }
1393 
1394 /**
1395  * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1396  * @port: DP IN/OUT port to set hops
1397  * @video: Video Hop ID
1398  * @aux_tx: AUX TX Hop ID
1399  * @aux_rx: AUX RX Hop ID
1400  *
1401  * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
1402  * router DP adapters too but does not program the values as the fields
1403  * are read-only.
1404  */
1405 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1406             unsigned int aux_tx, unsigned int aux_rx)
1407 {
1408     u32 data[2];
1409     int ret;
1410 
1411     if (tb_switch_is_usb4(port->sw))
1412         return 0;
1413 
1414     ret = tb_port_read(port, data, TB_CFG_PORT,
1415                port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1416     if (ret)
1417         return ret;
1418 
1419     data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
0420     data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1421     data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1422 
1423     data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1424         ADP_DP_CS_0_VIDEO_HOPID_MASK;
1425     data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1426     data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1427         ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1428 
1429     return tb_port_write(port, data, TB_CFG_PORT,
1430                  port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1431 }
1432 
1433 /**
1434  * tb_dp_port_is_enabled() - Is DP adapter port enabled
1435  * @port: DP adapter port to check
1436  */
1437 bool tb_dp_port_is_enabled(struct tb_port *port)
1438 {
1439     u32 data[2];
1440 
1441     if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1442              ARRAY_SIZE(data)))
1443         return false;
1444 
1445     return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1446 }
1447 
1448 /**
1449  * tb_dp_port_enable() - Enables/disables DP paths of a port
1450  * @port: DP IN/OUT port
1451  * @enable: Enable/disable DP path
1452  *
1453  * Once Hop IDs are programmed DP paths can be enabled or disabled by
1454  * calling this function.
1455  */
1456 int tb_dp_port_enable(struct tb_port *port, bool enable)
1457 {
1458     u32 data[2];
1459     int ret;
1460 
1461     ret = tb_port_read(port, data, TB_CFG_PORT,
1462               port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1463     if (ret)
1464         return ret;
1465 
1466     if (enable)
1467         data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1468     else
1469         data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1470 
1471     return tb_port_write(port, data, TB_CFG_PORT,
1472                  port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1473 }
1474 
1475 /* switch utility functions */
1476 
1477 static const char *tb_switch_generation_name(const struct tb_switch *sw)
1478 {
1479     switch (sw->generation) {
1480     case 1:
1481         return "Thunderbolt 1";
1482     case 2:
1483         return "Thunderbolt 2";
1484     case 3:
1485         return "Thunderbolt 3";
1486     case 4:
1487         return "USB4";
1488     default:
1489         return "Unknown";
1490     }
1491 }
1492 
1493 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1494 {
1495     const struct tb_regs_switch_header *regs = &sw->config;
1496 
1497     tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1498            tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1499            regs->revision, regs->thunderbolt_version);
1500     tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
1501     tb_dbg(tb, "  Config:\n");
1502     tb_dbg(tb,
1503         "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1504            regs->upstream_port_number, regs->depth,
1505            (((u64) regs->route_hi) << 32) | regs->route_lo,
1506            regs->enabled, regs->plug_events_delay);
1507     tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
1508            regs->__unknown1, regs->__unknown4);
1509 }
1510 
1511 /**
1512  * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1513  * @sw: Switch to reset
1514  *
1515  * Return: Returns 0 on success or an error code on failure.
1516  */
1517 int tb_switch_reset(struct tb_switch *sw)
1518 {
1519     struct tb_cfg_result res;
1520 
1521     if (sw->generation > 1)
1522         return 0;
1523 
1524     tb_sw_dbg(sw, "resetting switch\n");
1525 
1526     res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1527                   TB_CFG_SWITCH, 2, 2);
1528     if (res.err)
1529         return res.err;
1530     res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
1531     if (res.err > 0)
1532         return -EIO;
1533     return res.err;
1534 }
1535 
1536 /**
1537  * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
1538  * @sw: Router to read the offset value from
1539  * @offset: Offset in the router config space to read from
1540  * @bit: Bit mask in the offset to wait for
1541  * @value: Value of the bits to wait for
1542  * @timeout_msec: Timeout in ms how long to wait
1543  *
1544  * Wait until the specified bits at the specified offset reach the given
1545  * value. Returns %0 in case of success, %-ETIMEDOUT if the @value was not
1546  * reached within the given timeout or a negative errno in case of failure.
1547  */
1548 int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
1549                u32 value, int timeout_msec)
1550 {
1551     ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1552 
1553     do {
1554         u32 val;
1555         int ret;
1556 
1557         ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
1558         if (ret)
1559             return ret;
1560 
1561         if ((val & bit) == value)
1562             return 0;
1563 
1564         usleep_range(50, 100);
1565     } while (ktime_before(ktime_get(), timeout));
1566 
1567     return -ETIMEDOUT;
1568 }
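
/*
 * Example (illustrative, with made-up register names): waiting up to
 * 250 ms for a hypothetical READY bit to become set at a hypothetical
 * STATUS offset of the router config space. Note @value is compared
 * against the masked bits, so "wait for set" passes the mask twice:
 *
 *     ret = tb_switch_wait_for_bit(sw, STATUS, READY, READY, 250);
 */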
1569 
1570 /*
1571  * tb_plug_events_active() - enable/disable plug events on a switch
1572  *
1573  * Also configures a sane plug_events_delay of 255ms.
1574  *
1575  * Return: Returns 0 on success or an error code on failure.
1576  */
1577 static int tb_plug_events_active(struct tb_switch *sw, bool active)
1578 {
1579     u32 data;
1580     int res;
1581 
1582     if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1583         return 0;
1584 
1585     sw->config.plug_events_delay = 0xff;
1586     res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1587     if (res)
1588         return res;
1589 
1590     res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1591     if (res)
1592         return res;
1593 
1594     if (active) {
1595         data = data & 0xFFFFFF83;
1596         switch (sw->config.device_id) {
1597         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1598         case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1599         case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1600             break;
1601         default:
1602             /*
1603              * Skip Alpine Ridge, it needs to have vendor
1604              * specific USB hotplug event enabled for the
1605              * internal xHCI to work.
1606              */
1607             if (!tb_switch_is_alpine_ridge(sw))
1608                 data |= TB_PLUG_EVENTS_USB_DISABLE;
1609         }
1610     } else {
1611         data = data | 0x7c;
1612     }
1613     return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1614                sw->cap_plug_events + 1, 1);
1615 }
1616 
1617 static ssize_t authorized_show(struct device *dev,
1618                    struct device_attribute *attr,
1619                    char *buf)
1620 {
1621     struct tb_switch *sw = tb_to_switch(dev);
1622 
1623     return sprintf(buf, "%u\n", sw->authorized);
1624 }
1625 
1626 static int disapprove_switch(struct device *dev, void *not_used)
1627 {
1628     char *envp[] = { "AUTHORIZED=0", NULL };
1629     struct tb_switch *sw;
1630 
1631     sw = tb_to_switch(dev);
1632     if (sw && sw->authorized) {
1633         int ret;
1634 
1635         /* First children */
1636         ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1637         if (ret)
1638             return ret;
1639 
1640         ret = tb_domain_disapprove_switch(sw->tb, sw);
1641         if (ret)
1642             return ret;
1643 
1644         sw->authorized = 0;
1645         kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1646     }
1647 
1648     return 0;
1649 }
1650 
1651 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1652 {
1653     char envp_string[13];
1654     int ret = -EINVAL;
1655     char *envp[] = { envp_string, NULL };
1656 
1657     if (!mutex_trylock(&sw->tb->lock))
1658         return restart_syscall();
1659 
1660     if (!!sw->authorized == !!val)
1661         goto unlock;
1662 
1663     switch (val) {
1664     /* Disapprove switch */
1665     case 0:
1666         if (tb_route(sw)) {
1667             ret = disapprove_switch(&sw->dev, NULL);
1668             goto unlock;
1669         }
1670         break;
1671 
1672     /* Approve switch */
1673     case 1:
1674         if (sw->key)
1675             ret = tb_domain_approve_switch_key(sw->tb, sw);
1676         else
1677             ret = tb_domain_approve_switch(sw->tb, sw);
1678         break;
1679 
1680     /* Challenge switch */
1681     case 2:
1682         if (sw->key)
1683             ret = tb_domain_challenge_switch_key(sw->tb, sw);
1684         break;
1685 
1686     default:
1687         break;
1688     }
1689 
1690     if (!ret) {
1691         sw->authorized = val;
1692         /*
1693          * Notify userspace of the status change so it sees the new
1694          * value of /sys/bus/thunderbolt/devices/.../authorized.
1695          */
1696         sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1697         kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1698     }
1699 
1700 unlock:
1701     mutex_unlock(&sw->tb->lock);
1702     return ret;
1703 }
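
/*
 * The sysfs "authorized" attribute below funnels into the helper
 * above: writing 0 deauthorizes the router (children first), 1
 * approves it (using the stored key if one has been set) and 2 runs a
 * key challenge.
 */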
1704 
1705 static ssize_t authorized_store(struct device *dev,
1706                 struct device_attribute *attr,
1707                 const char *buf, size_t count)
1708 {
1709     struct tb_switch *sw = tb_to_switch(dev);
1710     unsigned int val;
1711     ssize_t ret;
1712 
1713     ret = kstrtouint(buf, 0, &val);
1714     if (ret)
1715         return ret;
1716     if (val > 2)
1717         return -EINVAL;
1718 
1719     pm_runtime_get_sync(&sw->dev);
1720     ret = tb_switch_set_authorized(sw, val);
1721     pm_runtime_mark_last_busy(&sw->dev);
1722     pm_runtime_put_autosuspend(&sw->dev);
1723 
1724     return ret ? ret : count;
1725 }
1726 static DEVICE_ATTR_RW(authorized);
1727 
1728 static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1729              char *buf)
1730 {
1731     struct tb_switch *sw = tb_to_switch(dev);
1732 
1733     return sprintf(buf, "%u\n", sw->boot);
1734 }
1735 static DEVICE_ATTR_RO(boot);
1736 
1737 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1738                char *buf)
1739 {
1740     struct tb_switch *sw = tb_to_switch(dev);
1741 
1742     return sprintf(buf, "%#x\n", sw->device);
1743 }
1744 static DEVICE_ATTR_RO(device);
1745 
1746 static ssize_t
1747 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1748 {
1749     struct tb_switch *sw = tb_to_switch(dev);
1750 
1751     return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1752 }
1753 static DEVICE_ATTR_RO(device_name);
1754 
1755 static ssize_t
1756 generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1757 {
1758     struct tb_switch *sw = tb_to_switch(dev);
1759 
1760     return sprintf(buf, "%u\n", sw->generation);
1761 }
1762 static DEVICE_ATTR_RO(generation);
1763 
1764 static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1765             char *buf)
1766 {
1767     struct tb_switch *sw = tb_to_switch(dev);
1768     ssize_t ret;
1769 
1770     if (!mutex_trylock(&sw->tb->lock))
1771         return restart_syscall();
1772 
1773     if (sw->key)
1774         ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1775     else
1776         ret = sprintf(buf, "\n");
1777 
1778     mutex_unlock(&sw->tb->lock);
1779     return ret;
1780 }
1781 
1782 static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1783              const char *buf, size_t count)
1784 {
1785     struct tb_switch *sw = tb_to_switch(dev);
1786     u8 key[TB_SWITCH_KEY_SIZE];
1787     ssize_t ret = count;
1788     bool clear = false;
1789 
1790     if (!strcmp(buf, "\n"))
1791         clear = true;
1792     else if (hex2bin(key, buf, sizeof(key)))
1793         return -EINVAL;
1794 
1795     if (!mutex_trylock(&sw->tb->lock))
1796         return restart_syscall();
1797 
1798     if (sw->authorized) {
1799         ret = -EBUSY;
1800     } else {
1801         kfree(sw->key);
1802         if (clear) {
1803             sw->key = NULL;
1804         } else {
1805             sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1806             if (!sw->key)
1807                 ret = -ENOMEM;
1808         }
1809     }
1810 
1811     mutex_unlock(&sw->tb->lock);
1812     return ret;
1813 }
1814 static DEVICE_ATTR(key, 0600, key_show, key_store);
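
/*
 * Sketch of the key format implied by the hex2bin() call above:
 * TB_SWITCH_KEY_SIZE is 32 bytes, so userspace writes 64 hex characters
 * (and writing a bare newline clears the stored key), e.g.:
 *
 *	# echo <64 hex chars> > /sys/bus/thunderbolt/devices/0-1/key
 *
 * The device name "0-1" is an assumed example.
 */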
1815 
1816 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1817               char *buf)
1818 {
1819     struct tb_switch *sw = tb_to_switch(dev);
1820 
1821     return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1822 }
1823 
1824 /*
1825  * Currently all lanes must run at the same speed, but we expose both
1826  * directions here to allow for possible asymmetric links in the future.
1827  */
1828 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1829 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1830 
1831 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1832               char *buf)
1833 {
1834     struct tb_switch *sw = tb_to_switch(dev);
1835 
1836     return sprintf(buf, "%u\n", sw->link_width);
1837 }
1838 
1839 /*
1840  * Currently the link has the same number of lanes in both directions (1
1841  * or 2), but we expose them separately to allow asymmetric links in the future.
1842  */
1843 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1844 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
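
/*
 * Example sysfs output (illustrative): a Thunderbolt 3 device with a
 * bonded link would report:
 *
 *	# cat /sys/bus/thunderbolt/devices/0-1/tx_speed
 *	20.0 Gb/s
 *	# cat /sys/bus/thunderbolt/devices/0-1/tx_lanes
 *	2
 */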
1845 
1846 static ssize_t nvm_authenticate_show(struct device *dev,
1847     struct device_attribute *attr, char *buf)
1848 {
1849     struct tb_switch *sw = tb_to_switch(dev);
1850     u32 status;
1851 
1852     nvm_get_auth_status(sw, &status);
1853     return sprintf(buf, "%#x\n", status);
1854 }
1855 
1856 static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1857                       bool disconnect)
1858 {
1859     struct tb_switch *sw = tb_to_switch(dev);
1860     int val, ret;
1861 
1862     pm_runtime_get_sync(&sw->dev);
1863 
1864     if (!mutex_trylock(&sw->tb->lock)) {
1865         ret = restart_syscall();
1866         goto exit_rpm;
1867     }
1868 
1869     /* If NVMem devices are not yet added */
1870     if (!sw->nvm) {
1871         ret = -EAGAIN;
1872         goto exit_unlock;
1873     }
1874 
1875     ret = kstrtoint(buf, 10, &val);
1876     if (ret)
1877         goto exit_unlock;
1878 
1879     /* Always clear the authentication status */
1880     nvm_clear_auth_status(sw);
1881 
1882     if (val > 0) {
1883         if (val == AUTHENTICATE_ONLY) {
1884             if (disconnect)
1885                 ret = -EINVAL;
1886             else
1887                 ret = nvm_authenticate(sw, true);
1888         } else {
1889             if (!sw->nvm->flushed) {
1890                 if (!sw->nvm->buf) {
1891                     ret = -EINVAL;
1892                     goto exit_unlock;
1893                 }
1894 
1895                 ret = nvm_validate_and_write(sw);
1896                 if (ret || val == WRITE_ONLY)
1897                     goto exit_unlock;
1898             }
1899             if (val == WRITE_AND_AUTHENTICATE) {
1900                 if (disconnect)
1901                     ret = tb_lc_force_power(sw);
1902                 else
1903                     ret = nvm_authenticate(sw, false);
1904             }
1905         }
1906     }
1907 
1908 exit_unlock:
1909     mutex_unlock(&sw->tb->lock);
1910 exit_rpm:
1911     pm_runtime_mark_last_busy(&sw->dev);
1912     pm_runtime_put_autosuspend(&sw->dev);
1913 
1914     return ret;
1915 }
1916 
1917 static ssize_t nvm_authenticate_store(struct device *dev,
1918     struct device_attribute *attr, const char *buf, size_t count)
1919 {
1920     int ret = nvm_authenticate_sysfs(dev, buf, false);
1921     if (ret)
1922         return ret;
1923     return count;
1924 }
1925 static DEVICE_ATTR_RW(nvm_authenticate);
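
/*
 * Typical NVM upgrade flow from userspace (a sketch following the values
 * handled in nvm_authenticate_sysfs() above): the image is first written
 * to the non-active NVM device and this attribute then triggers the
 * flush and authentication:
 *
 *	# dd if=firmware.bin of=/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
 *
 * Reading the attribute afterwards returns the last authentication
 * status where 0 means success. Device and image names are assumed
 * examples.
 */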
1926 
1927 static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1928     struct device_attribute *attr, char *buf)
1929 {
1930     return nvm_authenticate_show(dev, attr, buf);
1931 }
1932 
1933 static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1934     struct device_attribute *attr, const char *buf, size_t count)
1935 {
1936     int ret;
1937 
1938     ret = nvm_authenticate_sysfs(dev, buf, true);
1939     return ret ? ret : count;
1940 }
1941 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1942 
1943 static ssize_t nvm_version_show(struct device *dev,
1944                 struct device_attribute *attr, char *buf)
1945 {
1946     struct tb_switch *sw = tb_to_switch(dev);
1947     int ret;
1948 
1949     if (!mutex_trylock(&sw->tb->lock))
1950         return restart_syscall();
1951 
1952     if (sw->safe_mode)
1953         ret = -ENODATA;
1954     else if (!sw->nvm)
1955         ret = -EAGAIN;
1956     else
1957         ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1958 
1959     mutex_unlock(&sw->tb->lock);
1960 
1961     return ret;
1962 }
1963 static DEVICE_ATTR_RO(nvm_version);
1964 
1965 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1966                char *buf)
1967 {
1968     struct tb_switch *sw = tb_to_switch(dev);
1969 
1970     return sprintf(buf, "%#x\n", sw->vendor);
1971 }
1972 static DEVICE_ATTR_RO(vendor);
1973 
1974 static ssize_t
1975 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1976 {
1977     struct tb_switch *sw = tb_to_switch(dev);
1978 
1979     return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1980 }
1981 static DEVICE_ATTR_RO(vendor_name);
1982 
1983 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1984                   char *buf)
1985 {
1986     struct tb_switch *sw = tb_to_switch(dev);
1987 
1988     return sprintf(buf, "%pUb\n", sw->uuid);
1989 }
1990 static DEVICE_ATTR_RO(unique_id);
1991 
1992 static struct attribute *switch_attrs[] = {
1993     &dev_attr_authorized.attr,
1994     &dev_attr_boot.attr,
1995     &dev_attr_device.attr,
1996     &dev_attr_device_name.attr,
1997     &dev_attr_generation.attr,
1998     &dev_attr_key.attr,
1999     &dev_attr_nvm_authenticate.attr,
2000     &dev_attr_nvm_authenticate_on_disconnect.attr,
2001     &dev_attr_nvm_version.attr,
2002     &dev_attr_rx_speed.attr,
2003     &dev_attr_rx_lanes.attr,
2004     &dev_attr_tx_speed.attr,
2005     &dev_attr_tx_lanes.attr,
2006     &dev_attr_vendor.attr,
2007     &dev_attr_vendor_name.attr,
2008     &dev_attr_unique_id.attr,
2009     NULL,
2010 };
2011 
2012 static umode_t switch_attr_is_visible(struct kobject *kobj,
2013                       struct attribute *attr, int n)
2014 {
2015     struct device *dev = kobj_to_dev(kobj);
2016     struct tb_switch *sw = tb_to_switch(dev);
2017 
2018     if (attr == &dev_attr_authorized.attr) {
2019         if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
2020             sw->tb->security_level == TB_SECURITY_DPONLY)
2021             return 0;
2022     } else if (attr == &dev_attr_device.attr) {
2023         if (!sw->device)
2024             return 0;
2025     } else if (attr == &dev_attr_device_name.attr) {
2026         if (!sw->device_name)
2027             return 0;
2028     } else if (attr == &dev_attr_vendor.attr)  {
2029         if (!sw->vendor)
2030             return 0;
2031     } else if (attr == &dev_attr_vendor_name.attr)  {
2032         if (!sw->vendor_name)
2033             return 0;
2034     } else if (attr == &dev_attr_key.attr) {
2035         if (tb_route(sw) &&
2036             sw->tb->security_level == TB_SECURITY_SECURE &&
2037             sw->security_level == TB_SECURITY_SECURE)
2038             return attr->mode;
2039         return 0;
2040     } else if (attr == &dev_attr_rx_speed.attr ||
2041            attr == &dev_attr_rx_lanes.attr ||
2042            attr == &dev_attr_tx_speed.attr ||
2043            attr == &dev_attr_tx_lanes.attr) {
2044         if (tb_route(sw))
2045             return attr->mode;
2046         return 0;
2047     } else if (attr == &dev_attr_nvm_authenticate.attr) {
2048         if (nvm_upgradeable(sw))
2049             return attr->mode;
2050         return 0;
2051     } else if (attr == &dev_attr_nvm_version.attr) {
2052         if (nvm_readable(sw))
2053             return attr->mode;
2054         return 0;
2055     } else if (attr == &dev_attr_boot.attr) {
2056         if (tb_route(sw))
2057             return attr->mode;
2058         return 0;
2059     } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
2060         if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
2061             return attr->mode;
2062         return 0;
2063     }
2064 
2065     return sw->safe_mode ? 0 : attr->mode;
2066 }
2067 
2068 static const struct attribute_group switch_group = {
2069     .is_visible = switch_attr_is_visible,
2070     .attrs = switch_attrs,
2071 };
2072 
2073 static const struct attribute_group *switch_groups[] = {
2074     &switch_group,
2075     NULL,
2076 };
2077 
2078 static void tb_switch_release(struct device *dev)
2079 {
2080     struct tb_switch *sw = tb_to_switch(dev);
2081     struct tb_port *port;
2082 
2083     dma_port_free(sw->dma_port);
2084 
2085     tb_switch_for_each_port(sw, port) {
2086         ida_destroy(&port->in_hopids);
2087         ida_destroy(&port->out_hopids);
2088     }
2089 
2090     kfree(sw->uuid);
2091     kfree(sw->device_name);
2092     kfree(sw->vendor_name);
2093     kfree(sw->ports);
2094     kfree(sw->drom);
2095     kfree(sw->key);
2096     kfree(sw);
2097 }
2098 
2099 static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2100 {
2101     struct tb_switch *sw = tb_to_switch(dev);
2102     const char *type;
2103 
2104     if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
2105         if (add_uevent_var(env, "USB4_VERSION=1.0"))
2106             return -ENOMEM;
2107     }
2108 
2109     if (!tb_route(sw)) {
2110         type = "host";
2111     } else {
2112         const struct tb_port *port;
2113         bool hub = false;
2114 
2115         /* Device is a hub if it has any downstream ports */
2116         tb_switch_for_each_port(sw, port) {
2117             if (!port->disabled && !tb_is_upstream_port(port) &&
2118                  tb_port_is_null(port)) {
2119                 hub = true;
2120                 break;
2121             }
2122         }
2123 
2124         type = hub ? "hub" : "device";
2125     }
2126 
2127     if (add_uevent_var(env, "USB4_TYPE=%s", type))
2128         return -ENOMEM;
2129     return 0;
2130 }
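
/*
 * For example (illustrative), a USB4 hub enumerated by the code above
 * would carry the following variables in its uevent, which udev rules
 * can match on:
 *
 *	USB4_VERSION=1.0
 *	USB4_TYPE=hub
 */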
2131 
2132 /*
2133  * Currently we only need to provide the callbacks. Everything else is handled
2134  * in the connection manager.
2135  */
2136 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2137 {
2138     struct tb_switch *sw = tb_to_switch(dev);
2139     const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2140 
2141     if (cm_ops->runtime_suspend_switch)
2142         return cm_ops->runtime_suspend_switch(sw);
2143 
2144     return 0;
2145 }
2146 
2147 static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2148 {
2149     struct tb_switch *sw = tb_to_switch(dev);
2150     const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2151 
2152     if (cm_ops->runtime_resume_switch)
2153         return cm_ops->runtime_resume_switch(sw);
2154     return 0;
2155 }
2156 
2157 static const struct dev_pm_ops tb_switch_pm_ops = {
2158     SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2159                NULL)
2160 };
2161 
2162 struct device_type tb_switch_type = {
2163     .name = "thunderbolt_device",
2164     .release = tb_switch_release,
2165     .uevent = tb_switch_uevent,
2166     .pm = &tb_switch_pm_ops,
2167 };
2168 
2169 static int tb_switch_get_generation(struct tb_switch *sw)
2170 {
2171     switch (sw->config.device_id) {
2172     case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2173     case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2174     case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2175     case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2176     case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2177     case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2178     case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2179     case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2180         return 1;
2181 
2182     case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2183     case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2184     case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2185         return 2;
2186 
2187     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2188     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2189     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2190     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2191     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
2192     case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2193     case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2194     case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
2195     case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2196     case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2197         return 3;
2198 
2199     default:
2200         if (tb_switch_is_usb4(sw))
2201             return 4;
2202 
2203         /*
2204          * For unknown switches, assume generation 1 to be on the
2205          * safe side.
2206          */
2207         tb_sw_warn(sw, "unsupported switch device id %#x\n",
2208                sw->config.device_id);
2209         return 1;
2210     }
2211 }
2212 
2213 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2214 {
2215     int max_depth;
2216 
2217     if (tb_switch_is_usb4(sw) ||
2218         (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2219         max_depth = USB4_SWITCH_MAX_DEPTH;
2220     else
2221         max_depth = TB_SWITCH_MAX_DEPTH;
2222 
2223     return depth > max_depth;
2224 }
2225 
2226 /**
2227  * tb_switch_alloc() - allocate a switch
2228  * @tb: Pointer to the owning domain
2229  * @parent: Parent device for this switch
2230  * @route: Route string for this switch
2231  *
2232  * Allocates and initializes a switch. Will not upload configuration to
2233  * the switch. For that you need to call tb_switch_configure()
2234  * separately. The returned switch should be released by calling
2235  * tb_switch_put().
2236  *
2237  * Return: Pointer to the allocated switch or ERR_PTR() in case of
2238  * failure.
2239  */
2240 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2241                   u64 route)
2242 {
2243     struct tb_switch *sw;
2244     int upstream_port;
2245     int i, ret, depth;
2246 
2247     /* Unlock the downstream port so we can access the switch below */
2248     if (route) {
2249         struct tb_switch *parent_sw = tb_to_switch(parent);
2250         struct tb_port *down;
2251 
2252         down = tb_port_at(route, parent_sw);
2253         tb_port_unlock(down);
2254     }
2255 
2256     depth = tb_route_length(route);
2257 
2258     upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
2259     if (upstream_port < 0)
2260         return ERR_PTR(upstream_port);
2261 
2262     sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2263     if (!sw)
2264         return ERR_PTR(-ENOMEM);
2265 
2266     sw->tb = tb;
2267     ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2268     if (ret)
2269         goto err_free_sw_ports;
2270 
2271     sw->generation = tb_switch_get_generation(sw);
2272 
2273     tb_dbg(tb, "current switch config:\n");
2274     tb_dump_switch(tb, sw);
2275 
2276     /* configure switch */
2277     sw->config.upstream_port_number = upstream_port;
2278     sw->config.depth = depth;
2279     sw->config.route_hi = upper_32_bits(route);
2280     sw->config.route_lo = lower_32_bits(route);
2281     sw->config.enabled = 0;
2282 
2283     /* Make sure we do not exceed maximum topology limit */
2284     if (tb_switch_exceeds_max_depth(sw, depth)) {
2285         ret = -EADDRNOTAVAIL;
2286         goto err_free_sw_ports;
2287     }
2288 
2289     /* initialize ports */
2290     sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2291                 GFP_KERNEL);
2292     if (!sw->ports) {
2293         ret = -ENOMEM;
2294         goto err_free_sw_ports;
2295     }
2296 
2297     for (i = 0; i <= sw->config.max_port_number; i++) {
2298         /* minimum setup for tb_find_cap and tb_drom_read to work */
2299         sw->ports[i].sw = sw;
2300         sw->ports[i].port = i;
2301 
2302         /* Control port does not need HopID allocation */
2303         if (i) {
2304             ida_init(&sw->ports[i].in_hopids);
2305             ida_init(&sw->ports[i].out_hopids);
2306         }
2307     }
2308 
2309     ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2310     if (ret > 0)
2311         sw->cap_plug_events = ret;
2312 
2313     ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2314     if (ret > 0)
2315         sw->cap_vsec_tmu = ret;
2316 
2317     ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2318     if (ret > 0)
2319         sw->cap_lc = ret;
2320 
2321     ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2322     if (ret > 0)
2323         sw->cap_lp = ret;
2324 
2325     /* Root switch is always authorized */
2326     if (!route)
2327         sw->authorized = true;
2328 
2329     device_initialize(&sw->dev);
2330     sw->dev.parent = parent;
2331     sw->dev.bus = &tb_bus_type;
2332     sw->dev.type = &tb_switch_type;
2333     sw->dev.groups = switch_groups;
2334     dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2335 
2336     return sw;
2337 
2338 err_free_sw_ports:
2339     kfree(sw->ports);
2340     kfree(sw);
2341 
2342     return ERR_PTR(ret);
2343 }
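
/*
 * A minimal sketch of how a connection manager uses the function above
 * when it finds a new router behind @port (modeled on the software CM;
 * error handling abbreviated):
 *
 *	sw = tb_switch_alloc(tb, &port->sw->dev, tb_downstream_route(port));
 *	if (IS_ERR(sw))
 *		return;
 *	if (tb_switch_configure(sw) || tb_switch_add(sw))
 *		tb_switch_put(sw);
 */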
2344 
2345 /**
2346  * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2347  * @tb: Pointer to the owning domain
2348  * @parent: Parent device for this switch
2349  * @route: Route string for this switch
2350  *
2351  * This creates a switch in safe mode. This means the switch pretty much
2352  * lacks all capabilities except the DMA configuration port until it is
2353  * flashed with valid NVM firmware.
2354  *
2355  * The returned switch must be released by calling tb_switch_put().
2356  *
2357  * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2358  */
2359 struct tb_switch *
2360 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2361 {
2362     struct tb_switch *sw;
2363 
2364     sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2365     if (!sw)
2366         return ERR_PTR(-ENOMEM);
2367 
2368     sw->tb = tb;
2369     sw->config.depth = tb_route_length(route);
2370     sw->config.route_hi = upper_32_bits(route);
2371     sw->config.route_lo = lower_32_bits(route);
2372     sw->safe_mode = true;
2373 
2374     device_initialize(&sw->dev);
2375     sw->dev.parent = parent;
2376     sw->dev.bus = &tb_bus_type;
2377     sw->dev.type = &tb_switch_type;
2378     sw->dev.groups = switch_groups;
2379     dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2380 
2381     return sw;
2382 }
2383 
2384 /**
2385  * tb_switch_configure() - Uploads configuration to the switch
2386  * @sw: Switch to configure
2387  *
2388  * Call this function before the switch is added to the system. It will
2389  * upload the configuration to the switch and make it available for the
2390  * connection manager to use. It can be called again after resume from
2391  * low power states to re-initialize the switch.
2392  *
2393  * Return: %0 in case of success and negative errno in case of failure
2394  */
2395 int tb_switch_configure(struct tb_switch *sw)
2396 {
2397     struct tb *tb = sw->tb;
2398     u64 route;
2399     int ret;
2400 
2401     route = tb_route(sw);
2402 
2403     tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2404            sw->config.enabled ? "restoring" : "initializing", route,
2405            tb_route_length(route), sw->config.upstream_port_number);
2406 
2407     sw->config.enabled = 1;
2408 
2409     if (tb_switch_is_usb4(sw)) {
2410         /*
2411          * For USB4 devices, we need to program the CM version
2412          * accordingly so that it knows to expose all the
2413          * additional capabilities.
2414          */
2415         sw->config.cmuv = USB4_VERSION_1_0;
2416         sw->config.plug_events_delay = 0xa;
2417 
2418         /* Enumerate the switch */
2419         ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2420                   ROUTER_CS_1, 4);
2421         if (ret)
2422             return ret;
2423 
2424         ret = usb4_switch_setup(sw);
2425     } else {
2426         if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2427             tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2428                    sw->config.vendor_id);
2429 
2430         if (!sw->cap_plug_events) {
2431             tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2432             return -ENODEV;
2433         }
2434 
2435         /* Enumerate the switch */
2436         ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2437                   ROUTER_CS_1, 3);
2438     }
2439     if (ret)
2440         return ret;
2441 
2442     return tb_plug_events_active(sw, true);
2443 }
2444 
2445 static int tb_switch_set_uuid(struct tb_switch *sw)
2446 {
2447     bool uid = false;
2448     u32 uuid[4];
2449     int ret;
2450 
2451     if (sw->uuid)
2452         return 0;
2453 
2454     if (tb_switch_is_usb4(sw)) {
2455         ret = usb4_switch_read_uid(sw, &sw->uid);
2456         if (ret)
2457             return ret;
2458         uid = true;
2459     } else {
2460         /*
2461          * The newer controllers include a fused UUID as part of
2462          * the link controller specific registers.
2463          */
2464         ret = tb_lc_read_uuid(sw, uuid);
2465         if (ret) {
2466             if (ret != -EINVAL)
2467                 return ret;
2468             uid = true;
2469         }
2470     }
2471 
2472     if (uid) {
2473         /*
2474          * The ICM generates the UUID based on the UID and fills the
2475          * upper two words with ones. This does not strictly follow the
2476          * UUID format, but we want to be compatible with it, so we do
2477          * the same here.
2478          */
2479         uuid[0] = sw->uid & 0xffffffff;
2480         uuid[1] = (sw->uid >> 32) & 0xffffffff;
2481         uuid[2] = 0xffffffff;
2482         uuid[3] = 0xffffffff;
2483     }
2484 
2485     sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2486     if (!sw->uuid)
2487         return -ENOMEM;
2488     return 0;
2489 }
2490 
2491 static int tb_switch_add_dma_port(struct tb_switch *sw)
2492 {
2493     u32 status;
2494     int ret;
2495 
2496     switch (sw->generation) {
2497     case 2:
2498         /* Only root switch can be upgraded */
2499         if (tb_route(sw))
2500             return 0;
2501 
2502         fallthrough;
2503     case 3:
2504     case 4:
2505         ret = tb_switch_set_uuid(sw);
2506         if (ret)
2507             return ret;
2508         break;
2509 
2510     default:
2511         /*
2512          * DMA port is the only thing available when the switch
2513          * is in safe mode.
2514          */
2515         if (!sw->safe_mode)
2516             return 0;
2517         break;
2518     }
2519 
2520     if (sw->no_nvm_upgrade)
2521         return 0;
2522 
2523     if (tb_switch_is_usb4(sw)) {
2524         ret = usb4_switch_nvm_authenticate_status(sw, &status);
2525         if (ret)
2526             return ret;
2527 
2528         if (status) {
2529             tb_sw_info(sw, "switch flash authentication failed\n");
2530             nvm_set_auth_status(sw, status);
2531         }
2532 
2533         return 0;
2534     }
2535 
2536     /* Root switch DMA port requires running firmware */
2537     if (!tb_route(sw) && !tb_switch_is_icm(sw))
2538         return 0;
2539 
2540     sw->dma_port = dma_port_alloc(sw);
2541     if (!sw->dma_port)
2542         return 0;
2543 
2544     /*
2545      * If there is a status already set, then authentication failed
2546      * when dma_port_flash_update_auth() returned. Power cycling is
2547      * not needed (it was done already), so the only thing we do here
2548      * is unblock runtime PM of the root port.
2549      */
2550     nvm_get_auth_status(sw, &status);
2551     if (status) {
2552         if (!tb_route(sw))
2553             nvm_authenticate_complete_dma_port(sw);
2554         return 0;
2555     }
2556 
2557     /*
2558      * Check the status of the previous flash authentication. If there
2559      * is a failure we need to power cycle the switch in any case to
2560      * make it functional again.
2561      */
2562     ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2563     if (ret <= 0)
2564         return ret;
2565 
2566     /* Now we can allow root port to suspend again */
2567     if (!tb_route(sw))
2568         nvm_authenticate_complete_dma_port(sw);
2569 
2570     if (status) {
2571         tb_sw_info(sw, "switch flash authentication failed\n");
2572         nvm_set_auth_status(sw, status);
2573     }
2574 
2575     tb_sw_info(sw, "power cycling the switch now\n");
2576     dma_port_power_cycle(sw->dma_port);
2577 
2578     /*
2579      * We return an error here, which causes adding the switch to fail.
2580      * It should appear back after the power cycle is complete.
2581      */
2582     return -ESHUTDOWN;
2583 }
2584 
2585 static void tb_switch_default_link_ports(struct tb_switch *sw)
2586 {
2587     int i;
2588 
2589     for (i = 1; i <= sw->config.max_port_number; i++) {
2590         struct tb_port *port = &sw->ports[i];
2591         struct tb_port *subordinate;
2592 
2593         if (!tb_port_is_null(port))
2594             continue;
2595 
2596         /* Check for the subordinate port */
2597         if (i == sw->config.max_port_number ||
2598             !tb_port_is_null(&sw->ports[i + 1]))
2599             continue;
2600 
2601         /* Link them if not already linked (e.g. by DROM) */
2602         subordinate = &sw->ports[i + 1];
2603         if (!port->dual_link_port && !subordinate->dual_link_port) {
2604             port->link_nr = 0;
2605             port->dual_link_port = subordinate;
2606             subordinate->link_nr = 1;
2607             subordinate->dual_link_port = port;
2608 
2609             tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2610                   port->port, subordinate->port);
2611         }
2612     }
2613 }
2614 
2615 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2616 {
2617     const struct tb_port *up = tb_upstream_port(sw);
2618 
2619     if (!up->dual_link_port || !up->dual_link_port->remote)
2620         return false;
2621 
2622     if (tb_switch_is_usb4(sw))
2623         return usb4_switch_lane_bonding_possible(sw);
2624     return tb_lc_lane_bonding_possible(sw);
2625 }
2626 
2627 static int tb_switch_update_link_attributes(struct tb_switch *sw)
2628 {
2629     struct tb_port *up;
2630     bool change = false;
2631     int ret;
2632 
2633     if (!tb_route(sw) || tb_switch_is_icm(sw))
2634         return 0;
2635 
2636     up = tb_upstream_port(sw);
2637 
2638     ret = tb_port_get_link_speed(up);
2639     if (ret < 0)
2640         return ret;
2641     if (sw->link_speed != ret)
2642         change = true;
2643     sw->link_speed = ret;
2644 
2645     ret = tb_port_get_link_width(up);
2646     if (ret < 0)
2647         return ret;
2648     if (sw->link_width != ret)
2649         change = true;
2650     sw->link_width = ret;
2651 
2652     /* Notify userspace that there is a possible link attribute change */
2653     if (device_is_registered(&sw->dev) && change)
2654         kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2655 
2656     return 0;
2657 }
2658 
2659 /**
2660  * tb_switch_lane_bonding_enable() - Enable lane bonding
2661  * @sw: Switch to enable lane bonding
2662  *
2663  * The connection manager can call this function to enable lane bonding
2664  * of a switch. If the conditions are correct and both switches support
2665  * the feature, the lanes are bonded. It is safe to call this for any switch.
2666  */
2667 int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2668 {
2669     struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2670     struct tb_port *up, *down;
2671     u64 route = tb_route(sw);
2672     int ret;
2673 
2674     if (!route)
2675         return 0;
2676 
2677     if (!tb_switch_lane_bonding_possible(sw))
2678         return 0;
2679 
2680     up = tb_upstream_port(sw);
2681     down = tb_port_at(route, parent);
2682 
2683     if (!tb_port_is_width_supported(up, 2) ||
2684         !tb_port_is_width_supported(down, 2))
2685         return 0;
2686 
2687     ret = tb_port_lane_bonding_enable(up);
2688     if (ret) {
2689         tb_port_warn(up, "failed to enable lane bonding\n");
2690         return ret;
2691     }
2692 
2693     ret = tb_port_lane_bonding_enable(down);
2694     if (ret) {
2695         tb_port_warn(down, "failed to enable lane bonding\n");
2696         tb_port_lane_bonding_disable(up);
2697         return ret;
2698     }
2699 
2700     ret = tb_port_wait_for_link_width(down, 2, 100);
2701     if (ret) {
2702         tb_port_warn(down, "timeout enabling lane bonding\n");
2703         return ret;
2704     }
2705 
2706     tb_port_update_credits(down);
2707     tb_port_update_credits(up);
2708     tb_switch_update_link_attributes(sw);
2709 
2710     tb_sw_dbg(sw, "lane bonding enabled\n");
2711     return ret;
2712 }
2713 
2714 /**
2715  * tb_switch_lane_bonding_disable() - Disable lane bonding
2716  * @sw: Switch whose lane bonding to disable
2717  *
2718  * Disables lane bonding between @sw and its parent. This can be called
2719  * even if the lanes were not bonded originally.
2720  */
2721 void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2722 {
2723     struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2724     struct tb_port *up, *down;
2725 
2726     if (!tb_route(sw))
2727         return;
2728 
2729     up = tb_upstream_port(sw);
2730     if (!up->bonded)
2731         return;
2732 
2733     down = tb_port_at(tb_route(sw), parent);
2734 
2735     tb_port_lane_bonding_disable(up);
2736     tb_port_lane_bonding_disable(down);
2737 
2738     /*
2739      * It is fine if we get other errors as the router might have
2740      * been unplugged.
2741      */
2742     if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2743         tb_sw_warn(sw, "timeout disabling lane bonding\n");
2744 
2745     tb_port_update_credits(down);
2746     tb_port_update_credits(up);
2747     tb_switch_update_link_attributes(sw);
2748 
2749     tb_sw_dbg(sw, "lane bonding disabled\n");
2750 }
2751 
2752 /**
2753  * tb_switch_configure_link() - Set link configured
2754  * @sw: Switch whose link is configured
2755  *
2756  * Sets the link upstream from @sw configured (from both ends) so that
2757  * it will not be disconnected when the domain exits sleep. Can be
2758  * called for any switch.
2759  *
2760  * It is recommended that this is called after lane bonding is enabled.
2761  *
2762  * Returns %0 on success and negative errno in case of error.
2763  */
2764 int tb_switch_configure_link(struct tb_switch *sw)
2765 {
2766     struct tb_port *up, *down;
2767     int ret;
2768 
2769     if (!tb_route(sw) || tb_switch_is_icm(sw))
2770         return 0;
2771 
2772     up = tb_upstream_port(sw);
2773     if (tb_switch_is_usb4(up->sw))
2774         ret = usb4_port_configure(up);
2775     else
2776         ret = tb_lc_configure_port(up);
2777     if (ret)
2778         return ret;
2779 
2780     down = up->remote;
2781     if (tb_switch_is_usb4(down->sw))
2782         return usb4_port_configure(down);
2783     return tb_lc_configure_port(down);
2784 }
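
/*
 * As the kernel-doc above recommends, a connection manager enables lane
 * bonding first and only then marks the link configured, roughly (a
 * sketch based on the software CM):
 *
 *	if (tb_switch_lane_bonding_enable(sw))
 *		tb_sw_warn(sw, "failed to enable lane bonding\n");
 *	if (tb_switch_configure_link(sw))
 *		tb_sw_warn(sw, "failed to configure link\n");
 */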
2785 
2786 /**
2787  * tb_switch_unconfigure_link() - Unconfigure link
2788  * @sw: Switch whose link is unconfigured
2789  *
2790  * Sets the link unconfigured so that @sw will be disconnected if the
2791  * domain exits sleep.
2792  */
2793 void tb_switch_unconfigure_link(struct tb_switch *sw)
2794 {
2795     struct tb_port *up, *down;
2796 
2797     if (sw->is_unplugged)
2798         return;
2799     if (!tb_route(sw) || tb_switch_is_icm(sw))
2800         return;
2801 
2802     up = tb_upstream_port(sw);
2803     if (tb_switch_is_usb4(up->sw))
2804         usb4_port_unconfigure(up);
2805     else
2806         tb_lc_unconfigure_port(up);
2807 
2808     down = up->remote;
2809     if (tb_switch_is_usb4(down->sw))
2810         usb4_port_unconfigure(down);
2811     else
2812         tb_lc_unconfigure_port(down);
2813 }
2814 
2815 static void tb_switch_credits_init(struct tb_switch *sw)
2816 {
2817     if (tb_switch_is_icm(sw))
2818         return;
2819     if (!tb_switch_is_usb4(sw))
2820         return;
2821     if (usb4_switch_credits_init(sw))
2822         tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2823 }
2824 
2825 /**
2826  * tb_switch_add() - Add a switch to the domain
2827  * @sw: Switch to add
2828  *
2829  * This is the last step in adding a switch to the domain. It will read
2830  * identification information from the DROM and initialize the ports so
2831  * that they can be used to connect other switches. The switch will be
2832  * exposed to userspace when this function successfully returns. To
2833  * remove and release the switch, call tb_switch_remove().
2834  *
2835  * Return: %0 in case of success and negative errno in case of failure
2836  */
2837 int tb_switch_add(struct tb_switch *sw)
2838 {
2839     int i, ret;
2840 
2841     /*
2842      * Initialize the DMA control port now, before we read the DROM.
2843      * Recent host controllers have a more complete DROM in NVM that
2844      * includes vendor and model identification strings which we then
2845      * expose to userspace. The NVM can be accessed through the DMA
2846      * configuration based mailbox.
2847      */
2848     ret = tb_switch_add_dma_port(sw);
2849     if (ret) {
2850         dev_err(&sw->dev, "failed to add DMA port\n");
2851         return ret;
2852     }
2853 
2854     if (!sw->safe_mode) {
2855         tb_switch_credits_init(sw);
2856 
2857         /* read drom */
2858         ret = tb_drom_read(sw);
2859         if (ret)
2860             dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
2861         tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2862 
2863         tb_check_quirks(sw);
2864 
2865         ret = tb_switch_set_uuid(sw);
2866         if (ret) {
2867             dev_err(&sw->dev, "failed to set UUID\n");
2868             return ret;
2869         }
2870 
2871         for (i = 0; i <= sw->config.max_port_number; i++) {
2872             if (sw->ports[i].disabled) {
2873                 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2874                 continue;
2875             }
2876             ret = tb_init_port(&sw->ports[i]);
2877             if (ret) {
2878                 dev_err(&sw->dev, "failed to initialize port %d\n", i);
2879                 return ret;
2880             }
2881         }
2882 
2883         tb_switch_default_link_ports(sw);
2884 
2885         ret = tb_switch_update_link_attributes(sw);
2886         if (ret)
2887             return ret;
2888 
2889         ret = tb_switch_tmu_init(sw);
2890         if (ret)
2891             return ret;
2892     }
2893 
2894     ret = device_add(&sw->dev);
2895     if (ret) {
2896         dev_err(&sw->dev, "failed to add device: %d\n", ret);
2897         return ret;
2898     }
2899 
2900     if (tb_route(sw)) {
2901         dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2902              sw->vendor, sw->device);
2903         if (sw->vendor_name && sw->device_name)
2904             dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2905                  sw->device_name);
2906     }
2907 
2908     ret = usb4_switch_add_ports(sw);
2909     if (ret) {
2910         dev_err(&sw->dev, "failed to add USB4 ports\n");
2911         goto err_del;
2912     }
2913 
2914     ret = tb_switch_nvm_add(sw);
2915     if (ret) {
2916         dev_err(&sw->dev, "failed to add NVM devices\n");
2917         goto err_ports;
2918     }
2919 
2920     /*
2921      * Thunderbolt routers do not generate wakeups themselves, but
2922      * they forward wakeups from tunneled protocols, so enable wakeup
2923      * support here.
2924      */
2925     device_init_wakeup(&sw->dev, true);
2926 
2927     pm_runtime_set_active(&sw->dev);
2928     if (sw->rpm) {
2929         pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2930         pm_runtime_use_autosuspend(&sw->dev);
2931         pm_runtime_mark_last_busy(&sw->dev);
2932         pm_runtime_enable(&sw->dev);
2933         pm_request_autosuspend(&sw->dev);
2934     }
2935 
2936     tb_switch_debugfs_init(sw);
2937     return 0;
2938 
2939 err_ports:
2940     usb4_switch_remove_ports(sw);
2941 err_del:
2942     device_del(&sw->dev);
2943 
2944     return ret;
2945 }
2946 
2947 /**
2948  * tb_switch_remove() - Remove and release a switch
2949  * @sw: Switch to remove
2950  *
2951  * This will remove the switch from the domain and release it after the
2952  * last reference count drops to zero. If there are switches connected below
2953  * this switch, they will be removed as well.
2954  */
2955 void tb_switch_remove(struct tb_switch *sw)
2956 {
2957     struct tb_port *port;
2958 
2959     tb_switch_debugfs_remove(sw);
2960 
2961     if (sw->rpm) {
2962         pm_runtime_get_sync(&sw->dev);
2963         pm_runtime_disable(&sw->dev);
2964     }
2965 
2966     /* port 0 is the switch itself and never has a remote */
2967     tb_switch_for_each_port(sw, port) {
2968         if (tb_port_has_remote(port)) {
2969             tb_switch_remove(port->remote->sw);
2970             port->remote = NULL;
2971         } else if (port->xdomain) {
2972             tb_xdomain_remove(port->xdomain);
2973             port->xdomain = NULL;
2974         }
2975 
2976         /* Remove any downstream retimers */
2977         tb_retimer_remove_all(port);
2978     }
2979 
2980     if (!sw->is_unplugged)
2981         tb_plug_events_active(sw, false);
2982 
2983     tb_switch_nvm_remove(sw);
2984     usb4_switch_remove_ports(sw);
2985 
2986     if (tb_route(sw))
2987         dev_info(&sw->dev, "device disconnected\n");
2988     device_unregister(&sw->dev);
2989 }
2990 
2991 /**
2992  * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
2993  * @sw: Router to mark unplugged
2994  */
2995 void tb_sw_set_unplugged(struct tb_switch *sw)
2996 {
2997     struct tb_port *port;
2998 
2999     if (sw == sw->tb->root_switch) {
3000         tb_sw_WARN(sw, "cannot unplug root switch\n");
3001         return;
3002     }
3003     if (sw->is_unplugged) {
3004         tb_sw_WARN(sw, "is_unplugged already set\n");
3005         return;
3006     }
3007     sw->is_unplugged = true;
3008     tb_switch_for_each_port(sw, port) {
3009         if (tb_port_has_remote(port))
3010             tb_sw_set_unplugged(port->remote->sw);
3011         else if (port->xdomain)
3012             port->xdomain->is_unplugged = true;
3013     }
3014 }
3015 
3016 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
3017 {
3018     if (flags)
3019         tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
3020     else
3021         tb_sw_dbg(sw, "disabling wakeup\n");
3022 
3023     if (tb_switch_is_usb4(sw))
3024         return usb4_switch_set_wake(sw, flags);
3025     return tb_lc_set_wake(sw, flags);
3026 }
3027 
3028 int tb_switch_resume(struct tb_switch *sw)
3029 {
3030     struct tb_port *port;
3031     int err;
3032 
3033     tb_sw_dbg(sw, "resuming switch\n");
3034 
3035     /*
3036      * Check for UID of the connected switches except for root
3037      * switch which we assume cannot be removed.
3038      */
3039     if (tb_route(sw)) {
3040         u64 uid;
3041 
3042         /*
3043          * Check first that we can still read the switch config
3044          * space. It may be that there is now another domain
3045          * connected.
3046          */
3047         err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
3048         if (err < 0) {
3049             tb_sw_info(sw, "switch not present anymore\n");
3050             return err;
3051         }
3052 
3053         /* We don't have any way to confirm this was the same device */
3054         if (!sw->uid)
3055             return -ENODEV;
3056 
3057         if (tb_switch_is_usb4(sw))
3058             err = usb4_switch_read_uid(sw, &uid);
3059         else
3060             err = tb_drom_read_uid_only(sw, &uid);
3061         if (err) {
3062             tb_sw_warn(sw, "uid read failed\n");
3063             return err;
3064         }
3065         if (sw->uid != uid) {
3066             tb_sw_info(sw,
3067                 "changed while suspended (uid %#llx -> %#llx)\n",
3068                 sw->uid, uid);
3069             return -ENODEV;
3070         }
3071     }
3072 
3073     err = tb_switch_configure(sw);
3074     if (err)
3075         return err;
3076 
3077     /* Disable wakes */
3078     tb_switch_set_wake(sw, 0);
3079 
3080     err = tb_switch_tmu_init(sw);
3081     if (err)
3082         return err;
3083 
3084     /* check for surviving downstream switches */
3085     tb_switch_for_each_port(sw, port) {
3086         if (!tb_port_is_null(port))
3087             continue;
3088 
3089         if (!tb_port_resume(port))
3090             continue;
3091 
3092         if (tb_wait_for_port(port, true) <= 0) {
3093             tb_port_warn(port,
3094                      "lost during suspend, disconnecting\n");
3095             if (tb_port_has_remote(port))
3096                 tb_sw_set_unplugged(port->remote->sw);
3097             else if (port->xdomain)
3098                 port->xdomain->is_unplugged = true;
3099         } else {
3100             /*
3101              * Always unlock the port so the downstream
3102              * switch/domain is accessible.
3103              */
3104             if (tb_port_unlock(port))
3105                 tb_port_warn(port, "failed to unlock port\n");
3106             if (port->remote && tb_switch_resume(port->remote->sw)) {
3107                 tb_port_warn(port,
3108                          "lost during suspend, disconnecting\n");
3109                 tb_sw_set_unplugged(port->remote->sw);
3110             }
3111         }
3112     }
3113     return 0;
3114 }
3115 
3116 /**
3117  * tb_switch_suspend() - Put a switch to sleep
3118  * @sw: Switch to suspend
3119  * @runtime: Is this runtime suspend or system sleep
3120  *
3121  * Suspends the router and all its children. Enables wakes according to
3122  * the value of @runtime and then sets the sleep bit for the router. If
3123  * @sw is the host router, the domain is ready to go to sleep once this
3124  * function returns.
3125  */
3126 void tb_switch_suspend(struct tb_switch *sw, bool runtime)
3127 {
3128     unsigned int flags = 0;
3129     struct tb_port *port;
3130     int err;
3131 
3132     tb_sw_dbg(sw, "suspending switch\n");
3133 
3134     /*
3135      * This is actually only needed for Titan Ridge, but for simplicity it
3136      * can be done for USB4 devices too, as CLx is re-enabled at resume.
3137      * CL0s and CL1 are enabled and supported together.
3138      */
3139     if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
3140         if (tb_switch_disable_clx(sw, TB_CL1))
3141             tb_sw_warn(sw, "failed to disable %s on upstream port\n",
3142                    tb_switch_clx_name(TB_CL1));
3143     }
3144 
3145     err = tb_plug_events_active(sw, false);
3146     if (err)
3147         return;
3148 
3149     tb_switch_for_each_port(sw, port) {
3150         if (tb_port_has_remote(port))
3151             tb_switch_suspend(port->remote->sw, runtime);
3152     }
3153 
3154     if (runtime) {
3155         /* Trigger wake when something is plugged in/out */
3156         flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
3157         flags |= TB_WAKE_ON_USB4;
3158         flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
3159     } else if (device_may_wakeup(&sw->dev)) {
3160         flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3161     }
3162 
3163     tb_switch_set_wake(sw, flags);
3164 
3165     if (tb_switch_is_usb4(sw))
3166         usb4_switch_set_sleep(sw);
3167     else
3168         tb_lc_set_sleep(sw);
3169 }
3170 
3171 /**
3172  * tb_switch_query_dp_resource() - Query availability of DP resource
3173  * @sw: Switch whose DP resource is queried
3174  * @in: DP IN port
3175  *
3176  * Queries availability of a DP resource for DP tunneling using switch
3177  * specific means. Returns %true if the resource is available.
3178  */
3179 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3180 {
3181     if (tb_switch_is_usb4(sw))
3182         return usb4_switch_query_dp_resource(sw, in);
3183     return tb_lc_dp_sink_query(sw, in);
3184 }
3185 
3186 /**
3187  * tb_switch_alloc_dp_resource() - Allocate available DP resource
3188  * @sw: Switch whose DP resource is allocated
3189  * @in: DP IN port
3190  *
3191  * Allocates a DP resource for DP tunneling. The resource must be
3192  * available for this to succeed (see tb_switch_query_dp_resource()).
3193  * Returns %0 on success and negative errno otherwise.
3194  */
3195 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3196 {
3197     int ret;
3198 
3199     if (tb_switch_is_usb4(sw))
3200         ret = usb4_switch_alloc_dp_resource(sw, in);
3201     else
3202         ret = tb_lc_dp_sink_alloc(sw, in);
3203 
3204     if (ret)
3205         tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3206                in->port);
3207     else
3208         tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3209 
3210     return ret;
3211 }
3212 
3213 /**
3214  * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3215  * @sw: Switch whose DP resource is de-allocated
3216  * @in: DP IN port
3217  *
3218  * De-allocates DP resource that was previously allocated for DP
3219  * tunneling.
3220  */
3221 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3222 {
3223     int ret;
3224 
3225     if (tb_switch_is_usb4(sw))
3226         ret = usb4_switch_dealloc_dp_resource(sw, in);
3227     else
3228         ret = tb_lc_dp_sink_dealloc(sw, in);
3229 
3230     if (ret)
3231         tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3232                in->port);
3233     else
3234         tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
3235 }
3236 
3237 struct tb_sw_lookup {
3238     struct tb *tb;
3239     u8 link;
3240     u8 depth;
3241     const uuid_t *uuid;
3242     u64 route;
3243 };
3244 
3245 static int tb_switch_match(struct device *dev, const void *data)
3246 {
3247     struct tb_switch *sw = tb_to_switch(dev);
3248     const struct tb_sw_lookup *lookup = data;
3249 
3250     if (!sw)
3251         return 0;
3252     if (sw->tb != lookup->tb)
3253         return 0;
3254 
3255     if (lookup->uuid)
3256         return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3257 
3258     if (lookup->route) {
3259         return sw->config.route_lo == lower_32_bits(lookup->route) &&
3260                sw->config.route_hi == upper_32_bits(lookup->route);
3261     }
3262 
3263     /* Root switch is matched only by depth */
3264     if (!lookup->depth)
3265         return !sw->depth;
3266 
3267     return sw->link == lookup->link && sw->depth == lookup->depth;
3268 }
3269 
3270 /**
3271  * tb_switch_find_by_link_depth() - Find switch by link and depth
3272  * @tb: Domain the switch belongs to
3273  * @link: Link number the switch is connected to
3274  * @depth: Depth of the switch in the link
3275  *
3276  * The returned switch has its reference count increased so the caller
3277  * needs to call tb_switch_put() when done with the switch.
3278  */
3279 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3280 {
3281     struct tb_sw_lookup lookup;
3282     struct device *dev;
3283 
3284     memset(&lookup, 0, sizeof(lookup));
3285     lookup.tb = tb;
3286     lookup.link = link;
3287     lookup.depth = depth;
3288 
3289     dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3290     if (dev)
3291         return tb_to_switch(dev);
3292 
3293     return NULL;
3294 }
3295 
3296 /**
3297  * tb_switch_find_by_uuid() - Find switch by UUID
3298  * @tb: Domain the switch belongs to
3299  * @uuid: UUID to look for
3300  *
3301  * The returned switch has its reference count increased so the caller
3302  * needs to call tb_switch_put() when done with the switch.
3303  */
3304 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
3305 {
3306     struct tb_sw_lookup lookup;
3307     struct device *dev;
3308 
3309     memset(&lookup, 0, sizeof(lookup));
3310     lookup.tb = tb;
3311     lookup.uuid = uuid;
3312 
3313     dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3314     if (dev)
3315         return tb_to_switch(dev);
3316 
3317     return NULL;
3318 }
3319 
3320 /**
3321  * tb_switch_find_by_route() - Find switch by route string
3322  * @tb: Domain the switch belongs to
3323  * @route: Route string to look for
3324  *
3325  * The returned switch has its reference count increased so the caller
3326  * needs to call tb_switch_put() when done with the switch.
3327  */
3328 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3329 {
3330     struct tb_sw_lookup lookup;
3331     struct device *dev;
3332 
3333     if (!route)
3334         return tb_switch_get(tb->root_switch);
3335 
3336     memset(&lookup, 0, sizeof(lookup));
3337     lookup.tb = tb;
3338     lookup.route = route;
3339 
3340     dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3341     if (dev)
3342         return tb_to_switch(dev);
3343 
3344     return NULL;
3345 }
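
/*
 * Example lookup (a sketch): all of the tb_switch_find_*() helpers
 * return a referenced switch, so the caller must balance the reference:
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		...
 *		tb_switch_put(sw);
 *	}
 */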
3346 
3347 /**
3348  * tb_switch_find_port() - return the first port of @type on @sw or NULL
3349  * @sw: Switch to find the port from
3350  * @type: Port type to look for
3351  */
3352 struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3353                     enum tb_port_type type)
3354 {
3355     struct tb_port *port;
3356 
3357     tb_switch_for_each_port(sw, port) {
3358         if (port->config.type == type)
3359             return port;
3360     }
3361 
3362     return NULL;
3363 }
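
/*
 * For example (a sketch), finding the first DP IN adapter of a router:
 *
 *	struct tb_port *in = tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
 *	if (in)
 *		tb_port_dbg(in, "found DP IN adapter\n");
 */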
3364 
3365 static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
3366 {
3367     u32 phy;
3368     int ret;
3369 
3370     ret = tb_port_read(port, &phy, TB_CFG_PORT,
3371                port->cap_phy + LANE_ADP_CS_1, 1);
3372     if (ret)
3373         return ret;
3374 
3375     if (secondary)
3376         phy |= LANE_ADP_CS_1_PMS;
3377     else
3378         phy &= ~LANE_ADP_CS_1_PMS;
3379 
3380     return tb_port_write(port, &phy, TB_CFG_PORT,
3381                  port->cap_phy + LANE_ADP_CS_1, 1);
3382 }
3383 
3384 static int tb_port_pm_secondary_enable(struct tb_port *port)
3385 {
3386     return __tb_port_pm_secondary_set(port, true);
3387 }
3388 
3389 static int tb_port_pm_secondary_disable(struct tb_port *port)
3390 {
3391     return __tb_port_pm_secondary_set(port, false);
3392 }
3393 
3394 static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
3395 {
3396     struct tb_switch *parent = tb_switch_parent(sw);
3397     struct tb_port *up, *down;
3398     int ret;
3399 
3400     if (!tb_route(sw))
3401         return 0;
3402 
3403     up = tb_upstream_port(sw);
3404     down = tb_port_at(tb_route(sw), parent);
3405     ret = tb_port_pm_secondary_enable(up);
3406     if (ret)
3407         return ret;
3408 
3409     return tb_port_pm_secondary_disable(down);
3410 }
3411 
3412 /* Called for USB4 or Titan Ridge routers only */
3413 static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
3414 {
3415     u32 mask, val;
3416     bool ret;
3417 
3418     /* Don't enable CLx in case of two single-lane links */
3419     if (!port->bonded && port->dual_link_port)
3420         return false;
3421 
3422     /* Don't enable CLx in case of inter-domain link */
3423     if (port->xdomain)
3424         return false;
3425 
3426     if (tb_switch_is_usb4(port->sw)) {
3427         if (!usb4_port_clx_supported(port))
3428             return false;
3429     } else if (!tb_lc_is_clx_supported(port)) {
3430         return false;
3431     }
3432 
3433     switch (clx) {
3434     case TB_CL1:
3435         /* CL0s and CL1 are enabled and supported together */
3436         mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
3437         break;
3438 
3439     /* For now we support only CL0s and CL1. Not CL2 */
3440     case TB_CL2:
3441     default:
3442         return false;
3443     }
3444 
3445     ret = tb_port_read(port, &val, TB_CFG_PORT,
3446                port->cap_phy + LANE_ADP_CS_0, 1);
3447     if (ret)
3448         return false;
3449 
3450     return !!(val & mask);
3451 }
3452 
3453 static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
3454 {
3455     u32 phy, mask;
3456     int ret;
3457 
3458     /* CL0s and CL1 are enabled and supported together */
3459     if (clx == TB_CL1)
3460         mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
3461     else
3462         /* For now we support only CL0s and CL1. Not CL2 */
3463         return -EOPNOTSUPP;
3464 
3465     ret = tb_port_read(port, &phy, TB_CFG_PORT,
3466                port->cap_phy + LANE_ADP_CS_1, 1);
3467     if (ret)
3468         return ret;
3469 
3470     if (enable)
3471         phy |= mask;
3472     else
3473         phy &= ~mask;
3474 
3475     return tb_port_write(port, &phy, TB_CFG_PORT,
3476                  port->cap_phy + LANE_ADP_CS_1, 1);
3477 }
3478 
3479 static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
3480 {
3481     return __tb_port_clx_set(port, clx, false);
3482 }
3483 
3484 static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
3485 {
3486     return __tb_port_clx_set(port, clx, true);
3487 }
3488 
3489 static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
3490 {
3491     struct tb_switch *parent = tb_switch_parent(sw);
3492     bool up_clx_support, down_clx_support;
3493     struct tb_port *up, *down;
3494     int ret;
3495 
3496     if (!tb_switch_is_clx_supported(sw))
3497         return 0;
3498 
3499     /*
3500      * CLx for the host router's downstream port is enabled as part of
3501      * the downstream router enabling procedure, so skip the host router.
3502      */
3503     if (!tb_route(sw))
3504         return 0;
3505 
3506     /* Enable CLx only for first hop router (depth = 1) */
3507     if (tb_route(parent))
3508         return 0;
3509 
3510     ret = tb_switch_pm_secondary_resolve(sw);
3511     if (ret)
3512         return ret;
3513 
3514     up = tb_upstream_port(sw);
3515     down = tb_port_at(tb_route(sw), parent);
3516 
3517     up_clx_support = tb_port_clx_supported(up, clx);
3518     down_clx_support = tb_port_clx_supported(down, clx);
3519 
3520     tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
3521             up_clx_support ? "" : "not ");
3522     tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
3523             down_clx_support ? "" : "not ");
3524 
3525     if (!up_clx_support || !down_clx_support)
3526         return -EOPNOTSUPP;
3527 
3528     ret = tb_port_clx_enable(up, clx);
3529     if (ret)
3530         return ret;
3531 
3532     ret = tb_port_clx_enable(down, clx);
3533     if (ret) {
3534         tb_port_clx_disable(up, clx);
3535         return ret;
3536     }
3537 
3538     ret = tb_switch_mask_clx_objections(sw);
3539     if (ret) {
3540         tb_port_clx_disable(up, clx);
3541         tb_port_clx_disable(down, clx);
3542         return ret;
3543     }
3544 
3545     sw->clx = clx;
3546 
3547     tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
3548     return 0;
3549 }
3550 
3551 /**
3552  * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
3553  * @sw: Router to enable CLx for
3554  * @clx: The CLx state to enable
3555  *
3556  * Enables the CLx state only for the first hop router. That is the most
3557  * common use-case; it is intended for better thermal management and so
3558  * helps to improve performance. CLx is enabled only if both sides of the
3559  * link support CLx, the link is not configured as two single-lane links,
3560  * and the link is not an inter-domain link. The complete set of
3561  * conditions is described in the CM Guide 1.0 section 8.1.
3562  *
3563  * Return: Returns 0 on success or an error code on failure.
3564  */
int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
    struct tb_switch *root_sw = sw->tb->root_switch;

    if (!clx_enabled)
        return 0;

    /*
     * CLx is not enabled and validated on Intel USB4 platforms before
     * Alder Lake.
     */
    if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
        return 0;

    switch (clx) {
    case TB_CL1:
        /* CL0s and CL1 are enabled and supported together */
        return __tb_switch_enable_clx(sw, clx);

    default:
        return -EOPNOTSUPP;
    }
}
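
/*
 * Illustrative sketch (not built): a connection manager would typically
 * just attempt CL1 and treat -EOPNOTSUPP as a normal outcome, since many
 * topologies legitimately do not support CLx. The function below is a
 * hypothetical example caller, not part of this driver.
 */
#if 0
static void example_try_enable_clx(struct tb_switch *sw)
{
    int ret = tb_switch_enable_clx(sw, TB_CL1);

    /* Unsupported link/topology is expected; only report real errors */
    if (ret && ret != -EOPNOTSUPP)
        tb_sw_warn(sw, "failed to enable CL1: %d\n", ret);
}
#endif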

static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
    struct tb_switch *parent = tb_switch_parent(sw);
    struct tb_port *up, *down;
    int ret;

    if (!tb_switch_is_clx_supported(sw))
        return 0;

    /*
     * The host router's downstream port gets CLx disabled as part of
     * disabling its downstream (first depth) router, so there is
     * nothing to do for the host router itself here.
     */
    if (!tb_route(sw))
        return 0;

    /* Disable CLx only for first hop router (depth = 1) */
    if (tb_route(parent))
        return 0;

    up = tb_upstream_port(sw);
    down = tb_port_at(tb_route(sw), parent);
    ret = tb_port_clx_disable(up, clx);
    if (ret)
        return ret;

    ret = tb_port_clx_disable(down, clx);
    if (ret)
        return ret;

    sw->clx = TB_CLX_DISABLE;

    tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
    return 0;
}

/**
 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
 * @sw: Router to disable CLx for
 * @clx: The CLx state to disable
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
    if (!clx_enabled)
        return 0;

    switch (clx) {
    case TB_CL1:
        /* CL0s and CL1 are enabled and supported together */
        return __tb_switch_disable_clx(sw, clx);

    default:
        return -EOPNOTSUPP;
    }
}

/**
 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
 * @sw: Router to mask objections for
 *
 * Mask the objections coming from the second depth routers in order to
 * stop these objections from interfering with the CLx states of the first
 * depth link.
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
    int up_port = sw->config.upstream_port_number;
    u32 offset, val[2], mask_obj, unmask_obj;
    int ret, i;

    /* Of the pre-USB4 devices, only Titan Ridge supports CLx states */
    if (!tb_switch_is_titan_ridge(sw))
        return 0;

    if (!tb_route(sw))
        return 0;

    /*
     * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
     * Port A consists of lane adapters 1,2 and
     * Port B consists of lane adapters 3,4.
     * If the upstream port is A (lanes 1,2), we mask objections from
     * Port B (lanes 3,4) and unmask objections from Port A, and
     * vice-versa.
     */
    if (up_port == 1) {
        mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
        unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
        offset = TB_LOW_PWR_C1_CL1;
    } else {
        mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
        unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
        offset = TB_LOW_PWR_C3_CL1;
    }

    ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
             sw->cap_lp + offset, ARRAY_SIZE(val));
    if (ret)
        return ret;

    for (i = 0; i < ARRAY_SIZE(val); i++) {
        val[i] |= mask_obj;
        val[i] &= ~unmask_obj;
    }

    return tb_sw_write(sw, &val, TB_CFG_SWITCH,
               sw->cap_lp + offset, ARRAY_SIZE(val));
}

/*
 * Can be used to access (read or write) a specified PCIe bridge on any
 * Thunderbolt 3 device; this helper performs the write flow. For now it
 * is used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
                       unsigned int pcie_offset, u32 value)
{
    u32 offset, command, val;
    int ret;

    if (sw->generation != 3)
        return -EOPNOTSUPP;

    offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
    ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
    if (ret)
        return ret;

    command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
    command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
    command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
    command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
            << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
    command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

    offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;

    ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
    if (ret)
        return ret;

    ret = tb_switch_wait_for_bit(sw, offset,
                     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
    if (ret)
        return ret;

    ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
    if (ret)
        return ret;

    if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
        return -ETIMEDOUT;

    return 0;
}
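
/*
 * For reference, a hypothetical helper (not built) isolating how the
 * command dword above is composed: the PCIe config space dword offset,
 * the bridge select bit, the read/write flag, the command value and the
 * request/ack bit that hardware clears once the command completes. This
 * only mirrors the composition in tb_switch_pcie_bridge_write(); the
 * masks and shifts come from the existing register definitions.
 */
#if 0
static u32 example_pcie_wr_command(unsigned int bridge,
                   unsigned int pcie_offset)
{
    u32 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;

    command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
    /* Read/write flag; set for the write flow used in this file */
    command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
    command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
            << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
    /* Hardware clears this bit when the command has completed */
    command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

    return command;
}
#endif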

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1 for
 *
 * For a Titan Ridge switch to enter a CLx state, its PCIe bridges must
 * allow entry to the PCIe L1 state. Should be called after the upstream
 * PCIe tunnel has been configured. Due to a limitation on Intel
 * platforms, this should be called only for the first hop switch.
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
    struct tb_switch *parent = tb_switch_parent(sw);
    int ret;

    if (!tb_route(sw))
        return 0;

    if (!tb_switch_is_titan_ridge(sw))
        return 0;

    /* Enable PCIe L1 only for first hop router (depth = 1) */
    if (tb_route(parent))
        return 0;

    /* Write to downstream PCIe bridge #5 aka Dn4 */
    ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
    if (ret)
        return ret;

    /* Write to Upstream PCIe bridge #0 aka Up0 */
    return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}
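
/*
 * Illustrative usage (not built): a connection manager could call this
 * right after the PCIe tunnel to the first hop router has been
 * activated. Failure is not fatal; the link simply will not enter CLx.
 * The function name below is hypothetical.
 */
#if 0
static void example_after_pcie_tunnel(struct tb_switch *sw)
{
    if (tb_switch_pcie_l1_enable(sw))
        tb_sw_warn(sw, "failed to enable PCIe L1\n");
}
#endif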

/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router. For Alpine Ridge and Titan Ridge this
 * performs the special flows that make the xHCI functional for any
 * device connected to the Type-C port. Call only after the PCIe tunnel
 * has been established. The function only performs the connect if it
 * has not been done already, so it can be called several times for the
 * same router.
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_switch_xhci_connect(struct tb_switch *sw)
{
    struct tb_port *port1, *port3;
    int ret;

    if (sw->generation != 3)
        return 0;

    port1 = &sw->ports[1];
    port3 = &sw->ports[3];

    if (tb_switch_is_alpine_ridge(sw)) {
        bool usb_port1, usb_port3, xhci_port1, xhci_port3;

        usb_port1 = tb_lc_is_usb_plugged(port1);
        usb_port3 = tb_lc_is_usb_plugged(port3);
        xhci_port1 = tb_lc_is_xhci_connected(port1);
        xhci_port3 = tb_lc_is_xhci_connected(port3);

        /* Figure out correct USB port to connect */
        if (usb_port1 && !xhci_port1) {
            ret = tb_lc_xhci_connect(port1);
            if (ret)
                return ret;
        }
        if (usb_port3 && !xhci_port3)
            return tb_lc_xhci_connect(port3);
    } else if (tb_switch_is_titan_ridge(sw)) {
        ret = tb_lc_xhci_connect(port1);
        if (ret)
            return ret;
        return tb_lc_xhci_connect(port3);
    }

    return 0;
}
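
/*
 * Illustrative usage (not built): since tb_switch_xhci_connect() is a
 * no-op for non-generation-3 routers and only performs the connect if
 * it has not been done already, a caller can invoke it unconditionally
 * once the PCIe tunnel is up. Hypothetical example, not part of this
 * driver.
 */
#if 0
static void example_connect_xhci(struct tb_switch *sw)
{
    if (tb_switch_xhci_connect(sw))
        tb_sw_warn(sw, "failed to connect xHCI\n");
}
#endif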

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
 * ports.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
    if (sw->generation == 3) {
        struct tb_port *port1 = &sw->ports[1];
        struct tb_port *port3 = &sw->ports[3];

        tb_lc_xhci_disconnect(port1);
        tb_port_dbg(port1, "disconnected xHCI\n");
        tb_lc_xhci_disconnect(port3);
        tb_port_dbg(port3, "disconnected xHCI\n");
    }
}