// SPDX-License-Identifier: GPL-2.0
/*
 * Internal Thunderbolt Connection Manager. The ICM is firmware running
 * on the Thunderbolt host controller that performs most of the
 * low-level handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD            0x30
#define PCIE2CIO_CMD_TIMEOUT        BIT(31)
#define PCIE2CIO_CMD_START      BIT(30)
#define PCIE2CIO_CMD_WRITE      BIT(21)
#define PCIE2CIO_CMD_CS_MASK        GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT       19
#define PCIE2CIO_CMD_PORT_MASK      GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT     13

#define PCIE2CIO_WRDATA         0x34
#define PCIE2CIO_RDDATA         0x38

#define PHY_PORT_CS1            0x37
#define PHY_PORT_CS1_LINK_DISABLE   BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK    GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT   26

#define ICM_TIMEOUT         5000    /* ms */
#define ICM_APPROVE_TIMEOUT     10000   /* ms */
#define ICM_MAX_LINK            4

static bool start_icm;
module_param(start_icm, bool, 0444);
MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");

/**
 * struct usb4_switch_nvm_auth - Holds USB4 NVM_AUTH status
 * @reply: Reply from ICM firmware is placed here
 * @request: Request that is sent to ICM firmware
 * @icm: Pointer to ICM private data
 */
struct usb4_switch_nvm_auth {
    struct icm_usb4_switch_op_response reply;
    struct icm_usb4_switch_op request;
    struct icm *icm;
};

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *         controller is connected to. This is only set for systems
 *         where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *       (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
 * @proto_version: Firmware protocol version
 * @last_nvm_auth: Last USB4 router NVM_AUTH result (or %NULL if not set)
 * @veto: Is RTD3 veto in effect
 * @is_supported: Checks if we can support ICM on this controller
 * @cio_reset: Trigger CIO reset
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @set_uuid: Set UUID for the root switch (optional)
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 * @rtd3_veto: Handle RTD3 veto notification ICM message
 */
struct icm {
    struct mutex request_lock;
    struct delayed_work rescan_work;
    struct pci_dev *upstream_port;
    int vnd_cap;
    bool safe_mode;
    size_t max_boot_acl;
    bool rpm;
    bool can_upgrade_nvm;
    u8 proto_version;
    struct usb4_switch_nvm_auth *last_nvm_auth;
    bool veto;
    bool (*is_supported)(struct tb *tb);
    int (*cio_reset)(struct tb *tb);
    int (*get_mode)(struct tb *tb);
    int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
    void (*save_devices)(struct tb *tb);
    int (*driver_ready)(struct tb *tb,
                enum tb_security_level *security_level,
                u8 *proto_version, size_t *nboot_acl, bool *rpm);
    void (*set_uuid)(struct tb *tb);
    void (*device_connected)(struct tb *tb,
                 const struct icm_pkg_header *hdr);
    void (*device_disconnected)(struct tb *tb,
                    const struct icm_pkg_header *hdr);
    void (*xdomain_connected)(struct tb *tb,
                  const struct icm_pkg_header *hdr);
    void (*xdomain_disconnected)(struct tb *tb,
                     const struct icm_pkg_header *hdr);
    void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
};

struct icm_notification {
    struct work_struct work;
    struct icm_pkg_header *pkg;
    struct tb *tb;
};

struct ep_name_entry {
    u8 len;
    u8 type;
    u8 data[];
};

#define EP_NAME_INTEL_VSS   0x10

/* Intel Vendor specific structure */
struct intel_vss {
    u16 vendor;
    u16 model;
    u8 mc;
    u8 flags;
    u16 pci_devid;
    u32 nvm_version;
};

#define INTEL_VSS_FLAGS_RTD3    BIT(0)

static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
    const void *end = ep_name + size;

    while (ep_name < end) {
        const struct ep_name_entry *ep = ep_name;

        if (!ep->len)
            break;
        if (ep_name + ep->len > end)
            break;

        if (ep->type == EP_NAME_INTEL_VSS)
            return (const struct intel_vss *)ep->data;

        ep_name += ep->len;
    }

    return NULL;
}
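
/*
 * Illustrative sketch (added for exposition, not part of the original
 * driver): the EP name buffer parsed above is a packed list of
 * variable-length entries. Judging from the walk in parse_intel_vss(),
 * each entry looks like:
 *
 *   struct ep_name_entry e = { .len = <whole entry length>,
 *                              .type = EP_NAME_INTEL_VSS,
 *                              .data = <struct intel_vss bytes> };
 *
 * ep_name advances by e.len per entry, so a zero length or an entry
 * overrunning the buffer terminates the walk.
 */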

static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
{
    const struct intel_vss *vss;

    vss = parse_intel_vss(ep_name, size);
    if (vss)
        return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);

    return false;
}

static inline struct tb *icm_to_tb(struct icm *icm)
{
    return ((void *)icm - sizeof(struct tb));
}

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
    u8 link;

    link = depth ? route >> ((depth - 1) * 8) : route;
    return tb_phy_port_from_link(link);
}

static inline u8 dual_link_from_link(u8 link)
{
    return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
    return (u64)route_hi << 32 | route_lo;
}

static inline u64 get_parent_route(u64 route)
{
    int depth = tb_route_length(route);
    return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}
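
/*
 * Worked example for the route helpers above (illustrative, assuming
 * TB_ROUTE_SHIFT == 8): a route string stores one port number per hop,
 * the first hop in the lowest byte. For route 0x0102 at depth 2:
 *
 *   phy_port_from_route(0x0102, 2) -> uses byte 1 (0x01, the last hop)
 *   get_parent_route(0x0102)       -> masks off byte 1, giving 0x02
 *
 * dual_link_from_link() maps between the two links of a lane pair:
 * 1 <-> 2, 3 <-> 4, while 0 (no link) stays 0.
 */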

static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
    unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
    u32 cmd;

    do {
        pci_read_config_dword(icm->upstream_port,
                      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
        if (!(cmd & PCIE2CIO_CMD_START)) {
            if (cmd & PCIE2CIO_CMD_TIMEOUT)
                break;
            return 0;
        }

        msleep(50);
    } while (time_before(jiffies, end));

    return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
             unsigned int port, unsigned int index, u32 *data)
{
    struct pci_dev *pdev = icm->upstream_port;
    int ret, vnd_cap = icm->vnd_cap;
    u32 cmd;

    cmd = index;
    cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
    cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
    cmd |= PCIE2CIO_CMD_START;
    pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

    ret = pci2cio_wait_completion(icm, 5000);
    if (ret)
        return ret;

    pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
    return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
              unsigned int port, unsigned int index, u32 data)
{
    struct pci_dev *pdev = icm->upstream_port;
    int vnd_cap = icm->vnd_cap;
    u32 cmd;

    pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

    cmd = index;
    cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
    cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
    cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
    pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

    return pci2cio_wait_completion(icm, 5000);
}
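
/*
 * Summary of the PCIe2CIO mailbox sequence implemented above
 * (descriptive only): a write first stores the payload in
 * PCIE2CIO_WRDATA, then composes PCIE2CIO_CMD from the dword index,
 * port and config space and sets PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START.
 * Completion is signaled by the hardware clearing PCIE2CIO_CMD_START;
 * PCIE2CIO_CMD_TIMEOUT set at that point means the operation failed.
 * Reads follow the same sequence without the write flag and pick up
 * the result from PCIE2CIO_RDDATA.
 */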

static bool icm_match(const struct tb_cfg_request *req,
              const struct ctl_pkg *pkg)
{
    const struct icm_pkg_header *res_hdr = pkg->buffer;
    const struct icm_pkg_header *req_hdr = req->request;

    if (pkg->frame.eof != req->response_type)
        return false;
    if (res_hdr->code != req_hdr->code)
        return false;

    return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
    const struct icm_pkg_header *hdr = pkg->buffer;

    if (hdr->packet_id < req->npackets) {
        size_t offset = hdr->packet_id * req->response_size;

        memcpy(req->response + offset, pkg->buffer, req->response_size);
    }

    return hdr->packet_id == hdr->total_packets - 1;
}

static int icm_request(struct tb *tb, const void *request, size_t request_size,
               void *response, size_t response_size, size_t npackets,
               unsigned int timeout_msec)
{
    struct icm *icm = tb_priv(tb);
    int retries = 3;

    do {
        struct tb_cfg_request *req;
        struct tb_cfg_result res;

        req = tb_cfg_request_alloc();
        if (!req)
            return -ENOMEM;

        req->match = icm_match;
        req->copy = icm_copy;
        req->request = request;
        req->request_size = request_size;
        req->request_type = TB_CFG_PKG_ICM_CMD;
        req->response = response;
        req->npackets = npackets;
        req->response_size = response_size;
        req->response_type = TB_CFG_PKG_ICM_RESP;

        mutex_lock(&icm->request_lock);
        res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
        mutex_unlock(&icm->request_lock);

        tb_cfg_request_put(req);

        if (res.err != -ETIMEDOUT)
            return res.err == 1 ? -EIO : res.err;

        usleep_range(20, 50);
    } while (retries--);

    return -ETIMEDOUT;
}
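
/*
 * Sketch of the usage pattern followed by all the icm_*_driver_ready()
 * style helpers below (illustrative only):
 *
 *   struct icm_pkg_driver_ready request = {
 *       .hdr.code = ICM_DRIVER_READY,
 *   };
 *   struct icm_fr_pkg_driver_ready_response reply;
 *
 *   memset(&reply, 0, sizeof(reply));
 *   ret = icm_request(tb, &request, sizeof(request), &reply,
 *                     sizeof(reply), 1, ICM_TIMEOUT);
 *
 * A timed out request is retried up to three times (four attempts in
 * total) before -ETIMEDOUT is returned to the caller.
 */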

/*
 * If a rescan is queued to run (we are resuming), postpone it to give the
 * firmware some more time to send device connected notifications for the
 * next devices in the chain.
 */
static void icm_postpone_rescan(struct tb *tb)
{
    struct icm *icm = tb_priv(tb);

    if (delayed_work_pending(&icm->rescan_work))
        mod_delayed_work(tb->wq, &icm->rescan_work,
                 msecs_to_jiffies(500));
}

static void icm_veto_begin(struct tb *tb)
{
    struct icm *icm = tb_priv(tb);

    if (!icm->veto) {
        icm->veto = true;
        /* Keep the domain powered while veto is in effect */
        pm_runtime_get(&tb->dev);
    }
}

static void icm_veto_end(struct tb *tb)
{
    struct icm *icm = tb_priv(tb);

    if (icm->veto) {
        icm->veto = false;
        /* Allow the domain to suspend now */
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);
    }
}

static bool icm_firmware_running(const struct tb_nhi *nhi)
{
    u32 val;

    val = ioread32(nhi->iobase + REG_FW_STS);
    return !!(val & REG_FW_STS_ICM_EN);
}

static bool icm_fr_is_supported(struct tb *tb)
{
    return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
    int index;

    if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
        return 0;

    index = port >> ICM_PORT_INDEX_SHIFT;
    return index != 0xff ? index : 0;
}

static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
    struct icm_fr_pkg_get_topology_response *switches, *sw;
    struct icm_fr_pkg_get_topology request = {
        .hdr = { .code = ICM_GET_TOPOLOGY },
    };
    size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
    int ret, index;
    u8 i;

    switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
    if (!switches)
        return -ENOMEM;

    ret = icm_request(tb, &request, sizeof(request), switches,
              sizeof(*switches), npackets, ICM_TIMEOUT);
    if (ret)
        goto err_free;

    sw = &switches[0];
    index = icm_fr_get_switch_index(sw->ports[link]);
    if (!index) {
        ret = -ENODEV;
        goto err_free;
    }

    sw = &switches[index];
    for (i = 1; i < depth; i++) {
        unsigned int j;

        if (!(sw->first_data & ICM_SWITCH_USED)) {
            ret = -ENODEV;
            goto err_free;
        }

        for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
            index = icm_fr_get_switch_index(sw->ports[j]);
            if (index > sw->switch_index) {
                sw = &switches[index];
                break;
            }
        }
    }

    *route = get_route(sw->route_hi, sw->route_lo);

err_free:
    kfree(switches);
    return ret;
}

static void icm_fr_save_devices(struct tb *tb)
{
    nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}

static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
            u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
    struct icm_fr_pkg_driver_ready_response reply;
    struct icm_pkg_driver_ready request = {
        .hdr.code = ICM_DRIVER_READY,
    };
    int ret;

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (security_level)
        *security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

    return 0;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
    struct icm_fr_pkg_approve_device request;
    struct icm_fr_pkg_approve_device reply;
    int ret;

    memset(&request, 0, sizeof(request));
    memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
    request.hdr.code = ICM_APPROVE_DEVICE;
    request.connection_id = sw->connection_id;
    request.connection_key = sw->connection_key;

    memset(&reply, 0, sizeof(reply));
    /* Use a larger timeout as establishing tunnels can take some time */
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_APPROVE_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR) {
        tb_warn(tb, "PCIe tunnel creation failed\n");
        return -EIO;
    }

    return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
    struct icm_fr_pkg_add_device_key request;
    struct icm_fr_pkg_add_device_key_response reply;
    int ret;

    memset(&request, 0, sizeof(request));
    memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
    request.hdr.code = ICM_ADD_DEVICE_KEY;
    request.connection_id = sw->connection_id;
    request.connection_key = sw->connection_key;
    memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR) {
        tb_warn(tb, "Adding key to switch failed\n");
        return -EIO;
    }

    return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                       const u8 *challenge, u8 *response)
{
    struct icm_fr_pkg_challenge_device request;
    struct icm_fr_pkg_challenge_device_response reply;
    int ret;

    memset(&request, 0, sizeof(request));
    memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
    request.hdr.code = ICM_CHALLENGE_DEVICE;
    request.connection_id = sw->connection_id;
    request.connection_key = sw->connection_key;
    memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR)
        return -EKEYREJECTED;
    if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
        return -ENOKEY;

    memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

    return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                    int transmit_path, int transmit_ring,
                    int receive_path, int receive_ring)
{
    struct icm_fr_pkg_approve_xdomain_response reply;
    struct icm_fr_pkg_approve_xdomain request;
    int ret;

    memset(&request, 0, sizeof(request));
    request.hdr.code = ICM_APPROVE_XDOMAIN;
    request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
    memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

    request.transmit_path = transmit_path;
    request.transmit_ring = transmit_ring;
    request.receive_path = receive_path;
    request.receive_ring = receive_ring;

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR)
        return -EIO;

    return 0;
}

static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                       int transmit_path, int transmit_ring,
                       int receive_path, int receive_ring)
{
    u8 phy_port;
    u8 cmd;

    phy_port = tb_phy_port_from_link(xd->link);
    if (phy_port == 0)
        cmd = NHI_MAILBOX_DISCONNECT_PA;
    else
        cmd = NHI_MAILBOX_DISCONNECT_PB;

    nhi_mailbox_cmd(tb->nhi, cmd, 1);
    usleep_range(10, 50);
    nhi_mailbox_cmd(tb->nhi, cmd, 2);
    return 0;
}

static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
                      const uuid_t *uuid)
{
    struct tb *tb = parent_sw->tb;
    struct tb_switch *sw;

    sw = tb_switch_alloc(tb, &parent_sw->dev, route);
    if (IS_ERR(sw)) {
        tb_warn(tb, "failed to allocate switch at %llx\n", route);
        return sw;
    }

    sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
    if (!sw->uuid) {
        tb_switch_put(sw);
        return ERR_PTR(-ENOMEM);
    }

    init_completion(&sw->rpm_complete);
    return sw;
}

static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
{
    u64 route = tb_route(sw);
    int ret;

    /* Link the two switches now */
    tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
    tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

    ret = tb_switch_add(sw);
    if (ret)
        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;

    return ret;
}

static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
              u64 route, u8 connection_id, u8 connection_key,
              u8 link, u8 depth, bool boot)
{
    /* Disconnect from parent */
    tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
    /* Re-connect via updated port */
    tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

    /* Update with the new addressing information */
    sw->config.route_hi = upper_32_bits(route);
    sw->config.route_lo = lower_32_bits(route);
    sw->connection_id = connection_id;
    sw->connection_key = connection_key;
    sw->link = link;
    sw->depth = depth;
    sw->boot = boot;

    /* This switch still exists */
    sw->is_unplugged = false;

    /* Runtime resume is now complete */
    complete(&sw->rpm_complete);
}

static void remove_switch(struct tb_switch *sw)
{
    struct tb_switch *parent_sw;

    parent_sw = tb_to_switch(sw->dev.parent);
    tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
    tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
            const uuid_t *local_uuid, const uuid_t *remote_uuid,
            u8 link, u8 depth)
{
    struct tb_xdomain *xd;

    pm_runtime_get_sync(&sw->dev);

    xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
    if (!xd)
        goto out;

    xd->link = link;
    xd->depth = depth;

    tb_port_at(route, sw)->xdomain = xd;

    tb_xdomain_add(xd);

out:
    pm_runtime_mark_last_busy(&sw->dev);
    pm_runtime_put_autosuspend(&sw->dev);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
    xd->link = link;
    xd->route = route;
    xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
    struct tb_switch *sw;

    sw = tb_to_switch(xd->dev.parent);
    tb_port_at(xd->route, sw)->xdomain = NULL;
    tb_xdomain_remove(xd);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
    const struct icm_fr_event_device_connected *pkg =
        (const struct icm_fr_event_device_connected *)hdr;
    enum tb_security_level security_level;
    struct tb_switch *sw, *parent_sw;
    bool boot, dual_lane, speed_gen3;
    struct icm *icm = tb_priv(tb);
    bool authorized = false;
    struct tb_xdomain *xd;
    u8 link, depth;
    u64 route;
    int ret;

    icm_postpone_rescan(tb);

    link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
    depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
        ICM_LINK_INFO_DEPTH_SHIFT;
    authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
    security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
             ICM_FLAGS_SLEVEL_SHIFT;
    boot = pkg->link_info & ICM_LINK_INFO_BOOT;
    dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
    speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;

    if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
        tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
            link, depth);
        return;
    }

    sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
    if (sw) {
        u8 phy_port, sw_phy_port;

        parent_sw = tb_to_switch(sw->dev.parent);
        sw_phy_port = tb_phy_port_from_link(sw->link);
        phy_port = tb_phy_port_from_link(link);

        /*
         * On resume ICM will send us connected events for the
         * devices that still are present. However, that
         * information might have changed, for example because a
         * switch on a dual-link connection might have been
         * enumerated using the other link now. Make sure our
         * bookkeeping matches that.
         */
        if (sw->depth == depth && sw_phy_port == phy_port &&
            !!sw->authorized == authorized) {
            /*
             * It was enumerated through another link so update
             * the route string accordingly.
             */
            if (sw->link != link) {
                ret = icm->get_route(tb, link, depth, &route);
                if (ret) {
                    tb_err(tb, "failed to update route string for switch at %u.%u\n",
                           link, depth);
                    tb_switch_put(sw);
                    return;
                }
            } else {
                route = tb_route(sw);
            }

            update_switch(parent_sw, sw, route, pkg->connection_id,
                      pkg->connection_key, link, depth, boot);
            tb_switch_put(sw);
            return;
        }

        /*
         * User connected the same switch to another physical
         * port or to another part of the topology. Remove the
         * existing switch now before adding the new one.
         */
        remove_switch(sw);
        tb_switch_put(sw);
    }

    /*
     * If the switch was not found by UUID, look for a switch on the
     * same physical port (taking possible link aggregation into
     * account) and depth. If we find one it is definitely a stale
     * one so remove it first.
     */
    sw = tb_switch_find_by_link_depth(tb, link, depth);
    if (!sw) {
        u8 dual_link;

        dual_link = dual_link_from_link(link);
        if (dual_link)
            sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
    }
    if (sw) {
        remove_switch(sw);
        tb_switch_put(sw);
    }

    /* Remove existing XDomain connection if found */
    xd = tb_xdomain_find_by_link_depth(tb, link, depth);
    if (xd) {
        remove_xdomain(xd);
        tb_xdomain_put(xd);
    }

    parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
    if (!parent_sw) {
        tb_err(tb, "failed to find parent switch for %u.%u\n",
               link, depth);
        return;
    }

    ret = icm->get_route(tb, link, depth, &route);
    if (ret) {
        tb_err(tb, "failed to find route string for switch at %u.%u\n",
               link, depth);
        tb_switch_put(parent_sw);
        return;
    }

    pm_runtime_get_sync(&parent_sw->dev);

    sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
    if (!IS_ERR(sw)) {
        sw->connection_id = pkg->connection_id;
        sw->connection_key = pkg->connection_key;
        sw->link = link;
        sw->depth = depth;
        sw->authorized = authorized;
        sw->security_level = security_level;
        sw->boot = boot;
        sw->link_speed = speed_gen3 ? 20 : 10;
        sw->link_width = dual_lane ? 2 : 1;
        sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));

        if (add_switch(parent_sw, sw))
            tb_switch_put(sw);
    }

    pm_runtime_mark_last_busy(&parent_sw->dev);
    pm_runtime_put_autosuspend(&parent_sw->dev);

    tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
    const struct icm_fr_event_device_disconnected *pkg =
        (const struct icm_fr_event_device_disconnected *)hdr;
    struct tb_switch *sw;
    u8 link, depth;

    link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
    depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
        ICM_LINK_INFO_DEPTH_SHIFT;

    if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
        tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
        return;
    }

    sw = tb_switch_find_by_link_depth(tb, link, depth);
    if (!sw) {
        tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
            depth);
        return;
    }

    pm_runtime_get_sync(sw->dev.parent);

    remove_switch(sw);

    pm_runtime_mark_last_busy(sw->dev.parent);
    pm_runtime_put_autosuspend(sw->dev.parent);

    tb_switch_put(sw);
}

static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
    const struct icm_fr_event_xdomain_connected *pkg =
        (const struct icm_fr_event_xdomain_connected *)hdr;
    struct tb_xdomain *xd;
    struct tb_switch *sw;
    u8 link, depth;
    u64 route;

    link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
    depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
        ICM_LINK_INFO_DEPTH_SHIFT;

    if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
        tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
        return;
    }

    route = get_route(pkg->local_route_hi, pkg->local_route_lo);

    xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
    if (xd) {
        u8 xd_phy_port, phy_port;

        xd_phy_port = phy_port_from_route(xd->route, xd->depth);
        phy_port = phy_port_from_route(route, depth);

        if (xd->depth == depth && xd_phy_port == phy_port) {
            update_xdomain(xd, route, link);
            tb_xdomain_put(xd);
            return;
        }

        /*
         * If we find an existing XDomain connection, remove it
         * now. We need to go through the login handshake and
         * everything anyway to be able to re-establish the
         * connection.
         */
        remove_xdomain(xd);
        tb_xdomain_put(xd);
    }

    /*
     * Look if there already exists an XDomain in the same place
     * as the new one and in that case remove it because it is
     * most likely another host that got disconnected.
     */
    xd = tb_xdomain_find_by_link_depth(tb, link, depth);
    if (!xd) {
        u8 dual_link;

        dual_link = dual_link_from_link(link);
        if (dual_link)
            xd = tb_xdomain_find_by_link_depth(tb, dual_link,
                               depth);
    }
    if (xd) {
        remove_xdomain(xd);
        tb_xdomain_put(xd);
    }

    /*
     * If the user disconnected a switch during suspend and
     * connected another host to the same port, remove the switch
     * first.
     */
    sw = tb_switch_find_by_route(tb, route);
    if (sw) {
        remove_switch(sw);
        tb_switch_put(sw);
    }

    sw = tb_switch_find_by_link_depth(tb, link, depth);
    if (!sw) {
        tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
            depth);
        return;
    }

    add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
            depth);
    tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
    const struct icm_fr_event_xdomain_disconnected *pkg =
        (const struct icm_fr_event_xdomain_disconnected *)hdr;
    struct tb_xdomain *xd;

    /*
     * If the connection is through one or multiple devices, the
     * XDomain device is removed along with them so it is fine if we
     * cannot find it here.
     */
    xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
    if (xd) {
        remove_xdomain(xd);
        tb_xdomain_put(xd);
    }
}

static int icm_tr_cio_reset(struct tb *tb)
{
    return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
}

static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
            u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
    struct icm_tr_pkg_driver_ready_response reply;
    struct icm_pkg_driver_ready request = {
        .hdr.code = ICM_DRIVER_READY,
    };
    int ret;

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, 20000);
    if (ret)
        return ret;

    if (security_level)
        *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
    if (proto_version)
        *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >>
                ICM_TR_INFO_PROTO_VERSION_SHIFT;
    if (nboot_acl)
        *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
                ICM_TR_INFO_BOOT_ACL_SHIFT;
    if (rpm)
        *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

    return 0;
}

static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
    struct icm_tr_pkg_approve_device request;
    struct icm_tr_pkg_approve_device reply;
    int ret;

    memset(&request, 0, sizeof(request));
    memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
    request.hdr.code = ICM_APPROVE_DEVICE;
    request.route_lo = sw->config.route_lo;
    request.route_hi = sw->config.route_hi;
    request.connection_id = sw->connection_id;

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_APPROVE_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR) {
        tb_warn(tb, "PCIe tunnel creation failed\n");
        return -EIO;
    }

    return 0;
}

static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
    struct icm_tr_pkg_add_device_key_response reply;
    struct icm_tr_pkg_add_device_key request;
    int ret;

    memset(&request, 0, sizeof(request));
    memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
    request.hdr.code = ICM_ADD_DEVICE_KEY;
    request.route_lo = sw->config.route_lo;
    request.route_hi = sw->config.route_hi;
    request.connection_id = sw->connection_id;
    memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR) {
        tb_warn(tb, "Adding key to switch failed\n");
        return -EIO;
    }

    return 0;
}

static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                       const u8 *challenge, u8 *response)
{
    struct icm_tr_pkg_challenge_device_response reply;
    struct icm_tr_pkg_challenge_device request;
    int ret;

    memset(&request, 0, sizeof(request));
    memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
    request.hdr.code = ICM_CHALLENGE_DEVICE;
    request.route_lo = sw->config.route_lo;
    request.route_hi = sw->config.route_hi;
    request.connection_id = sw->connection_id;
    memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR)
        return -EKEYREJECTED;
    if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
        return -ENOKEY;

    memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

    return 0;
}

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                    int transmit_path, int transmit_ring,
                    int receive_path, int receive_ring)
{
    struct icm_tr_pkg_approve_xdomain_response reply;
    struct icm_tr_pkg_approve_xdomain request;
    int ret;

    memset(&request, 0, sizeof(request));
    request.hdr.code = ICM_APPROVE_XDOMAIN;
    request.route_hi = upper_32_bits(xd->route);
    request.route_lo = lower_32_bits(xd->route);
    request.transmit_path = transmit_path;
    request.transmit_ring = transmit_ring;
    request.receive_path = receive_path;
    request.receive_ring = receive_ring;
    memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR)
        return -EIO;

    return 0;
}

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
                    int stage)
{
    struct icm_tr_pkg_disconnect_xdomain_response reply;
    struct icm_tr_pkg_disconnect_xdomain request;
    int ret;

    memset(&request, 0, sizeof(request));
    request.hdr.code = ICM_DISCONNECT_XDOMAIN;
    request.stage = stage;
    request.route_hi = upper_32_bits(xd->route);
    request.route_lo = lower_32_bits(xd->route);
    memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR)
        return -EIO;

    return 0;
}

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                       int transmit_path, int transmit_ring,
                       int receive_path, int receive_ring)
{
    int ret;

    ret = icm_tr_xdomain_tear_down(tb, xd, 1);
    if (ret)
        return ret;

    usleep_range(10, 50);
    return icm_tr_xdomain_tear_down(tb, xd, 2);
}
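
/*
 * Like icm_fr_disconnect_xdomain_paths() above, the teardown happens in
 * two stages separated by a short delay, but here each stage is a
 * proper ICM_DISCONNECT_XDOMAIN message instead of an NHI mailbox
 * command.
 */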

static void
__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
              bool force_rtd3)
{
    const struct icm_tr_event_device_connected *pkg =
        (const struct icm_tr_event_device_connected *)hdr;
    bool authorized, boot, dual_lane, speed_gen3;
    enum tb_security_level security_level;
    struct tb_switch *sw, *parent_sw;
    struct tb_xdomain *xd;
    u64 route;

    icm_postpone_rescan(tb);

    /*
     * Currently we don't use the QoS information coming with the
     * device connected message, so simply ignore that extra packet
     * for now.
     */
    if (pkg->hdr.packet_id)
        return;

    route = get_route(pkg->route_hi, pkg->route_lo);
    authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
    security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
             ICM_FLAGS_SLEVEL_SHIFT;
    boot = pkg->link_info & ICM_LINK_INFO_BOOT;
    dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
    speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;

    if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
        tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
            route);
        return;
    }

    sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
    if (sw) {
        /* Update the switch if it is still in the same place */
        if (tb_route(sw) == route && !!sw->authorized == authorized) {
            parent_sw = tb_to_switch(sw->dev.parent);
            update_switch(parent_sw, sw, route, pkg->connection_id,
                      0, 0, 0, boot);
            tb_switch_put(sw);
            return;
        }

        remove_switch(sw);
        tb_switch_put(sw);
    }

    /* Another switch with the same address */
    sw = tb_switch_find_by_route(tb, route);
    if (sw) {
        remove_switch(sw);
        tb_switch_put(sw);
    }

    /* XDomain connection with the same address */
    xd = tb_xdomain_find_by_route(tb, route);
    if (xd) {
        remove_xdomain(xd);
        tb_xdomain_put(xd);
    }

    parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
    if (!parent_sw) {
        tb_err(tb, "failed to find parent switch for %llx\n", route);
        return;
    }

    pm_runtime_get_sync(&parent_sw->dev);

    sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
    if (!IS_ERR(sw)) {
        sw->connection_id = pkg->connection_id;
        sw->authorized = authorized;
        sw->security_level = security_level;
        sw->boot = boot;
        sw->link_speed = speed_gen3 ? 20 : 10;
        sw->link_width = dual_lane ? 2 : 1;
        sw->rpm = force_rtd3;
        if (!sw->rpm)
            sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
                            sizeof(pkg->ep_name));

        if (add_switch(parent_sw, sw))
            tb_switch_put(sw);
    }

    pm_runtime_mark_last_busy(&parent_sw->dev);
    pm_runtime_put_autosuspend(&parent_sw->dev);

    tb_switch_put(parent_sw);
}

static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
    __icm_tr_device_connected(tb, hdr, false);
}

static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
    const struct icm_tr_event_device_disconnected *pkg =
        (const struct icm_tr_event_device_disconnected *)hdr;
    struct tb_switch *sw;
    u64 route;

    route = get_route(pkg->route_hi, pkg->route_lo);

    sw = tb_switch_find_by_route(tb, route);
    if (!sw) {
        tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
        return;
    }
    pm_runtime_get_sync(sw->dev.parent);

    remove_switch(sw);

    pm_runtime_mark_last_busy(sw->dev.parent);
    pm_runtime_put_autosuspend(sw->dev.parent);

    tb_switch_put(sw);
}

static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
    const struct icm_tr_event_xdomain_connected *pkg =
        (const struct icm_tr_event_xdomain_connected *)hdr;
    struct tb_xdomain *xd;
    struct tb_switch *sw;
    u64 route;

    if (!tb->root_switch)
        return;

    route = get_route(pkg->local_route_hi, pkg->local_route_lo);

    xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
    if (xd) {
        if (xd->route == route) {
            update_xdomain(xd, route, 0);
            tb_xdomain_put(xd);
            return;
        }

        remove_xdomain(xd);
        tb_xdomain_put(xd);
    }

    /* An existing xdomain with the same address */
    xd = tb_xdomain_find_by_route(tb, route);
    if (xd) {
        remove_xdomain(xd);
        tb_xdomain_put(xd);
    }

    /*
     * If the user disconnected a switch during suspend and
     * connected another host to the same port, remove the switch
     * first.
     */
    sw = tb_switch_find_by_route(tb, route);
    if (sw) {
        remove_switch(sw);
        tb_switch_put(sw);
    }

    sw = tb_switch_find_by_route(tb, get_parent_route(route));
    if (!sw) {
        tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
        return;
    }

    add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
    tb_switch_put(sw);
}

static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
    const struct icm_tr_event_xdomain_disconnected *pkg =
        (const struct icm_tr_event_xdomain_disconnected *)hdr;
    struct tb_xdomain *xd;
    u64 route;

    route = get_route(pkg->route_hi, pkg->route_lo);

    xd = tb_xdomain_find_by_route(tb, route);
    if (xd) {
        remove_xdomain(xd);
        tb_xdomain_put(xd);
    }
}

static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
    struct pci_dev *parent;

    parent = pci_upstream_bridge(pdev);
    while (parent) {
        if (!pci_is_pcie(parent))
            return NULL;
        if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
            break;
        parent = pci_upstream_bridge(parent);
    }

    if (!parent)
        return NULL;

    switch (parent->device) {
    case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
    case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
    case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
    case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
    case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
    case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
    case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
        return parent;
    }

    return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
    struct pci_dev *upstream_port;
    struct icm *icm = tb_priv(tb);

    /*
     * Starting from Alpine Ridge we can use ICM on Apple machines
     * as well. We just need to reset and re-enable it first.
     * However, only start it if explicitly asked by the user.
     */
    if (icm_firmware_running(tb->nhi))
        return true;
    if (!start_icm)
        return false;

    /*
     * Find the upstream PCIe port in case we need to do the reset
     * through its vendor specific registers.
     */
    upstream_port = get_upstream_port(tb->nhi->pdev);
    if (upstream_port) {
        int cap;

        cap = pci_find_ext_capability(upstream_port,
                          PCI_EXT_CAP_ID_VNDR);
        if (cap > 0) {
            icm->upstream_port = upstream_port;
            icm->vnd_cap = cap;

            return true;
        }
    }

    return false;
}

static int icm_ar_cio_reset(struct tb *tb)
{
    return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_ar_get_mode(struct tb *tb)
{
    struct tb_nhi *nhi = tb->nhi;
    int retries = 60;
    u32 val;

    do {
        val = ioread32(nhi->iobase + REG_FW_STS);
        if (val & REG_FW_STS_NVM_AUTH_DONE)
            break;
        msleep(50);
    } while (--retries);

    if (!retries) {
        dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
        return -ENODEV;
    }

    return nhi_mailbox_mode(nhi);
}
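
/*
 * The polling loop above gives the firmware up to 60 * 50 ms = 3
 * seconds to finish NVM authentication before the actual mode is
 * queried through the NHI mailbox.
 */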

static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
            u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
    struct icm_ar_pkg_driver_ready_response reply;
    struct icm_pkg_driver_ready request = {
        .hdr.code = ICM_DRIVER_READY,
    };
    int ret;

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (security_level)
        *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
    if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
        *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
                ICM_AR_INFO_BOOT_ACL_SHIFT;
    if (rpm)
        *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

    return 0;
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
    struct icm_ar_pkg_get_route_response reply;
    struct icm_ar_pkg_get_route request = {
        .hdr = { .code = ICM_GET_ROUTE },
        .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
    };
    int ret;

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR)
        return -EIO;

    *route = get_route(reply.route_hi, reply.route_lo);
    return 0;
}

static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
    struct icm_ar_pkg_preboot_acl_response reply;
    struct icm_ar_pkg_preboot_acl request = {
        .hdr = { .code = ICM_PREBOOT_ACL },
    };
    int ret, i;

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR)
        return -EIO;

    for (i = 0; i < nuuids; i++) {
        u32 *uuid = (u32 *)&uuids[i];

        uuid[0] = reply.acl[i].uuid_lo;
        uuid[1] = reply.acl[i].uuid_hi;

        if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
            /* Map empty entries to null UUID */
            uuid[0] = 0;
            uuid[1] = 0;
        } else if (uuid[0] != 0 || uuid[1] != 0) {
            /* Upper two DWs are always ones */
            uuid[2] = 0xffffffff;
            uuid[3] = 0xffffffff;
        }
    }

    return ret;
}
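
/*
 * Illustrative example of the preboot ACL packing used above: ICM
 * stores only the two low dwords of each UUID, and an all-ones entry
 * marks an unused slot. An entry { .uuid_lo = 0x12345678,
 * .uuid_hi = 0x9abcdef0 } therefore expands to the UUID dwords
 * { 0x12345678, 0x9abcdef0, 0xffffffff, 0xffffffff }, while
 * { 0xffffffff, 0xffffffff } is reported back as the null UUID.
 */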

static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
                   size_t nuuids)
{
    struct icm_ar_pkg_preboot_acl_response reply;
    struct icm_ar_pkg_preboot_acl request = {
        .hdr = {
            .code = ICM_PREBOOT_ACL,
            .flags = ICM_FLAGS_WRITE,
        },
    };
    int ret, i;

    for (i = 0; i < nuuids; i++) {
        const u32 *uuid = (const u32 *)&uuids[i];

        if (uuid_is_null(&uuids[i])) {
            /*
             * Map null UUID to the empty (all ones) entries
             * for ICM.
             */
            request.acl[i].uuid_lo = 0xffffffff;
            request.acl[i].uuid_hi = 0xffffffff;
        } else {
            /* Two high DWs need to be set to all ones */
            if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
                return -EINVAL;

            request.acl[i].uuid_lo = uuid[0];
            request.acl[i].uuid_hi = uuid[1];
        }
    }

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, ICM_TIMEOUT);
    if (ret)
        return ret;

    if (reply.hdr.flags & ICM_FLAGS_ERROR)
        return -EIO;

    return 0;
}

static int
icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
             u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
    struct icm_tr_pkg_driver_ready_response reply;
    struct icm_pkg_driver_ready request = {
        .hdr.code = ICM_DRIVER_READY,
    };
    int ret;

    memset(&reply, 0, sizeof(reply));
    ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
              1, 20000);
    if (ret)
        return ret;

    if (proto_version)
        *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >>
                ICM_TR_INFO_PROTO_VERSION_SHIFT;

    /* Ice Lake always supports RTD3 */
    if (rpm)
        *rpm = true;

    return 0;
}

static void icm_icl_set_uuid(struct tb *tb)
{
    struct tb_nhi *nhi = tb->nhi;
    u32 uuid[4];

    pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
    pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
    uuid[2] = 0xffffffff;
    uuid[3] = 0xffffffff;

    tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}

static void
icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
    __icm_tr_device_connected(tb, hdr, true);
}

static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
{
    const struct icm_icl_event_rtd3_veto *pkg =
        (const struct icm_icl_event_rtd3_veto *)hdr;

    tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);

    if (pkg->veto_reason)
        icm_veto_begin(tb);
    else
        icm_veto_end(tb);
}

static bool icm_tgl_is_supported(struct tb *tb)
{
    unsigned long end = jiffies + msecs_to_jiffies(10);

    do {
        u32 val;

        val = ioread32(tb->nhi->iobase + REG_FW_STS);
        if (val & REG_FW_STS_NVM_AUTH_DONE)
            return true;
        usleep_range(100, 500);
    } while (time_before(jiffies, end));

    return false;
}

static void icm_handle_notification(struct work_struct *work)
{
    struct icm_notification *n = container_of(work, typeof(*n), work);
    struct tb *tb = n->tb;
    struct icm *icm = tb_priv(tb);

    mutex_lock(&tb->lock);

1702     /*
1703      * When the domain is stopped we flush its workqueue, but the
1704      * root switch is removed before that happens. In that case we
1705      * should treat the queued events as being canceled.
1706      */
1707     if (tb->root_switch) {
1708         switch (n->pkg->code) {
1709         case ICM_EVENT_DEVICE_CONNECTED:
1710             icm->device_connected(tb, n->pkg);
1711             break;
1712         case ICM_EVENT_DEVICE_DISCONNECTED:
1713             icm->device_disconnected(tb, n->pkg);
1714             break;
1715         case ICM_EVENT_XDOMAIN_CONNECTED:
1716             if (tb_is_xdomain_enabled())
1717                 icm->xdomain_connected(tb, n->pkg);
1718             break;
1719         case ICM_EVENT_XDOMAIN_DISCONNECTED:
1720             if (tb_is_xdomain_enabled())
1721                 icm->xdomain_disconnected(tb, n->pkg);
1722             break;
1723         case ICM_EVENT_RTD3_VETO:
1724             icm->rtd3_veto(tb, n->pkg);
1725             break;
1726         }
1727     }
1728 
1729     mutex_unlock(&tb->lock);
1730 
1731     kfree(n->pkg);
1732     kfree(n);
1733 }
1734 
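     /*
      * Copy the incoming event packet and defer its handling to the
      * domain workqueue.
      */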
1735 static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1736                  const void *buf, size_t size)
1737 {
1738     struct icm_notification *n;
1739 
1740     n = kmalloc(sizeof(*n), GFP_KERNEL);
1741     if (!n)
1742         return;
1743 
1744     n->pkg = kmemdup(buf, size, GFP_KERNEL);
1745     if (!n->pkg) {
1746         kfree(n);
1747         return;
1748     }
1749 
1750     INIT_WORK(&n->work, icm_handle_notification);
1751     n->tb = tb;
1752 
1753     queue_work(tb->wq, &n->work);
1754 }
1755 
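     /*
      * Send the driver ready message and then poll (50 retries, 50 ms
      * apart) until the root switch config space becomes readable.
      */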
1756 static int
1757 __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
1758            u8 *proto_version, size_t *nboot_acl, bool *rpm)
1759 {
1760     struct icm *icm = tb_priv(tb);
1761     unsigned int retries = 50;
1762     int ret;
1763 
1764     ret = icm->driver_ready(tb, security_level, proto_version, nboot_acl,
1765                 rpm);
1766     if (ret) {
1767         tb_err(tb, "failed to send driver ready to ICM\n");
1768         return ret;
1769     }
1770 
1771     /*
1772      * Wait here until the switch config space is accessible so
1773      * that we can read the root switch config successfully.
1774      */
1775     do {
1776         struct tb_cfg_result res;
1777         u32 tmp;
1778 
1779         res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
1780                       0, 1, 100);
1781         if (!res.err)
1782             return 0;
1783 
1784         msleep(50);
1785     } while (--retries);
1786 
1787     tb_err(tb, "failed to read root switch config space, giving up\n");
1788     return -ETIMEDOUT;
1789 }
1790 
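     /*
      * Manually reset and start the ICM firmware: arm the ARC to wait
      * for a CIO reset, enable the firmware CPU and then trigger the
      * controller specific CIO reset. Only possible when the PCIe
      * upstream port is known.
      */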
1791 static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
1792 {
1793     struct icm *icm = tb_priv(tb);
1794     u32 val;
1795 
1796     if (!icm->upstream_port)
1797         return -ENODEV;
1798 
1799     /* Put the ARC to wait for the CIO reset event to happen */
1800     val = ioread32(nhi->iobase + REG_FW_STS);
1801     val |= REG_FW_STS_CIO_RESET_REQ;
1802     iowrite32(val, nhi->iobase + REG_FW_STS);
1803 
1804     /* Re-start ARC */
1805     val = ioread32(nhi->iobase + REG_FW_STS);
1806     val |= REG_FW_STS_ICM_EN_INVERT;
1807     val |= REG_FW_STS_ICM_EN_CPU;
1808     iowrite32(val, nhi->iobase + REG_FW_STS);
1809 
1810     /* Trigger CIO reset now */
1811     return icm->cio_reset(tb);
1812 }
1813 
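     /*
      * Start the ICM firmware if it is not already running and poll up
      * to about 3 seconds for the NVM authentication done bit that
      * indicates it is up.
      */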
1814 static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
1815 {
1816     unsigned int retries = 10;
1817     int ret;
1818     u32 val;
1819 
1820     /* Check if the ICM firmware is already running */
1821     if (icm_firmware_running(nhi))
1822         return 0;
1823 
1824     dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
1825 
1826     ret = icm_firmware_reset(tb, nhi);
1827     if (ret)
1828         return ret;
1829 
1830     /* Wait until the ICM firmware tells us it is up and running */
1831     do {
1832         /* Check that the ICM firmware is running */
1833         val = ioread32(nhi->iobase + REG_FW_STS);
1834         if (val & REG_FW_STS_NVM_AUTH_DONE)
1835             return 0;
1836 
1837         msleep(300);
1838     } while (--retries);
1839 
1840     return -ETIMEDOUT;
1841 }
1842 
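     /*
      * Each physical port consists of two null ports. If both links
      * are up, disable and then re-enable them, presumably so that the
      * now running firmware renegotiates the links.
      */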
1843 static int icm_reset_phy_port(struct tb *tb, int phy_port)
1844 {
1845     struct icm *icm = tb_priv(tb);
1846     u32 state0, state1;
1847     int port0, port1;
1848     u32 val0, val1;
1849     int ret;
1850 
1851     if (!icm->upstream_port)
1852         return 0;
1853 
1854     if (phy_port) {
1855         port0 = 3;
1856         port1 = 4;
1857     } else {
1858         port0 = 1;
1859         port1 = 2;
1860     }
1861 
1862     /*
1863      * Read link status of both null ports belonging to a single
1864      * physical port.
1865      */
1866     ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1867     if (ret)
1868         return ret;
1869     ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1870     if (ret)
1871         return ret;
1872 
1873     state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
1874     state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1875     state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
1876     state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1877 
1878     /* If both links are up we need to reset them now */
1879     if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
1880         return 0;
1881 
1882     val0 |= PHY_PORT_CS1_LINK_DISABLE;
1883     ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1884     if (ret)
1885         return ret;
1886 
1887     val1 |= PHY_PORT_CS1_LINK_DISABLE;
1888     ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1889     if (ret)
1890         return ret;
1891 
1892     /* Wait a bit and then re-enable both ports */
1893     usleep_range(10, 100);
1894 
1895     ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1896     if (ret)
1897         return ret;
1898     ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1899     if (ret)
1900         return ret;
1901 
1902     val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
1903     ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1904     if (ret)
1905         return ret;
1906 
1907     val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
1908     return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1909 }
1910 
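     /*
      * Bring up the firmware: start it if needed, act on the reported
      * mode (safe mode or connection manager mode) and reset both
      * physical ports in case something was already connected.
      */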
1911 static int icm_firmware_init(struct tb *tb)
1912 {
1913     struct icm *icm = tb_priv(tb);
1914     struct tb_nhi *nhi = tb->nhi;
1915     int ret;
1916 
1917     ret = icm_firmware_start(tb, nhi);
1918     if (ret) {
1919         dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
1920         return ret;
1921     }
1922 
1923     if (icm->get_mode) {
1924         ret = icm->get_mode(tb);
1925 
1926         switch (ret) {
1927         case NHI_FW_SAFE_MODE:
1928             icm->safe_mode = true;
1929             break;
1930 
1931         case NHI_FW_CM_MODE:
1932             /* Ask ICM to accept all Thunderbolt devices */
1933             nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
1934             break;
1935 
1936         default:
1937             if (ret < 0)
1938                 return ret;
1939 
1940             tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
1941             return -ENODEV;
1942         }
1943     }
1944 
1945     /*
1946      * Reset both physical ports if there is anything connected to
1947      * them already.
1948      */
1949     ret = icm_reset_phy_port(tb, 0);
1950     if (ret)
1951         dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
1952     ret = icm_reset_phy_port(tb, 1);
1953     if (ret)
1954         dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
1955 
1956     return 0;
1957 }
1958 
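     /*
      * Domain-level driver ready: initialize the firmware, bail out
      * early in safe mode (only an NVM upgrade is possible then) and
      * record the security level, protocol version and preboot ACL
      * size.
      */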
1959 static int icm_driver_ready(struct tb *tb)
1960 {
1961     struct icm *icm = tb_priv(tb);
1962     int ret;
1963 
1964     ret = icm_firmware_init(tb);
1965     if (ret)
1966         return ret;
1967 
1968     if (icm->safe_mode) {
1969         tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
1970         tb_info(tb, "You need to update the NVM firmware of the controller before it can be used.\n");
1971         tb_info(tb, "For the latest updates check https://thunderbolttechnology.net/updates.\n");
1972         return 0;
1973     }
1974 
1975     ret = __icm_driver_ready(tb, &tb->security_level, &icm->proto_version,
1976                  &tb->nboot_acl, &icm->rpm);
1977     if (ret)
1978         return ret;
1979 
1980     /*
1981      * Make sure the number of supported preboot ACL entries
1982      * matches what we expect or disable the whole feature.
1983      */
1984     if (tb->nboot_acl > icm->max_boot_acl)
1985         tb->nboot_acl = 0;
1986 
1987     if (icm->proto_version >= 3)
1988         tb_dbg(tb, "USB4 proxy operations supported\n");
1989 
1990     return 0;
1991 }
1992 
1993 static int icm_suspend(struct tb *tb)
1994 {
1995     struct icm *icm = tb_priv(tb);
1996 
1997     if (icm->save_devices)
1998         icm->save_devices(tb);
1999 
2000     nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
2001     return 0;
2002 }
2003 
2004 /*
2005  * Mark all switches (except root switch) below this one unplugged. ICM
2006  * firmware will send us an updated list of switches after we have
2007  * sent it the driver ready command. If a switch is not in that list
2008  * it will be removed when we perform the rescan.
2009  */
2010 static void icm_unplug_children(struct tb_switch *sw)
2011 {
2012     struct tb_port *port;
2013 
2014     if (tb_route(sw))
2015         sw->is_unplugged = true;
2016 
2017     tb_switch_for_each_port(sw, port) {
2018         if (port->xdomain)
2019             port->xdomain->is_unplugged = true;
2020         else if (tb_port_has_remote(port))
2021             icm_unplug_children(port->remote->sw);
2022     }
2023 }
2024 
2025 static int complete_rpm(struct device *dev, void *data)
2026 {
2027     struct tb_switch *sw = tb_to_switch(dev);
2028 
2029     if (sw)
2030         complete(&sw->rpm_complete);
2031     return 0;
2032 }
2033 
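     /*
      * Remove an unplugged switch while keeping its parent runtime
      * resumed for the duration of the removal.
      */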
2034 static void remove_unplugged_switch(struct tb_switch *sw)
2035 {
2036     struct device *parent = get_device(sw->dev.parent);
2037 
2038     pm_runtime_get_sync(parent);
2039 
2040     /*
2041      * Signal rpm_complete for this switch and all switches below
2042      * it because tb_switch_remove() calls pm_runtime_get_sync()
2043      * which then waits for it.
2044      */
2045     complete_rpm(&sw->dev, NULL);
2046     bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
2047     tb_switch_remove(sw);
2048 
2049     pm_runtime_mark_last_busy(parent);
2050     pm_runtime_put_autosuspend(parent);
2051 
2052     put_device(parent);
2053 }
2054 
2055 static void icm_free_unplugged_children(struct tb_switch *sw)
2056 {
2057     struct tb_port *port;
2058 
2059     tb_switch_for_each_port(sw, port) {
2060         if (port->xdomain && port->xdomain->is_unplugged) {
2061             tb_xdomain_remove(port->xdomain);
2062             port->xdomain = NULL;
2063         } else if (tb_port_has_remote(port)) {
2064             if (port->remote->sw->is_unplugged) {
2065                 remove_unplugged_switch(port->remote->sw);
2066                 port->remote = NULL;
2067             } else {
2068                 icm_free_unplugged_children(port->remote->sw);
2069             }
2070         }
2071     }
2072 }
2073 
2074 static void icm_rescan_work(struct work_struct *work)
2075 {
2076     struct icm *icm = container_of(work, struct icm, rescan_work.work);
2077     struct tb *tb = icm_to_tb(icm);
2078 
2079     mutex_lock(&tb->lock);
2080     if (tb->root_switch)
2081         icm_free_unplugged_children(tb->root_switch);
2082     mutex_unlock(&tb->lock);
2083 }
2084 
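     /*
      * Called when resuming: clear any stale RTD3 veto, mark all
      * children unplugged, re-send driver ready and schedule a rescan
      * to drop devices that disappeared during suspend.
      */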
2085 static void icm_complete(struct tb *tb)
2086 {
2087     struct icm *icm = tb_priv(tb);
2088 
2089     if (tb->nhi->going_away)
2090         return;
2091 
2092     /*
2093      * If RTD3 was vetoed before we entered system suspend, allow
2094      * it again now before driver ready is sent. Firmware sends a
2095      * new RTD3 veto if it is still in effect after we have sent it
2096      * the driver ready command.
2097      */
2098     icm_veto_end(tb);
2099     icm_unplug_children(tb->root_switch);
2100 
2101     /*
2102      * Now that all existing children should be resumed, start
2103      * events from the ICM to get updated status.
2104      */
2105     __icm_driver_ready(tb, NULL, NULL, NULL, NULL);
2106 
2107     /*
2108      * We do not get notifications of devices that have been
2109      * unplugged during suspend, so schedule a rescan to clean
2110      * them up, if any.
2111      */
2112     queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
2113 }
2114 
2115 static int icm_runtime_suspend(struct tb *tb)
2116 {
2117     nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
2118     return 0;
2119 }
2120 
2121 static int icm_runtime_suspend_switch(struct tb_switch *sw)
2122 {
2123     if (tb_route(sw))
2124         reinit_completion(&sw->rpm_complete);
2125     return 0;
2126 }
2127 
2128 static int icm_runtime_resume_switch(struct tb_switch *sw)
2129 {
2130     if (tb_route(sw)) {
2131         if (!wait_for_completion_timeout(&sw->rpm_complete,
2132                          msecs_to_jiffies(500))) {
2133             dev_dbg(&sw->dev, "runtime resuming timed out\n");
2134         }
2135     }
2136     return 0;
2137 }
2138 
2139 static int icm_runtime_resume(struct tb *tb)
2140 {
2141     /*
2142      * We can reuse the same resume functionality as with system
2143      * suspend.
2144      */
2145     icm_complete(tb);
2146     return 0;
2147 }
2148 
2149 static int icm_start(struct tb *tb)
2150 {
2151     struct icm *icm = tb_priv(tb);
2152     int ret;
2153 
2154     if (icm->safe_mode)
2155         tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
2156     else
2157         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2158     if (IS_ERR(tb->root_switch))
2159         return PTR_ERR(tb->root_switch);
2160 
2161     tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
2162     tb->root_switch->rpm = icm->rpm;
2163 
2164     if (icm->set_uuid)
2165         icm->set_uuid(tb);
2166 
2167     ret = tb_switch_add(tb->root_switch);
2168     if (ret) {
2169         tb_switch_put(tb->root_switch);
2170         tb->root_switch = NULL;
2171     }
2172 
2173     return ret;
2174 }
2175 
2176 static void icm_stop(struct tb *tb)
2177 {
2178     struct icm *icm = tb_priv(tb);
2179 
2180     cancel_delayed_work(&icm->rescan_work);
2181     tb_switch_remove(tb->root_switch);
2182     tb->root_switch = NULL;
2183     nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
2184     kfree(icm->last_nvm_auth);
2185     icm->last_nvm_auth = NULL;
2186 }
2187 
2188 static int icm_disconnect_pcie_paths(struct tb *tb)
2189 {
2190     return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
2191 }
2192 
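     /*
      * Completion callback for the asynchronous NVM_AUTH request:
      * stash the reply so a later status query can report it.
      */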
2193 static void icm_usb4_switch_nvm_auth_complete(void *data)
2194 {
2195     struct usb4_switch_nvm_auth *auth = data;
2196     struct icm *icm = auth->icm;
2197     struct tb *tb = icm_to_tb(icm);
2198 
2199     tb_dbg(tb, "NVM_AUTH response for %llx flags %#x status %#x\n",
2200            get_route(auth->reply.route_hi, auth->reply.route_lo),
2201            auth->reply.hdr.flags, auth->reply.status);
2202 
2203     mutex_lock(&tb->lock);
2204     if (WARN_ON(icm->last_nvm_auth))
2205         kfree(icm->last_nvm_auth);
2206     icm->last_nvm_auth = auth;
2207     mutex_unlock(&tb->lock);
2208 }
2209 
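     /*
      * NVM_AUTH does not return immediately so the request is posted
      * asynchronously and the reply is stored by
      * icm_usb4_switch_nvm_auth_complete() for later retrieval.
      */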
2210 static int icm_usb4_switch_nvm_authenticate(struct tb *tb, u64 route)
2211 {
2212     struct usb4_switch_nvm_auth *auth;
2213     struct icm *icm = tb_priv(tb);
2214     struct tb_cfg_request *req;
2215     int ret;
2216 
2217     auth = kzalloc(sizeof(*auth), GFP_KERNEL);
2218     if (!auth)
2219         return -ENOMEM;
2220 
2221     auth->icm = icm;
2222     auth->request.hdr.code = ICM_USB4_SWITCH_OP;
2223     auth->request.route_hi = upper_32_bits(route);
2224     auth->request.route_lo = lower_32_bits(route);
2225     auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH;
2226 
2227     req = tb_cfg_request_alloc();
2228     if (!req) {
2229         ret = -ENOMEM;
2230         goto err_free_auth;
2231     }
2232 
2233     req->match = icm_match;
2234     req->copy = icm_copy;
2235     req->request = &auth->request;
2236     req->request_size = sizeof(auth->request);
2237     req->request_type = TB_CFG_PKG_ICM_CMD;
2238     req->response = &auth->reply;
2239     req->npackets = 1;
2240     req->response_size = sizeof(auth->reply);
2241     req->response_type = TB_CFG_PKG_ICM_RESP;
2242 
2243     tb_dbg(tb, "NVM_AUTH request for %llx\n", route);
2244 
2245     mutex_lock(&icm->request_lock);
2246     ret = tb_cfg_request(tb->ctl, req, icm_usb4_switch_nvm_auth_complete,
2247                  auth);
2248     mutex_unlock(&icm->request_lock);
2249 
2250     tb_cfg_request_put(req);
2251     if (ret)
2252         goto err_free_auth;
2253     return 0;
2254 
2255 err_free_auth:
2256     kfree(auth);
2257     return ret;
2258 }
2259 
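     /*
      * Proxy a native USB4 router operation through the ICM firmware,
      * copying metadata and data dwords in both directions.
      */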
2260 static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
2261                   u8 *status, const void *tx_data, size_t tx_data_len,
2262                   void *rx_data, size_t rx_data_len)
2263 {
2264     struct icm_usb4_switch_op_response reply;
2265     struct icm_usb4_switch_op request;
2266     struct tb *tb = sw->tb;
2267     struct icm *icm = tb_priv(tb);
2268     u64 route = tb_route(sw);
2269     int ret;
2270 
2271     /*
2272      * USB4 router operation proxy is supported in firmware if the
2273      * protocol version is 3 or higher.
2274      */
2275     if (icm->proto_version < 3)
2276         return -EOPNOTSUPP;
2277 
2278     /*
2279      * NVM_AUTH is a special USB4 proxy operation that does not
2280      * return immediately, so handle it separately.
2281      */
2282     if (opcode == USB4_SWITCH_OP_NVM_AUTH)
2283         return icm_usb4_switch_nvm_authenticate(tb, route);
2284 
2285     memset(&request, 0, sizeof(request));
2286     request.hdr.code = ICM_USB4_SWITCH_OP;
2287     request.route_hi = upper_32_bits(route);
2288     request.route_lo = lower_32_bits(route);
2289     request.opcode = opcode;
2290     if (metadata)
2291         request.metadata = *metadata;
2292 
2293     if (tx_data_len) {
2294         request.data_len_valid |= ICM_USB4_SWITCH_DATA_VALID;
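             /* OR in the length so the DATA_VALID flag set above is kept */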
2295         if (tx_data_len < ARRAY_SIZE(request.data))
2296             request.data_len_valid |=
2297                 tx_data_len & ICM_USB4_SWITCH_DATA_LEN_MASK;
2298         memcpy(request.data, tx_data, tx_data_len * sizeof(u32));
2299     }
2300 
2301     memset(&reply, 0, sizeof(reply));
2302     ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
2303               1, ICM_TIMEOUT);
2304     if (ret)
2305         return ret;
2306 
2307     if (reply.hdr.flags & ICM_FLAGS_ERROR)
2308         return -EIO;
2309 
2310     if (status)
2311         *status = reply.status;
2312 
2313     if (metadata)
2314         *metadata = reply.metadata;
2315 
2316     if (rx_data_len)
2317         memcpy(rx_data, reply.data, rx_data_len * sizeof(u32));
2318 
2319     return 0;
2320 }
2321 
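     /*
      * Report the result of the last asynchronous NVM_AUTH for the
      * given router, consuming the stored reply. Status is 0 if no
      * matching reply is pending.
      */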
2322 static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
2323                            u32 *status)
2324 {
2325     struct usb4_switch_nvm_auth *auth;
2326     struct tb *tb = sw->tb;
2327     struct icm *icm = tb_priv(tb);
2328     int ret = 0;
2329 
2330     if (icm->proto_version < 3)
2331         return -EOPNOTSUPP;
2332 
2333     auth = icm->last_nvm_auth;
2334     icm->last_nvm_auth = NULL;
2335 
2336     if (auth && auth->reply.route_hi == sw->config.route_hi &&
2337         auth->reply.route_lo == sw->config.route_lo) {
2338         tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
2339                tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
2340         if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
2341             ret = -EIO;
2342         else
2343             *status = auth->reply.status;
2344     } else {
2345         *status = 0;
2346     }
2347 
2348     kfree(auth);
2349     return ret;
2350 }
2351 
2352 /* Falcon Ridge */
2353 static const struct tb_cm_ops icm_fr_ops = {
2354     .driver_ready = icm_driver_ready,
2355     .start = icm_start,
2356     .stop = icm_stop,
2357     .suspend = icm_suspend,
2358     .complete = icm_complete,
2359     .handle_event = icm_handle_event,
2360     .approve_switch = icm_fr_approve_switch,
2361     .add_switch_key = icm_fr_add_switch_key,
2362     .challenge_switch_key = icm_fr_challenge_switch_key,
2363     .disconnect_pcie_paths = icm_disconnect_pcie_paths,
2364     .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
2365     .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
2366 };
2367 
2368 /* Alpine Ridge */
2369 static const struct tb_cm_ops icm_ar_ops = {
2370     .driver_ready = icm_driver_ready,
2371     .start = icm_start,
2372     .stop = icm_stop,
2373     .suspend = icm_suspend,
2374     .complete = icm_complete,
2375     .runtime_suspend = icm_runtime_suspend,
2376     .runtime_resume = icm_runtime_resume,
2377     .runtime_suspend_switch = icm_runtime_suspend_switch,
2378     .runtime_resume_switch = icm_runtime_resume_switch,
2379     .handle_event = icm_handle_event,
2380     .get_boot_acl = icm_ar_get_boot_acl,
2381     .set_boot_acl = icm_ar_set_boot_acl,
2382     .approve_switch = icm_fr_approve_switch,
2383     .add_switch_key = icm_fr_add_switch_key,
2384     .challenge_switch_key = icm_fr_challenge_switch_key,
2385     .disconnect_pcie_paths = icm_disconnect_pcie_paths,
2386     .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
2387     .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
2388 };
2389 
2390 /* Titan Ridge */
2391 static const struct tb_cm_ops icm_tr_ops = {
2392     .driver_ready = icm_driver_ready,
2393     .start = icm_start,
2394     .stop = icm_stop,
2395     .suspend = icm_suspend,
2396     .complete = icm_complete,
2397     .runtime_suspend = icm_runtime_suspend,
2398     .runtime_resume = icm_runtime_resume,
2399     .runtime_suspend_switch = icm_runtime_suspend_switch,
2400     .runtime_resume_switch = icm_runtime_resume_switch,
2401     .handle_event = icm_handle_event,
2402     .get_boot_acl = icm_ar_get_boot_acl,
2403     .set_boot_acl = icm_ar_set_boot_acl,
2404     .approve_switch = icm_tr_approve_switch,
2405     .add_switch_key = icm_tr_add_switch_key,
2406     .challenge_switch_key = icm_tr_challenge_switch_key,
2407     .disconnect_pcie_paths = icm_disconnect_pcie_paths,
2408     .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
2409     .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
2410     .usb4_switch_op = icm_usb4_switch_op,
2411     .usb4_switch_nvm_authenticate_status =
2412         icm_usb4_switch_nvm_authenticate_status,
2413 };
2414 
2415 /* Ice Lake */
2416 static const struct tb_cm_ops icm_icl_ops = {
2417     .driver_ready = icm_driver_ready,
2418     .start = icm_start,
2419     .stop = icm_stop,
2420     .complete = icm_complete,
2421     .runtime_suspend = icm_runtime_suspend,
2422     .runtime_resume = icm_runtime_resume,
2423     .handle_event = icm_handle_event,
2424     .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
2425     .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
2426     .usb4_switch_op = icm_usb4_switch_op,
2427     .usb4_switch_nvm_authenticate_status =
2428         icm_usb4_switch_nvm_authenticate_status,
2429 };
2430 
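     /*
      * Allocate a Thunderbolt domain using the firmware connection
      * manager if the NHI is a known ICM based controller and the
      * firmware reports it is usable. Returns NULL otherwise so the
      * caller can fall back to the software connection manager.
      */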
2431 struct tb *icm_probe(struct tb_nhi *nhi)
2432 {
2433     struct icm *icm;
2434     struct tb *tb;
2435 
2436     tb = tb_domain_alloc(nhi, ICM_TIMEOUT, sizeof(struct icm));
2437     if (!tb)
2438         return NULL;
2439 
2440     icm = tb_priv(tb);
2441     INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
2442     mutex_init(&icm->request_lock);
2443 
2444     switch (nhi->pdev->device) {
2445     case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2446     case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2447         icm->can_upgrade_nvm = true;
2448         icm->is_supported = icm_fr_is_supported;
2449         icm->get_route = icm_fr_get_route;
2450         icm->save_devices = icm_fr_save_devices;
2451         icm->driver_ready = icm_fr_driver_ready;
2452         icm->device_connected = icm_fr_device_connected;
2453         icm->device_disconnected = icm_fr_device_disconnected;
2454         icm->xdomain_connected = icm_fr_xdomain_connected;
2455         icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
2456         tb->cm_ops = &icm_fr_ops;
2457         break;
2458 
2459     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
2460     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
2461     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
2462     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
2463     case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
2464         icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2465         /*
2466          * NVM upgrade has not been tested on Apple systems and
2467          * they don't provide images publicly either. To be on
2468          * the safe side prevent root switch NVM upgrade on Macs
2469          * for now.
2470          */
2471         icm->can_upgrade_nvm = !x86_apple_machine;
2472         icm->is_supported = icm_ar_is_supported;
2473         icm->cio_reset = icm_ar_cio_reset;
2474         icm->get_mode = icm_ar_get_mode;
2475         icm->get_route = icm_ar_get_route;
2476         icm->save_devices = icm_fr_save_devices;
2477         icm->driver_ready = icm_ar_driver_ready;
2478         icm->device_connected = icm_fr_device_connected;
2479         icm->device_disconnected = icm_fr_device_disconnected;
2480         icm->xdomain_connected = icm_fr_xdomain_connected;
2481         icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
2482         tb->cm_ops = &icm_ar_ops;
2483         break;
2484 
2485     case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
2486     case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
2487         icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2488         icm->can_upgrade_nvm = !x86_apple_machine;
2489         icm->is_supported = icm_ar_is_supported;
2490         icm->cio_reset = icm_tr_cio_reset;
2491         icm->get_mode = icm_ar_get_mode;
2492         icm->driver_ready = icm_tr_driver_ready;
2493         icm->device_connected = icm_tr_device_connected;
2494         icm->device_disconnected = icm_tr_device_disconnected;
2495         icm->xdomain_connected = icm_tr_xdomain_connected;
2496         icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2497         tb->cm_ops = &icm_tr_ops;
2498         break;
2499 
2500     case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2501     case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2502         icm->is_supported = icm_fr_is_supported;
2503         icm->driver_ready = icm_icl_driver_ready;
2504         icm->set_uuid = icm_icl_set_uuid;
2505         icm->device_connected = icm_icl_device_connected;
2506         icm->device_disconnected = icm_tr_device_disconnected;
2507         icm->xdomain_connected = icm_tr_xdomain_connected;
2508         icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2509         icm->rtd3_veto = icm_icl_rtd3_veto;
2510         tb->cm_ops = &icm_icl_ops;
2511         break;
2512 
2513     case PCI_DEVICE_ID_INTEL_TGL_NHI0:
2514     case PCI_DEVICE_ID_INTEL_TGL_NHI1:
2515     case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
2516     case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
2517     case PCI_DEVICE_ID_INTEL_ADL_NHI0:
2518     case PCI_DEVICE_ID_INTEL_ADL_NHI1:
2519     case PCI_DEVICE_ID_INTEL_RPL_NHI0:
2520     case PCI_DEVICE_ID_INTEL_RPL_NHI1:
2521         icm->is_supported = icm_tgl_is_supported;
2522         icm->driver_ready = icm_icl_driver_ready;
2523         icm->set_uuid = icm_icl_set_uuid;
2524         icm->device_connected = icm_icl_device_connected;
2525         icm->device_disconnected = icm_tr_device_disconnected;
2526         icm->xdomain_connected = icm_tr_xdomain_connected;
2527         icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2528         icm->rtd3_veto = icm_icl_rtd3_veto;
2529         tb->cm_ops = &icm_icl_ops;
2530         break;
2531 
2532     case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
2533     case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
2534         icm->is_supported = icm_tgl_is_supported;
2535         icm->get_mode = icm_ar_get_mode;
2536         icm->driver_ready = icm_tr_driver_ready;
2537         icm->device_connected = icm_tr_device_connected;
2538         icm->device_disconnected = icm_tr_device_disconnected;
2539         icm->xdomain_connected = icm_tr_xdomain_connected;
2540         icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2541         tb->cm_ops = &icm_tr_ops;
2542         break;
2543     }
2544 
2545     if (!icm->is_supported || !icm->is_supported(tb)) {
2546         dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
2547         tb_domain_put(tb);
2548         return NULL;
2549     }
2550 
2551     tb_dbg(tb, "using firmware connection manager\n");
2552 
2553     return tb;
2554 }