0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Thunderbolt driver - bus logic (NHI independent)
0004  *
0005  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
0006  * Copyright (C) 2019, Intel Corporation
0007  */
0008 
0009 #include <linux/slab.h>
0010 #include <linux/errno.h>
0011 #include <linux/delay.h>
0012 #include <linux/pm_runtime.h>
0013 #include <linux/platform_data/x86/apple.h>
0014 
0015 #include "tb.h"
0016 #include "tb_regs.h"
0017 #include "tunnel.h"
0018 
0019 #define TB_TIMEOUT  100 /* ms */
0020 
0021 /**
0022  * struct tb_cm - Simple Thunderbolt connection manager
0023  * @tunnel_list: List of active tunnels
0024  * @dp_resources: List of available DP resources for DP tunneling
0025  * @hotplug_active: tb_handle_hotplug will stop processing plug
0026  *          events and exit if this is not set (it needs to
0027  *          acquire the lock one more time). Used to drain wq
0028  *          after cfg has been paused.
0029  * @remove_work: Work used to remove any unplugged routers after
0030  *       runtime resume
0031  */
0032 struct tb_cm {
0033     struct list_head tunnel_list;
0034     struct list_head dp_resources;
0035     bool hotplug_active;
0036     struct delayed_work remove_work;
0037 };
0038 
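     /*
      * The connection manager private data (struct tb_cm) is allocated by
      * tb_domain_alloc() immediately after struct tb (see tb_probe() at the
      * end of this file), so the owning domain can be found by stepping
      * back over struct tb.
      */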
0039 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
0040 {
0041     return ((void *)tcm - sizeof(struct tb));
0042 }
0043 
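     /*
      * struct tb_hotplug_event - deferred hotplug event
      * @work: Work item run on the domain workqueue (tb->wq)
      * @tb: Domain the event belongs to
      * @route: Route string of the switch the event originated from
      * @port: Port number on that switch
      * @unplug: True for an unplug event, false for a plug event
      */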
0044 struct tb_hotplug_event {
0045     struct work_struct work;
0046     struct tb *tb;
0047     u64 route;
0048     u8 port;
0049     bool unplug;
0050 };
0051 
0052 static void tb_handle_hotplug(struct work_struct *work);
0053 
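     /*
      * Allocate a hotplug event and queue it for processing on the domain
      * workqueue. The actual handling happens in tb_handle_hotplug() under
      * tb->lock; if the allocation fails the event is silently dropped.
      */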
0054 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
0055 {
0056     struct tb_hotplug_event *ev;
0057 
0058     ev = kmalloc(sizeof(*ev), GFP_KERNEL);
0059     if (!ev)
0060         return;
0061 
0062     ev->tb = tb;
0063     ev->route = route;
0064     ev->port = port;
0065     ev->unplug = unplug;
0066     INIT_WORK(&ev->work, tb_handle_hotplug);
0067     queue_work(tb->wq, &ev->work);
0068 }
0069 
0070 /* enumeration & hot plug handling */
0071 
0072 static void tb_add_dp_resources(struct tb_switch *sw)
0073 {
0074     struct tb_cm *tcm = tb_priv(sw->tb);
0075     struct tb_port *port;
0076 
0077     tb_switch_for_each_port(sw, port) {
0078         if (!tb_port_is_dpin(port))
0079             continue;
0080 
0081         if (!tb_switch_query_dp_resource(sw, port))
0082             continue;
0083 
0084         list_add_tail(&port->list, &tcm->dp_resources);
0085         tb_port_dbg(port, "DP IN resource available\n");
0086     }
0087 }
0088 
0089 static void tb_remove_dp_resources(struct tb_switch *sw)
0090 {
0091     struct tb_cm *tcm = tb_priv(sw->tb);
0092     struct tb_port *port, *tmp;
0093 
0094     /* Clear children resources first */
0095     tb_switch_for_each_port(sw, port) {
0096         if (tb_port_has_remote(port))
0097             tb_remove_dp_resources(port->remote->sw);
0098     }
0099 
0100     list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
0101         if (port->sw == sw) {
0102             tb_port_dbg(port, "DP IN resource unavailable\n");
0103             list_del_init(&port->list);
0104         }
0105     }
0106 }
0107 
0108 static void tb_switch_discover_tunnels(struct tb_switch *sw,
0109                        struct list_head *list,
0110                        bool alloc_hopids)
0111 {
0112     struct tb *tb = sw->tb;
0113     struct tb_port *port;
0114 
0115     tb_switch_for_each_port(sw, port) {
0116         struct tb_tunnel *tunnel = NULL;
0117 
0118         switch (port->config.type) {
0119         case TB_TYPE_DP_HDMI_IN:
0120             tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
0121             /*
0122              * If a DP tunnel exists, change the host router's 1st
0123              * children TMU mode to HiFi so that CL0s works.
0124              */
0125             if (tunnel)
0126                 tb_switch_enable_tmu_1st_child(tb->root_switch,
0127                         TB_SWITCH_TMU_RATE_HIFI);
0128             break;
0129 
0130         case TB_TYPE_PCIE_DOWN:
0131             tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
0132             break;
0133 
0134         case TB_TYPE_USB3_DOWN:
0135             tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
0136             break;
0137 
0138         default:
0139             break;
0140         }
0141 
0142         if (tunnel)
0143             list_add_tail(&tunnel->list, list);
0144     }
0145 
0146     tb_switch_for_each_port(sw, port) {
0147         if (tb_port_has_remote(port)) {
0148             tb_switch_discover_tunnels(port->remote->sw, list,
0149                            alloc_hopids);
0150         }
0151     }
0152 }
0153 
0154 static void tb_discover_tunnels(struct tb *tb)
0155 {
0156     struct tb_cm *tcm = tb_priv(tb);
0157     struct tb_tunnel *tunnel;
0158 
0159     tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
0160 
0161     list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
0162         if (tb_tunnel_is_pci(tunnel)) {
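                 /*
                  * A PCIe tunnel created by the boot firmware implies the
                  * routers along its path were authorized at boot. Mark
                  * them so tb_scan_finalize_switch() can authorize them
                  * before uevents are sent.
                  */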
0163             struct tb_switch *parent = tunnel->dst_port->sw;
0164 
0165             while (parent != tunnel->src_port->sw) {
0166                 parent->boot = true;
0167                 parent = tb_switch_parent(parent);
0168             }
0169         } else if (tb_tunnel_is_dp(tunnel)) {
0170             /* Keep the domain from powering down */
0171             pm_runtime_get_sync(&tunnel->src_port->sw->dev);
0172             pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
0173         }
0174     }
0175 }
0176 
0177 static int tb_port_configure_xdomain(struct tb_port *port)
0178 {
0179     if (tb_switch_is_usb4(port->sw))
0180         return usb4_port_configure_xdomain(port);
0181     return tb_lc_configure_xdomain(port);
0182 }
0183 
0184 static void tb_port_unconfigure_xdomain(struct tb_port *port)
0185 {
0186     if (tb_switch_is_usb4(port->sw))
0187         usb4_port_unconfigure_xdomain(port);
0188     else
0189         tb_lc_unconfigure_xdomain(port);
0190 
0191     tb_port_enable(port->dual_link_port);
0192 }
0193 
0194 static void tb_scan_xdomain(struct tb_port *port)
0195 {
0196     struct tb_switch *sw = port->sw;
0197     struct tb *tb = sw->tb;
0198     struct tb_xdomain *xd;
0199     u64 route;
0200 
0201     if (!tb_is_xdomain_enabled())
0202         return;
0203 
0204     route = tb_downstream_route(port);
0205     xd = tb_xdomain_find_by_route(tb, route);
0206     if (xd) {
0207         tb_xdomain_put(xd);
0208         return;
0209     }
0210 
0211     xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
0212                   NULL);
0213     if (xd) {
0214         tb_port_at(route, sw)->xdomain = xd;
0215         tb_port_configure_xdomain(port);
0216         tb_xdomain_add(xd);
0217     }
0218 }
0219 
0220 static int tb_enable_tmu(struct tb_switch *sw)
0221 {
0222     int ret;
0223 
0224     /* If it is already enabled in correct mode, don't touch it */
0225     if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
0226         return 0;
0227 
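         /*
          * Changing the mode requires disabling the TMU first, then
          * posting the host time to the router before enabling it again
          * in the requested mode.
          */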
0228     ret = tb_switch_tmu_disable(sw);
0229     if (ret)
0230         return ret;
0231 
0232     ret = tb_switch_tmu_post_time(sw);
0233     if (ret)
0234         return ret;
0235 
0236     return tb_switch_tmu_enable(sw);
0237 }
0238 
0239 /**
0240  * tb_find_unused_port() - return the first inactive port on @sw
0241  * @sw: Switch to find the port on
0242  * @type: Port type to look for
0243  */
0244 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
0245                        enum tb_port_type type)
0246 {
0247     struct tb_port *port;
0248 
0249     tb_switch_for_each_port(sw, port) {
0250         if (tb_is_upstream_port(port))
0251             continue;
0252         if (port->config.type != type)
0253             continue;
0254         if (!port->cap_adap)
0255             continue;
0256         if (tb_port_is_enabled(port))
0257             continue;
0258         return port;
0259     }
0260     return NULL;
0261 }
0262 
0263 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
0264                      const struct tb_port *port)
0265 {
0266     struct tb_port *down;
0267 
0268     down = usb4_switch_map_usb3_down(sw, port);
0269     if (down && !tb_usb3_port_is_enabled(down))
0270         return down;
0271     return NULL;
0272 }
0273 
0274 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
0275                     struct tb_port *src_port,
0276                     struct tb_port *dst_port)
0277 {
0278     struct tb_cm *tcm = tb_priv(tb);
0279     struct tb_tunnel *tunnel;
0280 
0281     list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
0282         if (tunnel->type == type &&
0283             ((src_port && src_port == tunnel->src_port) ||
0284              (dst_port && dst_port == tunnel->dst_port))) {
0285             return tunnel;
0286         }
0287     }
0288 
0289     return NULL;
0290 }
0291 
0292 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
0293                            struct tb_port *src_port,
0294                            struct tb_port *dst_port)
0295 {
0296     struct tb_port *port, *usb3_down;
0297     struct tb_switch *sw;
0298 
0299     /* Pick the router that is deepest in the topology */
0300     if (dst_port->sw->config.depth > src_port->sw->config.depth)
0301         sw = dst_port->sw;
0302     else
0303         sw = src_port->sw;
0304 
0305     /* Can't be the host router */
0306     if (sw == tb->root_switch)
0307         return NULL;
0308 
0309     /* Find the downstream USB4 port that leads to this router */
0310     port = tb_port_at(tb_route(sw), tb->root_switch);
0311     /* Find the corresponding host router USB3 downstream port */
0312     usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
0313     if (!usb3_down)
0314         return NULL;
0315 
0316     return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
0317 }
0318 
0319 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
0320     struct tb_port *dst_port, int *available_up, int *available_down)
0321 {
0322     int usb3_consumed_up, usb3_consumed_down, ret;
0323     struct tb_cm *tcm = tb_priv(tb);
0324     struct tb_tunnel *tunnel;
0325     struct tb_port *port;
0326 
0327     tb_port_dbg(dst_port, "calculating available bandwidth\n");
0328 
0329     tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
0330     if (tunnel) {
0331         ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
0332                            &usb3_consumed_down);
0333         if (ret)
0334             return ret;
0335     } else {
0336         usb3_consumed_up = 0;
0337         usb3_consumed_down = 0;
0338     }
0339 
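         /*
          * Start from the 40 Gb/s (40000 Mb/s) maximum of a fully bonded
          * link and clamp it down below for every link on the path.
          */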
0340     *available_up = *available_down = 40000;
0341 
0342     /* Find the minimum available bandwidth over all links */
0343     tb_for_each_port_on_path(src_port, dst_port, port) {
0344         int link_speed, link_width, up_bw, down_bw;
0345 
0346         if (!tb_port_is_null(port))
0347             continue;
0348 
0349         if (tb_is_upstream_port(port)) {
0350             link_speed = port->sw->link_speed;
0351         } else {
0352             link_speed = tb_port_get_link_speed(port);
0353             if (link_speed < 0)
0354                 return link_speed;
0355         }
0356 
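             /* A bonded link carries traffic over both lanes */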
0357         link_width = port->bonded ? 2 : 1;
0358 
0359         up_bw = link_speed * link_width * 1000; /* Mb/s */
0360         /* Leave 10% guard band */
0361         up_bw -= up_bw / 10;
0362         down_bw = up_bw;
0363 
0364         tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
0365 
0366         /*
0367          * Find all DP tunnels that cross the port and reduce
0368          * their consumed bandwidth from the available.
0369          */
0370         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
0371             int dp_consumed_up, dp_consumed_down;
0372 
0373             if (!tb_tunnel_is_dp(tunnel))
0374                 continue;
0375 
0376             if (!tb_tunnel_port_on_path(tunnel, port))
0377                 continue;
0378 
0379             ret = tb_tunnel_consumed_bandwidth(tunnel,
0380                                &dp_consumed_up,
0381                                &dp_consumed_down);
0382             if (ret)
0383                 return ret;
0384 
0385             up_bw -= dp_consumed_up;
0386             down_bw -= dp_consumed_down;
0387         }
0388 
0389         /*
0390          * If USB3 is tunneled from the host router down to the
0391          * branch leading to the port, we need to take the USB3
0392          * consumed bandwidth into account regardless of whether it
0393          * actually crosses the port.
0394          */
0395         up_bw -= usb3_consumed_up;
0396         down_bw -= usb3_consumed_down;
0397 
0398         if (up_bw < *available_up)
0399             *available_up = up_bw;
0400         if (down_bw < *available_down)
0401             *available_down = down_bw;
0402     }
0403 
0404     if (*available_up < 0)
0405         *available_up = 0;
0406     if (*available_down < 0)
0407         *available_down = 0;
0408 
0409     return 0;
0410 }
0411 
0412 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
0413                         struct tb_port *src_port,
0414                         struct tb_port *dst_port)
0415 {
0416     struct tb_tunnel *tunnel;
0417 
0418     tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
0419     return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
0420 }
0421 
0422 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
0423                       struct tb_port *dst_port)
0424 {
0425     int ret, available_up, available_down;
0426     struct tb_tunnel *tunnel;
0427 
0428     tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
0429     if (!tunnel)
0430         return;
0431 
0432     tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
0433 
0434     /*
0435      * Calculate available bandwidth for the first hop USB3 tunnel.
0436      * That determines the whole USB3 bandwidth for this branch.
0437      */
0438     ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
0439                      &available_up, &available_down);
0440     if (ret) {
0441         tb_warn(tb, "failed to calculate available bandwidth\n");
0442         return;
0443     }
0444 
0445     tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
0446            available_up, available_down);
0447 
0448     tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
0449 }
0450 
0451 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
0452 {
0453     struct tb_switch *parent = tb_switch_parent(sw);
0454     int ret, available_up, available_down;
0455     struct tb_port *up, *down, *port;
0456     struct tb_cm *tcm = tb_priv(tb);
0457     struct tb_tunnel *tunnel;
0458 
0459     if (!tb_acpi_may_tunnel_usb3()) {
0460         tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
0461         return 0;
0462     }
0463 
0464     up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
0465     if (!up)
0466         return 0;
0467 
0468     if (!sw->link_usb4)
0469         return 0;
0470 
0471     /*
0472      * Look up available down port. Since we are chaining it should
0473      * be found right above this switch.
0474      */
0475     port = tb_port_at(tb_route(sw), parent);
0476     down = tb_find_usb3_down(parent, port);
0477     if (!down)
0478         return 0;
0479 
0480     if (tb_route(parent)) {
0481         struct tb_port *parent_up;
0482         /*
0483          * Check first that the parent switch has its upstream USB3
0484          * port enabled. Otherwise the chain is not complete and
0485          * there is no point setting up a new tunnel.
0486          */
0487         parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
0488         if (!parent_up || !tb_port_is_enabled(parent_up))
0489             return 0;
0490 
0491         /* Make all unused bandwidth available for the new tunnel */
0492         ret = tb_release_unused_usb3_bandwidth(tb, down, up);
0493         if (ret)
0494             return ret;
0495     }
0496 
0497     ret = tb_available_bandwidth(tb, down, up, &available_up,
0498                      &available_down);
0499     if (ret)
0500         goto err_reclaim;
0501 
0502     tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
0503             available_up, available_down);
0504 
0505     tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
0506                       available_down);
0507     if (!tunnel) {
0508         ret = -ENOMEM;
0509         goto err_reclaim;
0510     }
0511 
0512     if (tb_tunnel_activate(tunnel)) {
0513         tb_port_info(up,
0514                  "USB3 tunnel activation failed, aborting\n");
0515         ret = -EIO;
0516         goto err_free;
0517     }
0518 
0519     list_add_tail(&tunnel->list, &tcm->tunnel_list);
0520     if (tb_route(parent))
0521         tb_reclaim_usb3_bandwidth(tb, down, up);
0522 
0523     return 0;
0524 
0525 err_free:
0526     tb_tunnel_free(tunnel);
0527 err_reclaim:
0528     if (tb_route(parent))
0529         tb_reclaim_usb3_bandwidth(tb, down, up);
0530 
0531     return ret;
0532 }
0533 
0534 static int tb_create_usb3_tunnels(struct tb_switch *sw)
0535 {
0536     struct tb_port *port;
0537     int ret;
0538 
0539     if (!tb_acpi_may_tunnel_usb3())
0540         return 0;
0541 
0542     if (tb_route(sw)) {
0543         ret = tb_tunnel_usb3(sw->tb, sw);
0544         if (ret)
0545             return ret;
0546     }
0547 
0548     tb_switch_for_each_port(sw, port) {
0549         if (!tb_port_has_remote(port))
0550             continue;
0551         ret = tb_create_usb3_tunnels(port->remote->sw);
0552         if (ret)
0553             return ret;
0554     }
0555 
0556     return 0;
0557 }
0558 
0559 static void tb_scan_port(struct tb_port *port);
0560 
0561 /*
0562  * tb_scan_switch() - scan for and initialize downstream switches
0563  */
0564 static void tb_scan_switch(struct tb_switch *sw)
0565 {
0566     struct tb_port *port;
0567 
0568     pm_runtime_get_sync(&sw->dev);
0569 
0570     tb_switch_for_each_port(sw, port)
0571         tb_scan_port(port);
0572 
0573     pm_runtime_mark_last_busy(&sw->dev);
0574     pm_runtime_put_autosuspend(&sw->dev);
0575 }
0576 
0577 /*
0578  * tb_scan_port() - check for and initialize switches below port
0579  */
0580 static void tb_scan_port(struct tb_port *port)
0581 {
0582     struct tb_cm *tcm = tb_priv(port->sw->tb);
0583     struct tb_port *upstream_port;
0584     struct tb_switch *sw;
0585     int ret;
0586 
0587     if (tb_is_upstream_port(port))
0588         return;
0589 
0590     if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
0591         !tb_dp_port_is_enabled(port)) {
0592         tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
0593         tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
0594                  false);
0595         return;
0596     }
0597 
0598     if (port->config.type != TB_TYPE_PORT)
0599         return;
0600     if (port->dual_link_port && port->link_nr)
0601         return; /*
0602              * Downstream switch is reachable through two ports.
0603              * Only scan on the primary port (link_nr == 0).
0604              */
0605     if (tb_wait_for_port(port, false) <= 0)
0606         return;
0607     if (port->remote) {
0608         tb_port_dbg(port, "port already has a remote\n");
0609         return;
0610     }
0611 
0612     tb_retimer_scan(port, true);
0613 
0614     sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
0615                  tb_downstream_route(port));
0616     if (IS_ERR(sw)) {
0617         /*
0618          * If there is an error accessing the connected switch
0619          * it may be connected to another domain. Also we allow
0620          * the other domain to be connected to a max depth switch.
0621          */
0622         if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
0623             tb_scan_xdomain(port);
0624         return;
0625     }
0626 
0627     if (tb_switch_configure(sw)) {
0628         tb_switch_put(sw);
0629         return;
0630     }
0631 
0632     /*
0633      * If there was previously another domain connected remove it
0634      * first.
0635      */
0636     if (port->xdomain) {
0637         tb_xdomain_remove(port->xdomain);
0638         tb_port_unconfigure_xdomain(port);
0639         port->xdomain = NULL;
0640     }
0641 
0642     /*
0643      * Do not send uevents until we have discovered all existing
0644      * tunnels and know which switches were authorized already by
0645      * the boot firmware.
0646      */
0647     if (!tcm->hotplug_active)
0648         dev_set_uevent_suppress(&sw->dev, true);
0649 
0650     /*
0651      * At the moment we can support runtime PM only on Thunderbolt 2
0652      * and beyond (devices with LC).
0653      */
0654     sw->rpm = sw->generation > 1;
0655 
0656     if (tb_switch_add(sw)) {
0657         tb_switch_put(sw);
0658         return;
0659     }
0660 
0661     /* Link the switches using both links if available */
0662     upstream_port = tb_upstream_port(sw);
0663     port->remote = upstream_port;
0664     upstream_port->remote = port;
0665     if (port->dual_link_port && upstream_port->dual_link_port) {
0666         port->dual_link_port->remote = upstream_port->dual_link_port;
0667         upstream_port->dual_link_port->remote = port->dual_link_port;
0668     }
0669 
0670     /* Enable lane bonding if supported */
0671     tb_switch_lane_bonding_enable(sw);
0672     /* Set the link configured */
0673     tb_switch_configure_link(sw);
0674     /*
0675      * CL0s and CL1 are enabled and supported together.
0676      * Silently ignore CLx enabling in case CLx is not supported.
0677      */
0678     ret = tb_switch_enable_clx(sw, TB_CL1);
0679     if (ret && ret != -EOPNOTSUPP)
0680         tb_sw_warn(sw, "failed to enable %s on upstream port\n",
0681                tb_switch_clx_name(TB_CL1));
0682 
0683     if (tb_switch_is_clx_enabled(sw, TB_CL1))
0684         /*
0685          * To support highest CLx state, we set router's TMU to
0686          * Normal-Uni mode.
0687          */
0688         tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
0689     else
0690         /* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
0691         tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
0692 
0693     if (tb_enable_tmu(sw))
0694         tb_sw_warn(sw, "failed to enable TMU\n");
0695 
0696     /* Scan upstream retimers */
0697     tb_retimer_scan(upstream_port, true);
0698 
0699     /*
0700      * Create USB 3.x tunnels only when the switch is plugged into the
0701      * domain. This is because we also scan the domain during discovery
0702      * and want to discover existing USB 3.x tunnels before we create
0703      * any new ones.
0704      */
0705     if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
0706         tb_sw_warn(sw, "USB3 tunnel creation failed\n");
0707 
0708     tb_add_dp_resources(sw);
0709     tb_scan_switch(sw);
0710 }
0711 
0712 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
0713 {
0714     struct tb_port *src_port, *dst_port;
0715     struct tb *tb;
0716 
0717     if (!tunnel)
0718         return;
0719 
0720     tb_tunnel_deactivate(tunnel);
0721     list_del(&tunnel->list);
0722 
0723     tb = tunnel->tb;
0724     src_port = tunnel->src_port;
0725     dst_port = tunnel->dst_port;
0726 
0727     switch (tunnel->type) {
0728     case TB_TUNNEL_DP:
0729         /*
0730          * In case of DP tunnel make sure the DP IN resource is
0731          * deallocated properly.
0732          */
0733         tb_switch_dealloc_dp_resource(src_port->sw, src_port);
0734         /* Now we can allow the domain to runtime suspend again */
0735         pm_runtime_mark_last_busy(&dst_port->sw->dev);
0736         pm_runtime_put_autosuspend(&dst_port->sw->dev);
0737         pm_runtime_mark_last_busy(&src_port->sw->dev);
0738         pm_runtime_put_autosuspend(&src_port->sw->dev);
0739         fallthrough;
0740 
0741     case TB_TUNNEL_USB3:
0742         tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
0743         break;
0744 
0745     default:
0746         /*
0747          * PCIe and DMA tunnels do not consume guaranteed
0748          * bandwidth.
0749          */
0750         break;
0751     }
0752 
0753     tb_tunnel_free(tunnel);
0754 }
0755 
0756 /*
0757  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
0758  */
0759 static void tb_free_invalid_tunnels(struct tb *tb)
0760 {
0761     struct tb_cm *tcm = tb_priv(tb);
0762     struct tb_tunnel *tunnel;
0763     struct tb_tunnel *n;
0764 
0765     list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
0766         if (tb_tunnel_is_invalid(tunnel))
0767             tb_deactivate_and_free_tunnel(tunnel);
0768     }
0769 }
0770 
0771 /*
0772  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
0773  */
0774 static void tb_free_unplugged_children(struct tb_switch *sw)
0775 {
0776     struct tb_port *port;
0777 
0778     tb_switch_for_each_port(sw, port) {
0779         if (!tb_port_has_remote(port))
0780             continue;
0781 
0782         if (port->remote->sw->is_unplugged) {
0783             tb_retimer_remove_all(port);
0784             tb_remove_dp_resources(port->remote->sw);
0785             tb_switch_unconfigure_link(port->remote->sw);
0786             tb_switch_lane_bonding_disable(port->remote->sw);
0787             tb_switch_remove(port->remote->sw);
0788             port->remote = NULL;
0789             if (port->dual_link_port)
0790                 port->dual_link_port->remote = NULL;
0791         } else {
0792             tb_free_unplugged_children(port->remote->sw);
0793         }
0794     }
0795 }
0796 
0797 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
0798                      const struct tb_port *port)
0799 {
0800     struct tb_port *down = NULL;
0801 
0802     /*
0803      * To keep plugging devices consistently in the same PCIe
0804      * hierarchy, do mapping here for switch downstream PCIe ports.
0805      */
0806     if (tb_switch_is_usb4(sw)) {
0807         down = usb4_switch_map_pcie_down(sw, port);
0808     } else if (!tb_route(sw)) {
0809         int phy_port = tb_phy_port_from_link(port->port);
0810         int index;
0811 
0812         /*
0813          * Hard-coded Thunderbolt port to PCIe down port mapping
0814          * per controller.
0815          */
0816         if (tb_switch_is_cactus_ridge(sw) ||
0817             tb_switch_is_alpine_ridge(sw))
0818             index = !phy_port ? 6 : 7;
0819         else if (tb_switch_is_falcon_ridge(sw))
0820             index = !phy_port ? 6 : 8;
0821         else if (tb_switch_is_titan_ridge(sw))
0822             index = !phy_port ? 8 : 9;
0823         else
0824             goto out;
0825 
0826         /* Validate the hard-coding */
0827         if (WARN_ON(index > sw->config.max_port_number))
0828             goto out;
0829 
0830         down = &sw->ports[index];
0831     }
0832 
0833     if (down) {
0834         if (WARN_ON(!tb_port_is_pcie_down(down)))
0835             goto out;
0836         if (tb_pci_port_is_enabled(down))
0837             goto out;
0838 
0839         return down;
0840     }
0841 
0842 out:
0843     return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
0844 }
0845 
0846 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
0847 {
0848     struct tb_port *host_port, *port;
0849     struct tb_cm *tcm = tb_priv(tb);
0850 
0851     host_port = tb_route(in->sw) ?
0852         tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
0853 
0854     list_for_each_entry(port, &tcm->dp_resources, list) {
0855         if (!tb_port_is_dpout(port))
0856             continue;
0857 
0858         if (tb_port_is_enabled(port)) {
0859             tb_port_dbg(port, "in use\n");
0860             continue;
0861         }
0862 
0863         tb_port_dbg(port, "DP OUT available\n");
0864 
0865         /*
0866          * Keep the DP tunnel under the topology starting from
0867          * the same host router downstream port.
0868          */
0869         if (host_port && tb_route(port->sw)) {
0870             struct tb_port *p;
0871 
0872             p = tb_port_at(tb_route(port->sw), tb->root_switch);
0873             if (p != host_port)
0874                 continue;
0875         }
0876 
0877         return port;
0878     }
0879 
0880     return NULL;
0881 }
0882 
0883 static void tb_tunnel_dp(struct tb *tb)
0884 {
0885     int available_up, available_down, ret, link_nr;
0886     struct tb_cm *tcm = tb_priv(tb);
0887     struct tb_port *port, *in, *out;
0888     struct tb_tunnel *tunnel;
0889 
0890     if (!tb_acpi_may_tunnel_dp()) {
0891         tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
0892         return;
0893     }
0894 
0895     /*
0896      * Find pair of inactive DP IN and DP OUT adapters and then
0897      * establish a DP tunnel between them.
0898      */
0899     tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
0900 
0901     in = NULL;
0902     out = NULL;
0903     list_for_each_entry(port, &tcm->dp_resources, list) {
0904         if (!tb_port_is_dpin(port))
0905             continue;
0906 
0907         if (tb_port_is_enabled(port)) {
0908             tb_port_dbg(port, "in use\n");
0909             continue;
0910         }
0911 
0912         tb_port_dbg(port, "DP IN available\n");
0913 
0914         out = tb_find_dp_out(tb, port);
0915         if (out) {
0916             in = port;
0917             break;
0918         }
0919     }
0920 
0921     if (!in) {
0922         tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
0923         return;
0924     }
0925     if (!out) {
0926         tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
0927         return;
0928     }
0929 
0930     /*
0931      * This is only applicable to links that are not bonded (so
0932      * when Thunderbolt 1 hardware is involved somewhere in the
0933      * topology). For these try to share the DP bandwidth between
0934      * the two lanes.
0935      */
0936     link_nr = 1;
0937     list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
0938         if (tb_tunnel_is_dp(tunnel)) {
0939             link_nr = 0;
0940             break;
0941         }
0942     }
0943 
0944     /*
0945      * DP stream needs the domain to be active so runtime resume
0946      * both ends of the tunnel.
0947      *
0948      * This should bring the routers in the middle active as well
0949      * and keeps the domain from runtime suspending while the DP
0950      * tunnel is active.
0951      */
0952     pm_runtime_get_sync(&in->sw->dev);
0953     pm_runtime_get_sync(&out->sw->dev);
0954 
0955     if (tb_switch_alloc_dp_resource(in->sw, in)) {
0956         tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
0957         goto err_rpm_put;
0958     }
0959 
0960     /* Make all unused USB3 bandwidth available for the new DP tunnel */
0961     ret = tb_release_unused_usb3_bandwidth(tb, in, out);
0962     if (ret) {
0963         tb_warn(tb, "failed to release unused bandwidth\n");
0964         goto err_dealloc_dp;
0965     }
0966 
0967     ret = tb_available_bandwidth(tb, in, out, &available_up,
0968                      &available_down);
0969     if (ret)
0970         goto err_reclaim;
0971 
0972     tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
0973            available_up, available_down);
0974 
0975     tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
0976                     available_down);
0977     if (!tunnel) {
0978         tb_port_dbg(out, "could not allocate DP tunnel\n");
0979         goto err_reclaim;
0980     }
0981 
0982     if (tb_tunnel_activate(tunnel)) {
0983         tb_port_info(out, "DP tunnel activation failed, aborting\n");
0984         goto err_free;
0985     }
0986 
0987     list_add_tail(&tunnel->list, &tcm->tunnel_list);
0988     tb_reclaim_usb3_bandwidth(tb, in, out);
0989     /*
0990      * If a DP tunnel exists, change the host router's 1st children
0991      * TMU mode to HiFi so that CL0s works.
0992      */
0993     tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);
0994 
0995     return;
0996 
0997 err_free:
0998     tb_tunnel_free(tunnel);
0999 err_reclaim:
1000     tb_reclaim_usb3_bandwidth(tb, in, out);
1001 err_dealloc_dp:
1002     tb_switch_dealloc_dp_resource(in->sw, in);
1003 err_rpm_put:
1004     pm_runtime_mark_last_busy(&out->sw->dev);
1005     pm_runtime_put_autosuspend(&out->sw->dev);
1006     pm_runtime_mark_last_busy(&in->sw->dev);
1007     pm_runtime_put_autosuspend(&in->sw->dev);
1008 }
1009 
1010 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
1011 {
1012     struct tb_port *in, *out;
1013     struct tb_tunnel *tunnel;
1014 
1015     if (tb_port_is_dpin(port)) {
1016         tb_port_dbg(port, "DP IN resource unavailable\n");
1017         in = port;
1018         out = NULL;
1019     } else {
1020         tb_port_dbg(port, "DP OUT resource unavailable\n");
1021         in = NULL;
1022         out = port;
1023     }
1024 
1025     tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1026     tb_deactivate_and_free_tunnel(tunnel);
1027     list_del_init(&port->list);
1028 
1029     /*
1030      * See if there is another DP OUT port that can be used to
1031      * create another tunnel.
1032      */
1033     tb_tunnel_dp(tb);
1034 }
1035 
1036 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1037 {
1038     struct tb_cm *tcm = tb_priv(tb);
1039     struct tb_port *p;
1040 
1041     if (tb_port_is_enabled(port))
1042         return;
1043 
1044     list_for_each_entry(p, &tcm->dp_resources, list) {
1045         if (p == port)
1046             return;
1047     }
1048 
1049     tb_port_dbg(port, "DP %s resource available\n",
1050             tb_port_is_dpin(port) ? "IN" : "OUT");
1051     list_add_tail(&port->list, &tcm->dp_resources);
1052 
1053     /* Look for suitable DP IN <-> DP OUT pairs now */
1054     tb_tunnel_dp(tb);
1055 }
1056 
1057 static void tb_disconnect_and_release_dp(struct tb *tb)
1058 {
1059     struct tb_cm *tcm = tb_priv(tb);
1060     struct tb_tunnel *tunnel, *n;
1061 
1062     /*
1063      * Tear down all DP tunnels and release their resources. They
1064      * will be re-established after resume based on plug events.
1065      */
1066     list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1067         if (tb_tunnel_is_dp(tunnel))
1068             tb_deactivate_and_free_tunnel(tunnel);
1069     }
1070 
1071     while (!list_empty(&tcm->dp_resources)) {
1072         struct tb_port *port;
1073 
1074         port = list_first_entry(&tcm->dp_resources,
1075                     struct tb_port, list);
1076         list_del_init(&port->list);
1077     }
1078 }
1079 
1080 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1081 {
1082     struct tb_tunnel *tunnel;
1083     struct tb_port *up;
1084 
1085     up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1086     if (WARN_ON(!up))
1087         return -ENODEV;
1088 
1089     tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1090     if (WARN_ON(!tunnel))
1091         return -ENODEV;
1092 
1093     tb_switch_xhci_disconnect(sw);
1094 
1095     tb_tunnel_deactivate(tunnel);
1096     list_del(&tunnel->list);
1097     tb_tunnel_free(tunnel);
1098     return 0;
1099 }
1100 
1101 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1102 {
1103     struct tb_port *up, *down, *port;
1104     struct tb_cm *tcm = tb_priv(tb);
1105     struct tb_switch *parent_sw;
1106     struct tb_tunnel *tunnel;
1107 
1108     up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1109     if (!up)
1110         return 0;
1111 
1112     /*
1113      * Look up available down port. Since we are chaining it should
1114      * be found right above this switch.
1115      */
1116     parent_sw = tb_to_switch(sw->dev.parent);
1117     port = tb_port_at(tb_route(sw), parent_sw);
1118     down = tb_find_pcie_down(parent_sw, port);
1119     if (!down)
1120         return 0;
1121 
1122     tunnel = tb_tunnel_alloc_pci(tb, up, down);
1123     if (!tunnel)
1124         return -ENOMEM;
1125 
1126     if (tb_tunnel_activate(tunnel)) {
1127         tb_port_info(up,
1128                  "PCIe tunnel activation failed, aborting\n");
1129         tb_tunnel_free(tunnel);
1130         return -EIO;
1131     }
1132 
1133     /*
1134      * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1135      * here.
1136      */
1137     if (tb_switch_pcie_l1_enable(sw))
1138         tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1139 
1140     if (tb_switch_xhci_connect(sw))
1141         tb_sw_warn(sw, "failed to connect xHCI\n");
1142 
1143     list_add_tail(&tunnel->list, &tcm->tunnel_list);
1144     return 0;
1145 }
1146 
1147 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1148                     int transmit_path, int transmit_ring,
1149                     int receive_path, int receive_ring)
1150 {
1151     struct tb_cm *tcm = tb_priv(tb);
1152     struct tb_port *nhi_port, *dst_port;
1153     struct tb_tunnel *tunnel;
1154     struct tb_switch *sw;
1155 
1156     sw = tb_to_switch(xd->dev.parent);
1157     dst_port = tb_port_at(xd->route, sw);
1158     nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1159 
1160     mutex_lock(&tb->lock);
1161     tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1162                      transmit_ring, receive_path, receive_ring);
1163     if (!tunnel) {
1164         mutex_unlock(&tb->lock);
1165         return -ENOMEM;
1166     }
1167 
1168     if (tb_tunnel_activate(tunnel)) {
1169         tb_port_info(nhi_port,
1170                  "DMA tunnel activation failed, aborting\n");
1171         tb_tunnel_free(tunnel);
1172         mutex_unlock(&tb->lock);
1173         return -EIO;
1174     }
1175 
1176     list_add_tail(&tunnel->list, &tcm->tunnel_list);
1177     mutex_unlock(&tb->lock);
1178     return 0;
1179 }
1180 
1181 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1182                       int transmit_path, int transmit_ring,
1183                       int receive_path, int receive_ring)
1184 {
1185     struct tb_cm *tcm = tb_priv(tb);
1186     struct tb_port *nhi_port, *dst_port;
1187     struct tb_tunnel *tunnel, *n;
1188     struct tb_switch *sw;
1189 
1190     sw = tb_to_switch(xd->dev.parent);
1191     dst_port = tb_port_at(xd->route, sw);
1192     nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1193 
1194     list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1195         if (!tb_tunnel_is_dma(tunnel))
1196             continue;
1197         if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1198             continue;
1199 
1200         if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1201                     receive_path, receive_ring))
1202             tb_deactivate_and_free_tunnel(tunnel);
1203     }
1204 }
1205 
1206 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1207                        int transmit_path, int transmit_ring,
1208                        int receive_path, int receive_ring)
1209 {
1210     if (!xd->is_unplugged) {
1211         mutex_lock(&tb->lock);
1212         __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1213                           transmit_ring, receive_path,
1214                           receive_ring);
1215         mutex_unlock(&tb->lock);
1216     }
1217     return 0;
1218 }
1219 
1220 /* hotplug handling */
1221 
1222 /*
1223  * tb_handle_hotplug() - handle hotplug event
1224  *
1225  * Executes on tb->wq.
1226  */
1227 static void tb_handle_hotplug(struct work_struct *work)
1228 {
1229     struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1230     struct tb *tb = ev->tb;
1231     struct tb_cm *tcm = tb_priv(tb);
1232     struct tb_switch *sw;
1233     struct tb_port *port;
1234 
1235     /* Bring the domain back from sleep if it was suspended */
1236     pm_runtime_get_sync(&tb->dev);
1237 
1238     mutex_lock(&tb->lock);
1239     if (!tcm->hotplug_active)
1240         goto out; /* during init, suspend or shutdown */
1241 
1242     sw = tb_switch_find_by_route(tb, ev->route);
1243     if (!sw) {
1244         tb_warn(tb,
1245             "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
1246             ev->route, ev->port, ev->unplug);
1247         goto out;
1248     }
1249     if (ev->port > sw->config.max_port_number) {
1250         tb_warn(tb,
1251             "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
1252             ev->route, ev->port, ev->unplug);
1253         goto put_sw;
1254     }
1255     port = &sw->ports[ev->port];
1256     if (tb_is_upstream_port(port)) {
1257         tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1258                ev->route, ev->port, ev->unplug);
1259         goto put_sw;
1260     }
1261 
1262     pm_runtime_get_sync(&sw->dev);
1263 
1264     if (ev->unplug) {
1265         tb_retimer_remove_all(port);
1266 
1267         if (tb_port_has_remote(port)) {
1268             tb_port_dbg(port, "switch unplugged\n");
1269             tb_sw_set_unplugged(port->remote->sw);
1270             tb_free_invalid_tunnels(tb);
1271             tb_remove_dp_resources(port->remote->sw);
1272             tb_switch_tmu_disable(port->remote->sw);
1273             tb_switch_unconfigure_link(port->remote->sw);
1274             tb_switch_lane_bonding_disable(port->remote->sw);
1275             tb_switch_remove(port->remote->sw);
1276             port->remote = NULL;
1277             if (port->dual_link_port)
1278                 port->dual_link_port->remote = NULL;
1279             /* Maybe we can create another DP tunnel */
1280             tb_tunnel_dp(tb);
1281         } else if (port->xdomain) {
1282             struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1283 
1284             tb_port_dbg(port, "xdomain unplugged\n");
1285             /*
1286              * Service drivers are unbound during
1287              * tb_xdomain_remove() so setting XDomain as
1288              * unplugged here prevents deadlock if they call
1289              * tb_xdomain_disable_paths(). We will tear down
1290              * all the tunnels below.
1291              */
1292             xd->is_unplugged = true;
1293             tb_xdomain_remove(xd);
1294             port->xdomain = NULL;
1295             __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
1296             tb_xdomain_put(xd);
1297             tb_port_unconfigure_xdomain(port);
1298         } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1299             tb_dp_resource_unavailable(tb, port);
1300         } else if (!port->port) {
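                 /*
                  * An unplug event on adapter 0 (the router itself) is a
                  * request to disconnect the router's internal xHCI.
                  */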
1301             tb_sw_dbg(sw, "xHCI disconnect request\n");
1302             tb_switch_xhci_disconnect(sw);
1303         } else {
1304             tb_port_dbg(port,
1305                    "got unplug event for disconnected port, ignoring\n");
1306         }
1307     } else if (port->remote) {
1308         tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1309     } else if (!port->port && sw->authorized) {
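             /*
              * Likewise a plug event on adapter 0 asks us to connect the
              * internal xHCI, which we only do for authorized routers.
              */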
1310         tb_sw_dbg(sw, "xHCI connect request\n");
1311         tb_switch_xhci_connect(sw);
1312     } else {
1313         if (tb_port_is_null(port)) {
1314             tb_port_dbg(port, "hotplug: scanning\n");
1315             tb_scan_port(port);
1316             if (!port->remote)
1317                 tb_port_dbg(port, "hotplug: no switch found\n");
1318         } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1319             tb_dp_resource_available(tb, port);
1320         }
1321     }
1322 
1323     pm_runtime_mark_last_busy(&sw->dev);
1324     pm_runtime_put_autosuspend(&sw->dev);
1325 
1326 put_sw:
1327     tb_switch_put(sw);
1328 out:
1329     mutex_unlock(&tb->lock);
1330 
1331     pm_runtime_mark_last_busy(&tb->dev);
1332     pm_runtime_put_autosuspend(&tb->dev);
1333 
1334     kfree(ev);
1335 }
1336 
1337 /*
1338  * tb_handle_event() - callback function for the control channel
1339  *
1340  * Delegates to tb_handle_hotplug.
1341  */
1342 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1343                 const void *buf, size_t size)
1344 {
1345     const struct cfg_event_pkg *pkg = buf;
1346     u64 route;
1347 
1348     if (type != TB_CFG_PKG_EVENT) {
1349         tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1350         return;
1351     }
1352 
1353     route = tb_cfg_get_route(&pkg->header);
1354 
1355     if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
1356         tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1357             pkg->port);
1358     }
1359 
1360     tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1361 }
1362 
1363 static void tb_stop(struct tb *tb)
1364 {
1365     struct tb_cm *tcm = tb_priv(tb);
1366     struct tb_tunnel *tunnel;
1367     struct tb_tunnel *n;
1368 
1369     cancel_delayed_work(&tcm->remove_work);
1370     /* tunnels are only present after everything has been initialized */
1371     list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1372         /*
1373          * DMA tunnels require the driver to be functional so we
1374          * tear them down. Other protocol tunnels can be left
1375          * intact.
1376          */
1377         if (tb_tunnel_is_dma(tunnel))
1378             tb_tunnel_deactivate(tunnel);
1379         tb_tunnel_free(tunnel);
1380     }
1381     tb_switch_remove(tb->root_switch);
1382     tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1383 }
1384 
1385 static int tb_scan_finalize_switch(struct device *dev, void *data)
1386 {
1387     if (tb_is_switch(dev)) {
1388         struct tb_switch *sw = tb_to_switch(dev);
1389 
1390         /*
1391          * If we found that the switch was already setup by the
1392          * boot firmware, mark it as authorized now before we
1393          * send uevent to userspace.
1394          */
1395         if (sw->boot)
1396             sw->authorized = 1;
1397 
1398         dev_set_uevent_suppress(dev, false);
1399         kobject_uevent(&dev->kobj, KOBJ_ADD);
1400         device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1401     }
1402 
1403     return 0;
1404 }
1405 
1406 static int tb_start(struct tb *tb)
1407 {
1408     struct tb_cm *tcm = tb_priv(tb);
1409     int ret;
1410 
1411     tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1412     if (IS_ERR(tb->root_switch))
1413         return PTR_ERR(tb->root_switch);
1414 
1415     /*
1416      * ICM firmware upgrade needs a running firmware and in native
1417      * (software CM) mode that is not available, so disable firmware
1418      * upgrade of the root switch.
1419      */
1420     tb->root_switch->no_nvm_upgrade = true;
1421     /* All USB4 routers support runtime PM */
1422     tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1423 
1424     ret = tb_switch_configure(tb->root_switch);
1425     if (ret) {
1426         tb_switch_put(tb->root_switch);
1427         return ret;
1428     }
1429 
1430     /* Announce the switch to the world */
1431     ret = tb_switch_add(tb->root_switch);
1432     if (ret) {
1433         tb_switch_put(tb->root_switch);
1434         return ret;
1435     }
1436 
1437     /*
1438      * To support highest CLx state, we set host router's TMU to
1439      * Normal mode.
1440      */
1441     tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
1442                 false);
1443     /* Enable TMU if it is off */
1444     tb_switch_tmu_enable(tb->root_switch);
1445     /* Full scan to discover devices added before the driver was loaded. */
1446     tb_scan_switch(tb->root_switch);
1447     /* Find out tunnels created by the boot firmware */
1448     tb_discover_tunnels(tb);
1449     /*
1450      * If the boot firmware did not create USB 3.x tunnels create them
1451      * now for the whole topology.
1452      */
1453     tb_create_usb3_tunnels(tb->root_switch);
1454     /* Add DP IN resources for the root switch */
1455     tb_add_dp_resources(tb->root_switch);
1456     /* Make the discovered switches available to the userspace */
1457     device_for_each_child(&tb->root_switch->dev, NULL,
1458                   tb_scan_finalize_switch);
1459 
1460     /* Allow tb_handle_hotplug to process events */
1461     tcm->hotplug_active = true;
1462     return 0;
1463 }
1464 
1465 static int tb_suspend_noirq(struct tb *tb)
1466 {
1467     struct tb_cm *tcm = tb_priv(tb);
1468 
1469     tb_dbg(tb, "suspending...\n");
1470     tb_disconnect_and_release_dp(tb);
1471     tb_switch_suspend(tb->root_switch, false);
1472     tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1473     tb_dbg(tb, "suspend finished\n");
1474 
1475     return 0;
1476 }
1477 
1478 static void tb_restore_children(struct tb_switch *sw)
1479 {
1480     struct tb_port *port;
1481     int ret;
1482 
1483     /* No need to restore if the router is already unplugged */
1484     if (sw->is_unplugged)
1485         return;
1486 
1487     /*
1488      * CL0s and CL1 are enabled and supported together.
1489      * Silently ignore CLx re-enabling in case CLx is not supported.
1490      */
1491     ret = tb_switch_enable_clx(sw, TB_CL1);
1492     if (ret && ret != -EOPNOTSUPP)
1493         tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
1494                tb_switch_clx_name(TB_CL1));
1495 
1496     if (tb_switch_is_clx_enabled(sw, TB_CL1))
1497         /*
1498          * To support highest CLx state, we set router's TMU to
1499          * Normal-Uni mode.
1500          */
1501         tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
1502     else
1503         /* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
1504         tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
1505 
1506     if (tb_enable_tmu(sw))
1507         tb_sw_warn(sw, "failed to restore TMU configuration\n");
1508 
1509     tb_switch_for_each_port(sw, port) {
1510         if (!tb_port_has_remote(port) && !port->xdomain)
1511             continue;
1512 
1513         if (port->remote) {
1514             tb_switch_lane_bonding_enable(port->remote->sw);
1515             tb_switch_configure_link(port->remote->sw);
1516 
1517             tb_restore_children(port->remote->sw);
1518         } else if (port->xdomain) {
1519             tb_port_configure_xdomain(port);
1520         }
1521     }
1522 }
1523 
1524 static int tb_resume_noirq(struct tb *tb)
1525 {
1526     struct tb_cm *tcm = tb_priv(tb);
1527     struct tb_tunnel *tunnel, *n;
1528     unsigned int usb3_delay = 0;
1529     LIST_HEAD(tunnels);
1530 
1531     tb_dbg(tb, "resuming...\n");
1532 
1533     /* Remove any PCIe devices the firmware might have set up */
1534     tb_switch_reset(tb->root_switch);
1535 
1536     tb_switch_resume(tb->root_switch);
1537     tb_free_invalid_tunnels(tb);
1538     tb_free_unplugged_children(tb->root_switch);
1539     tb_restore_children(tb->root_switch);
1540 
1541     /*
1542      * If we get here from suspend to disk the boot firmware or the
1543      * restore kernel might have created tunnels of its own. Since
1544      * we cannot be sure they are usable for us we find and tear
1545      * them down.
1546      */
1547     tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
1548     list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
1549         if (tb_tunnel_is_usb3(tunnel))
1550             usb3_delay = 500;
1551         tb_tunnel_deactivate(tunnel);
1552         tb_tunnel_free(tunnel);
1553     }
1554 
1555     /* Re-create our tunnels now */
1556     list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1557         /* USB3 requires delay before it can be re-activated */
1558         if (tb_tunnel_is_usb3(tunnel)) {
1559             msleep(usb3_delay);
1560             /* Only need to do it once */
1561             usb3_delay = 0;
1562         }
1563         tb_tunnel_restart(tunnel);
1564     }
1565     if (!list_empty(&tcm->tunnel_list)) {
1566         /*
1567          * the pcie links need some time to get going.
1568          * 100ms works for me...
1569          */
1570         tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
1571         msleep(100);
1572     }
1573      /* Allow tb_handle_hotplug to progress events */
1574     tcm->hotplug_active = true;
1575     tb_dbg(tb, "resume finished\n");
1576 
1577     return 0;
1578 }
1579 
1580 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
1581 {
1582     struct tb_port *port;
1583     int ret = 0;
1584 
1585     tb_switch_for_each_port(sw, port) {
1586         if (tb_is_upstream_port(port))
1587             continue;
1588         if (port->xdomain && port->xdomain->is_unplugged) {
1589             tb_retimer_remove_all(port);
1590             tb_xdomain_remove(port->xdomain);
1591             tb_port_unconfigure_xdomain(port);
1592             port->xdomain = NULL;
1593             ret++;
1594         } else if (port->remote) {
1595             ret += tb_free_unplugged_xdomains(port->remote->sw);
1596         }
1597     }
1598 
1599     return ret;
1600 }
1601 
1602 static int tb_freeze_noirq(struct tb *tb)
1603 {
1604     struct tb_cm *tcm = tb_priv(tb);
1605 
1606     tcm->hotplug_active = false;
1607     return 0;
1608 }
1609 
1610 static int tb_thaw_noirq(struct tb *tb)
1611 {
1612     struct tb_cm *tcm = tb_priv(tb);
1613 
1614     tcm->hotplug_active = true;
1615     return 0;
1616 }
1617 
1618 static void tb_complete(struct tb *tb)
1619 {
1620     /*
1621      * Release any unplugged XDomains. If another domain has been
1622      * swapped in place of an unplugged XDomain we need to run
1623      * another rescan.
1624      */
1625     mutex_lock(&tb->lock);
1626     if (tb_free_unplugged_xdomains(tb->root_switch))
1627         tb_scan_switch(tb->root_switch);
1628     mutex_unlock(&tb->lock);
1629 }
1630 
1631 static int tb_runtime_suspend(struct tb *tb)
1632 {
1633     struct tb_cm *tcm = tb_priv(tb);
1634 
1635     mutex_lock(&tb->lock);
1636     tb_switch_suspend(tb->root_switch, true);
1637     tcm->hotplug_active = false;
1638     mutex_unlock(&tb->lock);
1639 
1640     return 0;
1641 }
1642 
1643 static void tb_remove_work(struct work_struct *work)
1644 {
1645     struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
1646     struct tb *tb = tcm_to_tb(tcm);
1647 
1648     mutex_lock(&tb->lock);
1649     if (tb->root_switch) {
1650         tb_free_unplugged_children(tb->root_switch);
1651         tb_free_unplugged_xdomains(tb->root_switch);
1652     }
1653     mutex_unlock(&tb->lock);
1654 }
1655 
1656 static int tb_runtime_resume(struct tb *tb)
1657 {
1658     struct tb_cm *tcm = tb_priv(tb);
1659     struct tb_tunnel *tunnel, *n;
1660 
1661     mutex_lock(&tb->lock);
1662     tb_switch_resume(tb->root_switch);
1663     tb_free_invalid_tunnels(tb);
1664     tb_restore_children(tb->root_switch);
1665     list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
1666         tb_tunnel_restart(tunnel);
1667     tcm->hotplug_active = true;
1668     mutex_unlock(&tb->lock);
1669 
1670     /*
1671      * Schedule cleanup of any unplugged devices. Run this in a
1672      * separate thread to avoid possible deadlock if the device
1673      * removal runtime resumes the unplugged device.
1674      */
1675     queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
1676     return 0;
1677 }
1678 
1679 static const struct tb_cm_ops tb_cm_ops = {
1680     .start = tb_start,
1681     .stop = tb_stop,
1682     .suspend_noirq = tb_suspend_noirq,
1683     .resume_noirq = tb_resume_noirq,
1684     .freeze_noirq = tb_freeze_noirq,
1685     .thaw_noirq = tb_thaw_noirq,
1686     .complete = tb_complete,
1687     .runtime_suspend = tb_runtime_suspend,
1688     .runtime_resume = tb_runtime_resume,
1689     .handle_event = tb_handle_event,
1690     .disapprove_switch = tb_disconnect_pci,
1691     .approve_switch = tb_tunnel_pci,
1692     .approve_xdomain_paths = tb_approve_xdomain_paths,
1693     .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
1694 };
1695 
1696 /*
1697  * During suspend the Thunderbolt controller is reset and all PCIe
1698  * tunnels are lost. The NHI driver will try to reestablish all tunnels
1699  * during resume. This adds device links between the tunneled PCIe
1700  * downstream ports and the NHI so that the device core will make sure
1701  * NHI is resumed first before the rest.
1702  */
1703 static void tb_apple_add_links(struct tb_nhi *nhi)
1704 {
1705     struct pci_dev *upstream, *pdev;
1706 
1707     if (!x86_apple_machine)
1708         return;
1709 
1710     switch (nhi->pdev->device) {
1711     case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1712     case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1713     case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
1714     case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
1715         break;
1716     default:
1717         return;
1718     }
1719 
1720     upstream = pci_upstream_bridge(nhi->pdev);
1721     while (upstream) {
1722         if (!pci_is_pcie(upstream))
1723             return;
1724         if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
1725             break;
1726         upstream = pci_upstream_bridge(upstream);
1727     }
1728 
1729     if (!upstream)
1730         return;
1731 
1732     /*
1733      * For each hotplug downstream port, create add device link
1734      * back to NHI so that PCIe tunnels can be re-established after
1735      * sleep.
1736      */
1737     for_each_pci_bridge(pdev, upstream->subordinate) {
1738         const struct device_link *link;
1739 
1740         if (!pci_is_pcie(pdev))
1741             continue;
1742         if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
1743             !pdev->is_hotplug_bridge)
1744             continue;
1745 
1746         link = device_link_add(&pdev->dev, &nhi->pdev->dev,
1747                        DL_FLAG_AUTOREMOVE_SUPPLIER |
1748                        DL_FLAG_PM_RUNTIME);
1749         if (link) {
1750             dev_dbg(&nhi->pdev->dev, "created link from %s\n",
1751                 dev_name(&pdev->dev));
1752         } else {
1753             dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
1754                  dev_name(&pdev->dev));
1755         }
1756     }
1757 }
1758 
1759 struct tb *tb_probe(struct tb_nhi *nhi)
1760 {
1761     struct tb_cm *tcm;
1762     struct tb *tb;
1763 
1764     tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
1765     if (!tb)
1766         return NULL;
1767 
1768     if (tb_acpi_may_tunnel_pcie())
1769         tb->security_level = TB_SECURITY_USER;
1770     else
1771         tb->security_level = TB_SECURITY_NOPCIE;
1772 
1773     tb->cm_ops = &tb_cm_ops;
1774 
1775     tcm = tb_priv(tb);
1776     INIT_LIST_HEAD(&tcm->tunnel_list);
1777     INIT_LIST_HEAD(&tcm->dp_resources);
1778     INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
1779 
1780     tb_dbg(tb, "using software connection manager\n");
1781 
1782     tb_apple_add_links(nhi);
1783     tb_acpi_add_links(nhi);
1784 
1785     return tb;
1786 }