0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * USB4 specific functionality
0004  *
0005  * Copyright (C) 2019, Intel Corporation
0006  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
0007  *      Rajmohan Mani <rajmohan.mani@intel.com>
0008  */
0009 
0010 #include <linux/delay.h>
0011 #include <linux/ktime.h>
0012 
0013 #include "sb_regs.h"
0014 #include "tb.h"
0015 
0016 #define USB4_DATA_RETRIES       3
0017 
0018 enum usb4_sb_target {
0019     USB4_SB_TARGET_ROUTER,
0020     USB4_SB_TARGET_PARTNER,
0021     USB4_SB_TARGET_RETIMER,
0022 };
0023 
0024 #define USB4_NVM_READ_OFFSET_MASK   GENMASK(23, 2)
0025 #define USB4_NVM_READ_OFFSET_SHIFT  2
0026 #define USB4_NVM_READ_LENGTH_MASK   GENMASK(27, 24)
0027 #define USB4_NVM_READ_LENGTH_SHIFT  24
0028 
0029 #define USB4_NVM_SET_OFFSET_MASK    USB4_NVM_READ_OFFSET_MASK
0030 #define USB4_NVM_SET_OFFSET_SHIFT   USB4_NVM_READ_OFFSET_SHIFT
0031 
0032 #define USB4_DROM_ADDRESS_MASK      GENMASK(14, 2)
0033 #define USB4_DROM_ADDRESS_SHIFT     2
0034 #define USB4_DROM_SIZE_MASK     GENMASK(19, 15)
0035 #define USB4_DROM_SIZE_SHIFT        15
0036 
0037 #define USB4_NVM_SECTOR_SIZE_MASK   GENMASK(23, 0)
0038 
0039 #define USB4_BA_LENGTH_MASK     GENMASK(7, 0)
0040 #define USB4_BA_INDEX_MASK      GENMASK(15, 0)
0041 
0042 enum usb4_ba_index {
0043     USB4_BA_MAX_USB3 = 0x1,
0044     USB4_BA_MIN_DP_AUX = 0x2,
0045     USB4_BA_MIN_DP_MAIN = 0x3,
0046     USB4_BA_MAX_PCIE = 0x4,
0047     USB4_BA_MAX_HI = 0x5,
0048 };
0049 
0050 #define USB4_BA_VALUE_MASK      GENMASK(31, 16)
0051 #define USB4_BA_VALUE_SHIFT     16
0052 
0053 static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
0054                  u32 *metadata, u8 *status,
0055                  const void *tx_data, size_t tx_dwords,
0056                  void *rx_data, size_t rx_dwords)
0057 {
0058     u32 val;
0059     int ret;
0060 
0061     if (metadata) {
0062         ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
0063         if (ret)
0064             return ret;
0065     }
0066     if (tx_dwords) {
0067         ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
0068                   tx_dwords);
0069         if (ret)
0070             return ret;
0071     }
0072 
0073     val = opcode | ROUTER_CS_26_OV;
0074     ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
0075     if (ret)
0076         return ret;
0077 
0078     ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
0079     if (ret)
0080         return ret;
0081 
0082     ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
0083     if (ret)
0084         return ret;
0085 
0086     if (val & ROUTER_CS_26_ONS)
0087         return -EOPNOTSUPP;
0088 
0089     if (status)
0090         *status = (val & ROUTER_CS_26_STATUS_MASK) >>
0091             ROUTER_CS_26_STATUS_SHIFT;
0092 
0093     if (metadata) {
0094         ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
0095         if (ret)
0096             return ret;
0097     }
0098     if (rx_dwords) {
0099         ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
0100                  rx_dwords);
0101         if (ret)
0102             return ret;
0103     }
0104 
0105     return 0;
0106 }
0107 
0108 static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
0109                 u8 *status, const void *tx_data, size_t tx_dwords,
0110                 void *rx_data, size_t rx_dwords)
0111 {
0112     const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
0113 
0114     if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
0115         return -EINVAL;
0116 
0117     /*
0118      * If the connection manager implementation provides USB4 router
0119      * operation proxy callback, call it here instead of running the
0120      * operation natively.
0121      */
0122     if (cm_ops->usb4_switch_op) {
0123         int ret;
0124 
0125         ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
0126                          tx_data, tx_dwords, rx_data,
0127                          rx_dwords);
0128         if (ret != -EOPNOTSUPP)
0129             return ret;
0130 
0131         /*
0132          * If the proxy was not supported then run the native
0133          * router operation instead.
0134          */
0135     }
0136 
0137     return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
0138                      tx_dwords, rx_data, rx_dwords);
0139 }
0140 
0141 static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
0142                  u32 *metadata, u8 *status)
0143 {
0144     return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
0145 }
0146 
0147 static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
0148                       u32 *metadata, u8 *status,
0149                       const void *tx_data, size_t tx_dwords,
0150                       void *rx_data, size_t rx_dwords)
0151 {
0152     return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
0153                 tx_dwords, rx_data, rx_dwords);
0154 }
0155 
0156 static void usb4_switch_check_wakes(struct tb_switch *sw)
0157 {
0158     struct tb_port *port;
0159     bool wakeup = false;
0160     u32 val;
0161 
0162     if (!device_may_wakeup(&sw->dev))
0163         return;
0164 
0165     if (tb_route(sw)) {
0166         if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
0167             return;
0168 
0169         tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
0170               (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
0171               (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
0172 
0173         wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
0174     }
0175 
0176     /* Check for any connected downstream ports for USB4 wake */
0177     tb_switch_for_each_port(sw, port) {
0178         if (!tb_port_has_remote(port))
0179             continue;
0180 
0181         if (tb_port_read(port, &val, TB_CFG_PORT,
0182                  port->cap_usb4 + PORT_CS_18, 1))
0183             break;
0184 
0185         tb_port_dbg(port, "USB4 wake: %s\n",
0186                 (val & PORT_CS_18_WOU4S) ? "yes" : "no");
0187 
0188         if (val & PORT_CS_18_WOU4S)
0189             wakeup = true;
0190     }
0191 
0192     if (wakeup)
0193         pm_wakeup_event(&sw->dev, 0);
0194 }
0195 
0196 static bool link_is_usb4(struct tb_port *port)
0197 {
0198     u32 val;
0199 
0200     if (!port->cap_usb4)
0201         return false;
0202 
0203     if (tb_port_read(port, &val, TB_CFG_PORT,
0204              port->cap_usb4 + PORT_CS_18, 1))
0205         return false;
0206 
0207     return !(val & PORT_CS_18_TCM);
0208 }
0209 
0210 /**
0211  * usb4_switch_setup() - Additional setup for USB4 device
0212  * @sw: USB4 router to setup
0213  *
0214  * USB4 routers need additional settings in order to enable all the
0215  * tunneling. This function enables USB and PCIe tunneling if it can be
0216  * enabled (e.g. the parent switch also supports them). If USB tunneling
0217  * is not available for some reason (for example, there is a Thunderbolt 3
0218  * switch upstream), then the internal xHCI controller is enabled
0219  * instead.
0220  */
0221 int usb4_switch_setup(struct tb_switch *sw)
0222 {
0223     struct tb_port *downstream_port;
0224     struct tb_switch *parent;
0225     bool tbt3, xhci;
0226     u32 val = 0;
0227     int ret;
0228 
0229     usb4_switch_check_wakes(sw);
0230 
0231     if (!tb_route(sw))
0232         return 0;
0233 
0234     ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
0235     if (ret)
0236         return ret;
0237 
0238     parent = tb_switch_parent(sw);
0239     downstream_port = tb_port_at(tb_route(sw), parent);
0240     sw->link_usb4 = link_is_usb4(downstream_port);
0241     tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");
0242 
0243     xhci = val & ROUTER_CS_6_HCI;
0244     tbt3 = !(val & ROUTER_CS_6_TNS);
0245 
0246     tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
0247           tbt3 ? "yes" : "no", xhci ? "yes" : "no");
0248 
0249     ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
0250     if (ret)
0251         return ret;
0252 
0253     if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
0254         tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
0255         val |= ROUTER_CS_5_UTO;
0256         xhci = false;
0257     }
0258 
0259     /*
0260      * Only enable PCIe tunneling if the parent router supports it
0261      * and it is not disabled.
0262      */
0263     if (tb_acpi_may_tunnel_pcie() &&
0264         tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
0265         val |= ROUTER_CS_5_PTO;
0266         /*
0267          * xHCI can be enabled if PCIe tunneling is supported
0268          * and the parent does not have any USB3 downstream
0269          * adapters (so we cannot do USB 3.x tunneling).
0270          */
0271         if (xhci)
0272             val |= ROUTER_CS_5_HCO;
0273     }
0274 
0275     /* TBT3 supported by the CM */
0276     val |= ROUTER_CS_5_C3S;
0277     /* Tunneling configuration is ready now */
0278     val |= ROUTER_CS_5_CV;
0279 
0280     ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
0281     if (ret)
0282         return ret;
0283 
0284     return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
0285                       ROUTER_CS_6_CR, 50);
0286 }
0287 
0288 /**
0289  * usb4_switch_read_uid() - Read UID from USB4 router
0290  * @sw: USB4 router
0291  * @uid: UID is stored here
0292  *
0293  * Reads 64-bit UID from USB4 router config space.
0294  */
0295 int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
0296 {
0297     return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
0298 }
0299 
0300 static int usb4_switch_drom_read_block(void *data,
0301                        unsigned int dwaddress, void *buf,
0302                        size_t dwords)
0303 {
0304     struct tb_switch *sw = data;
0305     u8 status = 0;
0306     u32 metadata;
0307     int ret;
0308 
0309     metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
0310     metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
0311         USB4_DROM_ADDRESS_MASK;
0312 
0313     ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
0314                   &status, NULL, 0, buf, dwords);
0315     if (ret)
0316         return ret;
0317 
0318     return status ? -EIO : 0;
0319 }
0320 
0321 /**
0322  * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
0323  * @sw: USB4 router
0324  * @address: Byte address inside DROM to start reading
0325  * @buf: Buffer where the DROM content is stored
0326  * @size: Number of bytes to read from DROM
0327  *
0328  * Uses USB4 router operations to read router DROM. For devices this
0329  * should always work but for hosts it may return %-EOPNOTSUPP in which
0330  * case the host router does not have DROM.
0331  */
0332 int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
0333               size_t size)
0334 {
0335     return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
0336                 usb4_switch_drom_read_block, sw);
0337 }
0338 
0339 /**
0340  * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
0341  * @sw: USB4 router
0342  *
0343  * Checks whether conditions are met so that lane bonding can be
0344  * established with the upstream router. Call only for device routers.
0345  */
0346 bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
0347 {
0348     struct tb_port *up;
0349     int ret;
0350     u32 val;
0351 
0352     up = tb_upstream_port(sw);
0353     ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
0354     if (ret)
0355         return false;
0356 
0357     return !!(val & PORT_CS_18_BE);
0358 }
0359 
0360 /**
0361  * usb4_switch_set_wake() - Enable/disable wake
0362  * @sw: USB4 router
0363  * @flags: Wakeup flags (%0 to disable)
0364  *
0365  * Enables/disables router to wake up from sleep.
0366  */
0367 int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
0368 {
0369     struct tb_port *port;
0370     u64 route = tb_route(sw);
0371     u32 val;
0372     int ret;
0373 
0374     /*
0375      * Enable wakes coming from all USB4 downstream ports (from
0376      * child routers). For device routers do this also for the
0377      * upstream USB4 port.
0378      */
0379     tb_switch_for_each_port(sw, port) {
0380         if (!tb_port_is_null(port))
0381             continue;
0382         if (!route && tb_is_upstream_port(port))
0383             continue;
0384         if (!port->cap_usb4)
0385             continue;
0386 
0387         ret = tb_port_read(port, &val, TB_CFG_PORT,
0388                    port->cap_usb4 + PORT_CS_19, 1);
0389         if (ret)
0390             return ret;
0391 
0392         val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
0393 
0394         if (tb_is_upstream_port(port)) {
0395             val |= PORT_CS_19_WOU4;
0396         } else {
0397             bool configured = val & PORT_CS_19_PC;
0398 
0399             if ((flags & TB_WAKE_ON_CONNECT) && !configured)
0400                 val |= PORT_CS_19_WOC;
0401             if ((flags & TB_WAKE_ON_DISCONNECT) && configured)
0402                 val |= PORT_CS_19_WOD;
0403             if ((flags & TB_WAKE_ON_USB4) && configured)
0404                 val |= PORT_CS_19_WOU4;
0405         }
0406 
0407         ret = tb_port_write(port, &val, TB_CFG_PORT,
0408                     port->cap_usb4 + PORT_CS_19, 1);
0409         if (ret)
0410             return ret;
0411     }
0412 
0413     /*
0414      * Enable wakes from PCIe, USB 3.x and DP on this router. Only
0415      * needed for device routers.
0416      */
0417     if (route) {
0418         ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
0419         if (ret)
0420             return ret;
0421 
0422         val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
0423         if (flags & TB_WAKE_ON_USB3)
0424             val |= ROUTER_CS_5_WOU;
0425         if (flags & TB_WAKE_ON_PCIE)
0426             val |= ROUTER_CS_5_WOP;
0427         if (flags & TB_WAKE_ON_DP)
0428             val |= ROUTER_CS_5_WOD;
0429 
0430         ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
0431         if (ret)
0432             return ret;
0433     }
0434 
0435     return 0;
0436 }
0437 
0438 /**
0439  * usb4_switch_set_sleep() - Prepare the router to enter sleep
0440  * @sw: USB4 router
0441  *
0442  * Sets sleep bit for the router. Returns when the router sleep ready
0443  * bit has been asserted.
0444  */
0445 int usb4_switch_set_sleep(struct tb_switch *sw)
0446 {
0447     int ret;
0448     u32 val;
0449 
0450     /* Set sleep bit and wait for sleep ready to be asserted */
0451     ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
0452     if (ret)
0453         return ret;
0454 
0455     val |= ROUTER_CS_5_SLP;
0456 
0457     ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
0458     if (ret)
0459         return ret;
0460 
0461     return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
0462                       ROUTER_CS_6_SLPR, 500);
0463 }
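
/*
 * Illustrative sketch, not part of the driver: how a connection manager
 * could arm wakes on a device router and put it to sleep before system
 * suspend using the two helpers above. The flag combination used here is
 * only an example.
 */
static int __maybe_unused usb4_suspend_example(struct tb_switch *sw)
{
	int ret;

	/* Wake up on new connections and on USB4 wake packets */
	ret = usb4_switch_set_wake(sw, TB_WAKE_ON_CONNECT | TB_WAKE_ON_USB4);
	if (ret)
		return ret;

	/* Set the sleep bit and wait until the router reports sleep ready */
	return usb4_switch_set_sleep(sw);
}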
0464 
0465 /**
0466  * usb4_switch_nvm_sector_size() - Return router NVM sector size
0467  * @sw: USB4 router
0468  *
0469  * If the router supports NVM operations this function returns the NVM
0470  * sector size in bytes. If NVM operations are not supported returns
0471  * %-EOPNOTSUPP.
0472  */
0473 int usb4_switch_nvm_sector_size(struct tb_switch *sw)
0474 {
0475     u32 metadata;
0476     u8 status;
0477     int ret;
0478 
0479     ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
0480                  &status);
0481     if (ret)
0482         return ret;
0483 
0484     if (status)
0485         return status == 0x2 ? -EOPNOTSUPP : -EIO;
0486 
0487     return metadata & USB4_NVM_SECTOR_SIZE_MASK;
0488 }
0489 
0490 static int usb4_switch_nvm_read_block(void *data,
0491     unsigned int dwaddress, void *buf, size_t dwords)
0492 {
0493     struct tb_switch *sw = data;
0494     u8 status = 0;
0495     u32 metadata;
0496     int ret;
0497 
0498     metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
0499            USB4_NVM_READ_LENGTH_MASK;
0500     metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
0501            USB4_NVM_READ_OFFSET_MASK;
0502 
0503     ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
0504                   &status, NULL, 0, buf, dwords);
0505     if (ret)
0506         return ret;
0507 
0508     return status ? -EIO : 0;
0509 }
0510 
0511 /**
0512  * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
0513  * @sw: USB4 router
0514  * @address: Starting address in bytes
0515  * @buf: Read data is placed here
0516  * @size: How many bytes to read
0517  *
0518  * Reads NVM contents of the router. If NVM is not supported returns
0519  * %-EOPNOTSUPP.
0520  */
0521 int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
0522              size_t size)
0523 {
0524     return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
0525                 usb4_switch_nvm_read_block, sw);
0526 }
0527 
0528 /**
0529  * usb4_switch_nvm_set_offset() - Set NVM write offset
0530  * @sw: USB4 router
0531  * @address: Start offset
0532  *
0533  * Explicitly sets NVM write offset. Normally when writing to NVM this
0534  * is done automatically by usb4_switch_nvm_write().
0535  *
0536  * Returns %0 on success and negative errno if there was a failure.
0537  */
0538 int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
0539 {
0540     u32 metadata, dwaddress;
0541     u8 status = 0;
0542     int ret;
0543 
0544     dwaddress = address / 4;
0545     metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
0546            USB4_NVM_SET_OFFSET_MASK;
0547 
0548     ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
0549                  &status);
0550     if (ret)
0551         return ret;
0552 
0553     return status ? -EIO : 0;
0554 }
0555 
0556 static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
0557                         const void *buf, size_t dwords)
0558 {
0559     struct tb_switch *sw = data;
0560     u8 status;
0561     int ret;
0562 
0563     ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
0564                   buf, dwords, NULL, 0);
0565     if (ret)
0566         return ret;
0567 
0568     return status ? -EIO : 0;
0569 }
0570 
0571 /**
0572  * usb4_switch_nvm_write() - Write to the router NVM
0573  * @sw: USB4 router
0574  * @address: Start address where to write in bytes
0575  * @buf: Pointer to the data to write
0576  * @size: Size of @buf in bytes
0577  *
0578  * Writes @buf to the router NVM using USB4 router operations. If NVM
0579  * write is not supported returns %-EOPNOTSUPP.
0580  */
0581 int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
0582               const void *buf, size_t size)
0583 {
0584     int ret;
0585 
0586     ret = usb4_switch_nvm_set_offset(sw, address);
0587     if (ret)
0588         return ret;
0589 
0590     return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
0591                  usb4_switch_nvm_write_next_block, sw);
0592 }
0593 
0594 /**
0595  * usb4_switch_nvm_authenticate() - Authenticate new NVM
0596  * @sw: USB4 router
0597  *
0598  * After the new NVM has been written via usb4_switch_nvm_write(), this
0599  * function triggers NVM authentication process. The router gets power
0600  * cycled and if the authentication is successful the new NVM starts
0601  * running. In case of failure returns negative errno.
0602  *
0603  * The caller should call usb4_switch_nvm_authenticate_status() to read
0604  * the status of the authentication after power cycle. It should be the
0605  * first router operation to avoid the status being lost.
0606  */
0607 int usb4_switch_nvm_authenticate(struct tb_switch *sw)
0608 {
0609     int ret;
0610 
0611     ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
0612     switch (ret) {
0613     /*
0614      * The router is power cycled once NVM_AUTH is started so it is
0615      * expected to get any of the following errors back.
0616      */
0617     case -EACCES:
0618     case -ENOTCONN:
0619     case -ETIMEDOUT:
0620         return 0;
0621 
0622     default:
0623         return ret;
0624     }
0625 }
0626 
0627 /**
0628  * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
0629  * @sw: USB4 router
0630  * @status: Status code of the operation
0631  *
0632  * The function checks if there is status available from the last NVM
0633  * authenticate router operation. If there is status then %0 is returned
0634  * and the status code is placed in @status. Returns negative errno in case
0635  * of failure.
0636  *
0637  * Must be called before any other router operation.
0638  */
0639 int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
0640 {
0641     const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
0642     u16 opcode;
0643     u32 val;
0644     int ret;
0645 
0646     if (cm_ops->usb4_switch_nvm_authenticate_status) {
0647         ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
0648         if (ret != -EOPNOTSUPP)
0649             return ret;
0650     }
0651 
0652     ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
0653     if (ret)
0654         return ret;
0655 
0656     /* Check that the opcode is correct */
0657     opcode = val & ROUTER_CS_26_OPCODE_MASK;
0658     if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
0659         if (val & ROUTER_CS_26_OV)
0660             return -EBUSY;
0661         if (val & ROUTER_CS_26_ONS)
0662             return -EOPNOTSUPP;
0663 
0664         *status = (val & ROUTER_CS_26_STATUS_MASK) >>
0665             ROUTER_CS_26_STATUS_SHIFT;
0666     } else {
0667         *status = 0;
0668     }
0669 
0670     return 0;
0671 }
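
/*
 * Illustrative sketch, not part of the driver: a router NVM upgrade
 * sequence built on the helpers above. @image and @image_size are assumed
 * to hold a validated NVM image. The power cycle and re-enumeration of the
 * router that happen between authentication and reading the status are
 * glossed over here.
 */
static int __maybe_unused usb4_nvm_upgrade_example(struct tb_switch *sw,
						   const void *image,
						   size_t image_size)
{
	u32 auth_status;
	int ret;

	/* Write the new image starting from the beginning of the NVM */
	ret = usb4_switch_nvm_write(sw, 0, image, image_size);
	if (ret)
		return ret;

	/* Start authentication; the router power cycles itself */
	ret = usb4_switch_nvm_authenticate(sw);
	if (ret)
		return ret;

	/*
	 * Once the router is back, read the result before issuing any
	 * other router operation so the status is not lost.
	 */
	ret = usb4_switch_nvm_authenticate_status(sw, &auth_status);
	if (ret)
		return ret;

	return auth_status ? -EIO : 0;
}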
0672 
0673 /**
0674  * usb4_switch_credits_init() - Read buffer allocation parameters
0675  * @sw: USB4 router
0676  *
0677  * Reads @sw buffer allocation parameters and initializes @sw buffer
0678  * allocation fields accordingly. Specifically @sw->credit_allocation
0679  * is set to %true if these parameters can be used in tunneling.
0680  *
0681  * Returns %0 on success and negative errno otherwise.
0682  */
0683 int usb4_switch_credits_init(struct tb_switch *sw)
0684 {
0685     int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
0686     int ret, length, i, nports;
0687     const struct tb_port *port;
0688     u32 data[NVM_DATA_DWORDS];
0689     u32 metadata = 0;
0690     u8 status = 0;
0691 
0692     memset(data, 0, sizeof(data));
0693     ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
0694                   &status, NULL, 0, data, ARRAY_SIZE(data));
0695     if (ret)
0696         return ret;
0697     if (status)
0698         return -EIO;
0699 
0700     length = metadata & USB4_BA_LENGTH_MASK;
0701     if (WARN_ON(length > ARRAY_SIZE(data)))
0702         return -EMSGSIZE;
0703 
0704     max_usb3 = -1;
0705     min_dp_aux = -1;
0706     min_dp_main = -1;
0707     max_pcie = -1;
0708     max_dma = -1;
0709 
0710     tb_sw_dbg(sw, "credit allocation parameters:\n");
0711 
0712     for (i = 0; i < length; i++) {
0713         u16 index, value;
0714 
0715         index = data[i] & USB4_BA_INDEX_MASK;
0716         value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;
0717 
0718         switch (index) {
0719         case USB4_BA_MAX_USB3:
0720             tb_sw_dbg(sw, " USB3: %u\n", value);
0721             max_usb3 = value;
0722             break;
0723         case USB4_BA_MIN_DP_AUX:
0724             tb_sw_dbg(sw, " DP AUX: %u\n", value);
0725             min_dp_aux = value;
0726             break;
0727         case USB4_BA_MIN_DP_MAIN:
0728             tb_sw_dbg(sw, " DP main: %u\n", value);
0729             min_dp_main = value;
0730             break;
0731         case USB4_BA_MAX_PCIE:
0732             tb_sw_dbg(sw, " PCIe: %u\n", value);
0733             max_pcie = value;
0734             break;
0735         case USB4_BA_MAX_HI:
0736             tb_sw_dbg(sw, " DMA: %u\n", value);
0737             max_dma = value;
0738             break;
0739         default:
0740             tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
0741                   index);
0742             break;
0743         }
0744     }
0745 
0746     /*
0747      * Validate the buffer allocation preferences. If we find
0748      * issues, log a warning and fall back to using the hard-coded
0749      * values.
0750      */
0751 
0752     /* Host router must report baMaxHI */
0753     if (!tb_route(sw) && max_dma < 0) {
0754         tb_sw_warn(sw, "host router is missing baMaxHI\n");
0755         goto err_invalid;
0756     }
0757 
0758     nports = 0;
0759     tb_switch_for_each_port(sw, port) {
0760         if (tb_port_is_null(port))
0761             nports++;
0762     }
0763 
0764     /* Must have DP buffer allocation (multiple USB4 ports) */
0765     if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
0766         tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
0767         goto err_invalid;
0768     }
0769 
0770     tb_switch_for_each_port(sw, port) {
0771         if (tb_port_is_dpout(port) && min_dp_main < 0) {
0772             tb_sw_warn(sw, "missing baMinDPmain");
0773             goto err_invalid;
0774         }
0775         if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
0776             min_dp_aux < 0) {
0777             tb_sw_warn(sw, "missing baMinDPaux");
0778             goto err_invalid;
0779         }
0780         if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
0781             max_usb3 < 0) {
0782             tb_sw_warn(sw, "missing baMaxUSB3");
0783             goto err_invalid;
0784         }
0785         if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
0786             max_pcie < 0) {
0787             tb_sw_warn(sw, "missing baMaxPCIe");
0788             goto err_invalid;
0789         }
0790     }
0791 
0792     /*
0793      * Buffer allocation passed the validation so we can use it in
0794      * path creation.
0795      */
0796     sw->credit_allocation = true;
0797     if (max_usb3 > 0)
0798         sw->max_usb3_credits = max_usb3;
0799     if (min_dp_aux > 0)
0800         sw->min_dp_aux_credits = min_dp_aux;
0801     if (min_dp_main > 0)
0802         sw->min_dp_main_credits = min_dp_main;
0803     if (max_pcie > 0)
0804         sw->max_pcie_credits = max_pcie;
0805     if (max_dma > 0)
0806         sw->max_dma_credits = max_dma;
0807 
0808     return 0;
0809 
0810 err_invalid:
0811     return -EINVAL;
0812 }
0813 
0814 /**
0815  * usb4_switch_query_dp_resource() - Query availability of DP IN resource
0816  * @sw: USB4 router
0817  * @in: DP IN adapter
0818  *
0819  * For DP tunneling this function can be used to query availability of
0820  * DP IN resource. Returns true if the resource is available for DP
0821  * tunneling, false otherwise.
0822  */
0823 bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
0824 {
0825     u32 metadata = in->port;
0826     u8 status;
0827     int ret;
0828 
0829     ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
0830                  &status);
0831     /*
0832      * If DP resource allocation is not supported assume it is
0833      * always available.
0834      */
0835     if (ret == -EOPNOTSUPP)
0836         return true;
0837     else if (ret)
0838         return false;
0839 
0840     return !status;
0841 }
0842 
0843 /**
0844  * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
0845  * @sw: USB4 router
0846  * @in: DP IN adapter
0847  *
0848  * Allocates DP IN resource for DP tunneling using USB4 router
0849  * operations. If the resource was allocated returns %0. Otherwise
0850  * returns negative errno, in particular %-EBUSY if the resource is
0851  * already allocated.
0852  */
0853 int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
0854 {
0855     u32 metadata = in->port;
0856     u8 status;
0857     int ret;
0858 
0859     ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
0860                  &status);
0861     if (ret == -EOPNOTSUPP)
0862         return 0;
0863     else if (ret)
0864         return ret;
0865 
0866     return status ? -EBUSY : 0;
0867 }
0868 
0869 /**
0870  * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
0871  * @sw: USB4 router
0872  * @in: DP IN adapter
0873  *
0874  * Releases the previously allocated DP IN resource.
0875  */
0876 int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
0877 {
0878     u32 metadata = in->port;
0879     u8 status;
0880     int ret;
0881 
0882     ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
0883                  &status);
0884     if (ret == -EOPNOTSUPP)
0885         return 0;
0886     else if (ret)
0887         return ret;
0888 
0889     return status ? -EIO : 0;
0890 }
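
/*
 * Illustrative sketch, not part of the driver: the DP IN resource helpers
 * above are intended to be used as a query/allocate/release sequence
 * around DP tunnel setup. The tunnel setup itself is elided.
 */
static int __maybe_unused usb4_dp_resource_example(struct tb_switch *sw,
						   struct tb_port *in)
{
	int ret;

	if (!usb4_switch_query_dp_resource(sw, in))
		return -EBUSY;

	ret = usb4_switch_alloc_dp_resource(sw, in);
	if (ret)
		return ret;

	/* ... establish the DP tunnel through @in here ... */

	/* Release the resource once the tunnel is torn down */
	return usb4_switch_dealloc_dp_resource(sw, in);
}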
0891 
0892 static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
0893 {
0894     struct tb_port *p;
0895     int usb4_idx = 0;
0896 
0897     /* Assume port is primary */
0898     tb_switch_for_each_port(sw, p) {
0899         if (!tb_port_is_null(p))
0900             continue;
0901         if (tb_is_upstream_port(p))
0902             continue;
0903         if (!p->link_nr) {
0904             if (p == port)
0905                 break;
0906             usb4_idx++;
0907         }
0908     }
0909 
0910     return usb4_idx;
0911 }
0912 
0913 /**
0914  * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
0915  * @sw: USB4 router
0916  * @port: USB4 port
0917  *
0918  * USB4 routers have direct mapping between USB4 ports and PCIe
0919  * downstream adapters where the PCIe topology is extended. This
0920  * function returns the corresponding downstream PCIe adapter or %NULL
0921  * if no such mapping was possible.
0922  */
0923 struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
0924                       const struct tb_port *port)
0925 {
0926     int usb4_idx = usb4_port_idx(sw, port);
0927     struct tb_port *p;
0928     int pcie_idx = 0;
0929 
0930     /* Find PCIe down port matching usb4_port */
0931     tb_switch_for_each_port(sw, p) {
0932         if (!tb_port_is_pcie_down(p))
0933             continue;
0934 
0935         if (pcie_idx == usb4_idx)
0936             return p;
0937 
0938         pcie_idx++;
0939     }
0940 
0941     return NULL;
0942 }
0943 
0944 /**
0945  * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
0946  * @sw: USB4 router
0947  * @port: USB4 port
0948  *
0949  * USB4 routers have direct mapping between USB4 ports and USB 3.x
0950  * downstream adapters where the USB 3.x topology is extended. This
0951  * function returns the corresponding downstream USB 3.x adapter or
0952  * %NULL if no such mapping was possible.
0953  */
0954 struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
0955                       const struct tb_port *port)
0956 {
0957     int usb4_idx = usb4_port_idx(sw, port);
0958     struct tb_port *p;
0959     int usb_idx = 0;
0960 
0961     /* Find USB3 down port matching usb4_port */
0962     tb_switch_for_each_port(sw, p) {
0963         if (!tb_port_is_usb3_down(p))
0964             continue;
0965 
0966         if (usb_idx == usb4_idx)
0967             return p;
0968 
0969         usb_idx++;
0970     }
0971 
0972     return NULL;
0973 }
0974 
0975 /**
0976  * usb4_switch_add_ports() - Add USB4 ports for this router
0977  * @sw: USB4 router
0978  *
0979  * For a USB4 router, finds all USB4 ports and registers a device for
0980  * each. Can be called for any router.
0981  *
0982  * Return %0 in case of success and negative errno in case of failure.
0983  */
0984 int usb4_switch_add_ports(struct tb_switch *sw)
0985 {
0986     struct tb_port *port;
0987 
0988     if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
0989         return 0;
0990 
0991     tb_switch_for_each_port(sw, port) {
0992         struct usb4_port *usb4;
0993 
0994         if (!tb_port_is_null(port))
0995             continue;
0996         if (!port->cap_usb4)
0997             continue;
0998 
0999         usb4 = usb4_port_device_add(port);
1000         if (IS_ERR(usb4)) {
1001             usb4_switch_remove_ports(sw);
1002             return PTR_ERR(usb4);
1003         }
1004 
1005         port->usb4 = usb4;
1006     }
1007 
1008     return 0;
1009 }
1010 
1011 /**
1012  * usb4_switch_remove_ports() - Removes USB4 ports from this router
1013  * @sw: USB4 router
1014  *
1015  * Unregisters previously registered USB4 ports.
1016  */
1017 void usb4_switch_remove_ports(struct tb_switch *sw)
1018 {
1019     struct tb_port *port;
1020 
1021     tb_switch_for_each_port(sw, port) {
1022         if (port->usb4) {
1023             usb4_port_device_remove(port->usb4);
1024             port->usb4 = NULL;
1025         }
1026     }
1027 }
1028 
1029 /**
1030  * usb4_port_unlock() - Unlock USB4 downstream port
1031  * @port: USB4 port to unlock
1032  *
1033  * Unlocks USB4 downstream port so that the connection manager can
1034  * access the router below this port.
1035  */
1036 int usb4_port_unlock(struct tb_port *port)
1037 {
1038     int ret;
1039     u32 val;
1040 
1041     ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
1042     if (ret)
1043         return ret;
1044 
1045     val &= ~ADP_CS_4_LCK;
1046     return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
1047 }
1048 
1049 static int usb4_port_set_configured(struct tb_port *port, bool configured)
1050 {
1051     int ret;
1052     u32 val;
1053 
1054     if (!port->cap_usb4)
1055         return -EINVAL;
1056 
1057     ret = tb_port_read(port, &val, TB_CFG_PORT,
1058                port->cap_usb4 + PORT_CS_19, 1);
1059     if (ret)
1060         return ret;
1061 
1062     if (configured)
1063         val |= PORT_CS_19_PC;
1064     else
1065         val &= ~PORT_CS_19_PC;
1066 
1067     return tb_port_write(port, &val, TB_CFG_PORT,
1068                  port->cap_usb4 + PORT_CS_19, 1);
1069 }
1070 
1071 /**
1072  * usb4_port_configure() - Set USB4 port configured
1073  * @port: USB4 port
1074  *
1075  * Sets the USB4 link to be configured for power management purposes.
1076  */
1077 int usb4_port_configure(struct tb_port *port)
1078 {
1079     return usb4_port_set_configured(port, true);
1080 }
1081 
1082 /**
1083  * usb4_port_unconfigure() - Set USB4 port unconfigured
1084  * @port: USB4 port
1085  *
1086  * Sets the USB4 link to be unconfigured for power management purposes.
1087  */
1088 void usb4_port_unconfigure(struct tb_port *port)
1089 {
1090     usb4_port_set_configured(port, false);
1091 }
1092 
1093 static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
1094 {
1095     int ret;
1096     u32 val;
1097 
1098     if (!port->cap_usb4)
1099         return -EINVAL;
1100 
1101     ret = tb_port_read(port, &val, TB_CFG_PORT,
1102                port->cap_usb4 + PORT_CS_19, 1);
1103     if (ret)
1104         return ret;
1105 
1106     if (configured)
1107         val |= PORT_CS_19_PID;
1108     else
1109         val &= ~PORT_CS_19_PID;
1110 
1111     return tb_port_write(port, &val, TB_CFG_PORT,
1112                  port->cap_usb4 + PORT_CS_19, 1);
1113 }
1114 
1115 /**
1116  * usb4_port_configure_xdomain() - Configure port for XDomain
1117  * @port: USB4 port connected to another host
1118  *
1119  * Marks the USB4 port as being connected to another host. Returns %0 in
1120  * case of success and negative errno in case of failure.
1121  */
1122 int usb4_port_configure_xdomain(struct tb_port *port)
1123 {
1124     return usb4_set_xdomain_configured(port, true);
1125 }
1126 
1127 /**
1128  * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
1129  * @port: USB4 port that was connected to another host
1130  *
1131  * Clears USB4 port from being marked as XDomain.
1132  */
1133 void usb4_port_unconfigure_xdomain(struct tb_port *port)
1134 {
1135     usb4_set_xdomain_configured(port, false);
1136 }
1137 
1138 static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
1139                   u32 value, int timeout_msec)
1140 {
1141     ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1142 
1143     do {
1144         u32 val;
1145         int ret;
1146 
1147         ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
1148         if (ret)
1149             return ret;
1150 
1151         if ((val & bit) == value)
1152             return 0;
1153 
1154         usleep_range(50, 100);
1155     } while (ktime_before(ktime_get(), timeout));
1156 
1157     return -ETIMEDOUT;
1158 }
1159 
1160 static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
1161 {
1162     if (dwords > NVM_DATA_DWORDS)
1163         return -EINVAL;
1164 
1165     return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
1166                 dwords);
1167 }
1168 
1169 static int usb4_port_write_data(struct tb_port *port, const void *data,
1170                 size_t dwords)
1171 {
1172     if (dwords > NVM_DATA_DWORDS)
1173         return -EINVAL;
1174 
1175     return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
1176                  dwords);
1177 }
1178 
1179 static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
1180                  u8 index, u8 reg, void *buf, u8 size)
1181 {
1182     size_t dwords = DIV_ROUND_UP(size, 4);
1183     int ret;
1184     u32 val;
1185 
1186     if (!port->cap_usb4)
1187         return -EINVAL;
1188 
1189     val = reg;
1190     val |= size << PORT_CS_1_LENGTH_SHIFT;
1191     val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
1192     if (target == USB4_SB_TARGET_RETIMER)
1193         val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
1194     val |= PORT_CS_1_PND;
1195 
1196     ret = tb_port_write(port, &val, TB_CFG_PORT,
1197                 port->cap_usb4 + PORT_CS_1, 1);
1198     if (ret)
1199         return ret;
1200 
1201     ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
1202                      PORT_CS_1_PND, 0, 500);
1203     if (ret)
1204         return ret;
1205 
1206     ret = tb_port_read(port, &val, TB_CFG_PORT,
1207                 port->cap_usb4 + PORT_CS_1, 1);
1208     if (ret)
1209         return ret;
1210 
1211     if (val & PORT_CS_1_NR)
1212         return -ENODEV;
1213     if (val & PORT_CS_1_RC)
1214         return -EIO;
1215 
1216     return buf ? usb4_port_read_data(port, buf, dwords) : 0;
1217 }
1218 
1219 static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
1220                   u8 index, u8 reg, const void *buf, u8 size)
1221 {
1222     size_t dwords = DIV_ROUND_UP(size, 4);
1223     int ret;
1224     u32 val;
1225 
1226     if (!port->cap_usb4)
1227         return -EINVAL;
1228 
1229     if (buf) {
1230         ret = usb4_port_write_data(port, buf, dwords);
1231         if (ret)
1232             return ret;
1233     }
1234 
1235     val = reg;
1236     val |= size << PORT_CS_1_LENGTH_SHIFT;
1237     val |= PORT_CS_1_WNR_WRITE;
1238     val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
1239     if (target == USB4_SB_TARGET_RETIMER)
1240         val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
1241     val |= PORT_CS_1_PND;
1242 
1243     ret = tb_port_write(port, &val, TB_CFG_PORT,
1244                 port->cap_usb4 + PORT_CS_1, 1);
1245     if (ret)
1246         return ret;
1247 
1248     ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
1249                      PORT_CS_1_PND, 0, 500);
1250     if (ret)
1251         return ret;
1252 
1253     ret = tb_port_read(port, &val, TB_CFG_PORT,
1254                 port->cap_usb4 + PORT_CS_1, 1);
1255     if (ret)
1256         return ret;
1257 
1258     if (val & PORT_CS_1_NR)
1259         return -ENODEV;
1260     if (val & PORT_CS_1_RC)
1261         return -EIO;
1262 
1263     return 0;
1264 }
1265 
1266 static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
1267                u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
1268 {
1269     ktime_t timeout;
1270     u32 val;
1271     int ret;
1272 
1273     val = opcode;
1274     ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
1275                  sizeof(val));
1276     if (ret)
1277         return ret;
1278 
1279     timeout = ktime_add_ms(ktime_get(), timeout_msec);
1280 
1281     do {
1282         /* Check results */
1283         ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
1284                     &val, sizeof(val));
1285         if (ret)
1286             return ret;
1287 
1288         switch (val) {
1289         case 0:
1290             return 0;
1291 
1292         case USB4_SB_OPCODE_ERR:
1293             return -EAGAIN;
1294 
1295         case USB4_SB_OPCODE_ONS:
1296             return -EOPNOTSUPP;
1297 
1298         default:
1299             if (val != opcode)
1300                 return -EIO;
1301             break;
1302         }
1303     } while (ktime_before(ktime_get(), timeout));
1304 
1305     return -ETIMEDOUT;
1306 }
1307 
1308 static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
1309 {
1310     u32 val = !offline;
1311     int ret;
1312 
1313     ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1314                   USB4_SB_METADATA, &val, sizeof(val));
1315     if (ret)
1316         return ret;
1317 
1318     val = USB4_SB_OPCODE_ROUTER_OFFLINE;
1319     return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1320                   USB4_SB_OPCODE, &val, sizeof(val));
1321 }
1322 
1323 /**
1324  * usb4_port_router_offline() - Put the USB4 port to offline mode
1325  * @port: USB4 port
1326  *
1327  * This function puts the USB4 port into offline mode. In this mode the
1328  * port does not react to hotplug events anymore. This needs to be
1329  * called before retimer access is done when the USB4 link is not up.
1330  *
1331  * Returns %0 in case of success and negative errno if there was an
1332  * error.
1333  */
1334 int usb4_port_router_offline(struct tb_port *port)
1335 {
1336     return usb4_port_set_router_offline(port, true);
1337 }
1338 
1339 /**
1340  * usb4_port_router_online() - Put the USB4 port back to online
1341  * @port: USB4 port
1342  *
1343  * Makes the USB4 port functional again.
1344  */
1345 int usb4_port_router_online(struct tb_port *port)
1346 {
1347     return usb4_port_set_router_offline(port, false);
1348 }
1349 
1350 /**
1351  * usb4_port_enumerate_retimers() - Send RT broadcast transaction
1352  * @port: USB4 port
1353  *
1354  * This forces the USB4 port to send a broadcast RT transaction which
1355  * makes the retimers on the link assign indices to themselves. Returns
1356  * %0 in case of success and negative errno if there was an error.
1357  */
1358 int usb4_port_enumerate_retimers(struct tb_port *port)
1359 {
1360     u32 val;
1361 
1362     val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
1363     return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1364                   USB4_SB_OPCODE, &val, sizeof(val));
1365 }
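
/*
 * Illustrative sketch, not part of the driver: accessing on-board retimers
 * when no USB4 link is up. The port is taken offline first, the retimers
 * are enumerated, and the port is brought back online afterwards. The
 * actual sideband accesses are elided.
 */
static int __maybe_unused usb4_retimer_scan_example(struct tb_port *port)
{
	int ret;

	ret = usb4_port_router_offline(port);
	if (ret)
		return ret;

	/* Ask the retimers on the link to assign indices to themselves */
	ret = usb4_port_enumerate_retimers(port);

	/* ... sideband reads/writes to the retimers would go here ... */

	usb4_port_router_online(port);
	return ret;
}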
1366 
1367 /**
1368  * usb4_port_clx_supported() - Check if CLx is supported by the link
1369  * @port: Port to check for CLx support for
1370  *
1371  * PORT_CS_18_CPS bit reflects if the link supports CLx including
1372  * active cables (if connected on the link).
1373  */
1374 bool usb4_port_clx_supported(struct tb_port *port)
1375 {
1376     int ret;
1377     u32 val;
1378 
1379     ret = tb_port_read(port, &val, TB_CFG_PORT,
1380                port->cap_usb4 + PORT_CS_18, 1);
1381     if (ret)
1382         return false;
1383 
1384     return !!(val & PORT_CS_18_CPS);
1385 }
1386 
1387 static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
1388                        enum usb4_sb_opcode opcode,
1389                        int timeout_msec)
1390 {
1391     return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
1392                    timeout_msec);
1393 }
1394 
1395 /**
1396  * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
1397  * @port: USB4 port
1398  * @index: Retimer index
1399  *
1400  * Enables sideband channel transactions on SBTX. Can be used when the
1401  * USB4 link does not go up, for example if there is no device connected.
1402  */
1403 int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
1404 {
1405     int ret;
1406 
1407     ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
1408                    500);
1409 
1410     if (ret != -ENODEV)
1411         return ret;
1412 
1413     /*
1414      * Per the USB4 retimer spec, the retimer is not required to
1415      * send an RT (Retimer Transaction) response for the first
1416      * SET_INBOUND_SBTX command.
1417      */
1418     return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
1419                     500);
1420 }
1421 
1422 /**
1423  * usb4_port_retimer_read() - Read from retimer sideband registers
1424  * @port: USB4 port
1425  * @index: Retimer index
1426  * @reg: Sideband register to read
1427  * @buf: Data from @reg is stored here
1428  * @size: Number of bytes to read
1429  *
1430  * Function reads retimer sideband registers starting from @reg. The
1431  * retimer is connected to @port at @index. Returns %0 in case of
1432  * success, and read data is copied to @buf. If there is no retimer
1433  * present at given @index returns %-ENODEV. In any other failure
1434  * present at the given @index, returns %-ENODEV. In case of any other
1435  * failure, returns negative errno.
1436 int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
1437                u8 size)
1438 {
1439     return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
1440                  size);
1441 }
1442 
1443 /**
1444  * usb4_port_retimer_write() - Write to retimer sideband registers
1445  * @port: USB4 port
1446  * @index: Retimer index
1447  * @reg: Sideband register to write
1448  * @buf: Data that is written starting from @reg
1449  * @size: Number of bytes to write
1450  *
1451  * Writes retimer sideband registers starting from @reg. The retimer is
1452  * connected to @port at @index. Returns %0 in case of success. If there
1453  * is no retimer present at the given @index, returns %-ENODEV. In case of
1454  * any other failure, returns negative errno.
1455  */
1456 int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
1457                 const void *buf, u8 size)
1458 {
1459     return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
1460                   size);
1461 }
1462 
1463 /**
1464  * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
1465  * @port: USB4 port
1466  * @index: Retimer index
1467  *
1468  * If the retimer at @index is the last one (connected directly to the
1469  * Type-C port) this function returns %1. If it is not returns %0. If
1470  * the retimer is not present returns %-ENODEV. Otherwise returns
1471  * negative errno.
1472  */
1473 int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
1474 {
1475     u32 metadata;
1476     int ret;
1477 
1478     ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
1479                    500);
1480     if (ret)
1481         return ret;
1482 
1483     ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1484                      sizeof(metadata));
1485     return ret ? ret : metadata & 1;
1486 }
1487 
1488 /**
1489  * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
1490  * @port: USB4 port
1491  * @index: Retimer index
1492  *
1493  * Reads NVM sector size (in bytes) of a retimer at @index. This
1494  * operation can be used to determine whether the retimer supports NVM
1495  * upgrade for example. Returns sector size in bytes or negative errno
1496  * in case of error. Specifically returns %-ENODEV if there is no
1497  * retimer at @index.
1498  */
1499 int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
1500 {
1501     u32 metadata;
1502     int ret;
1503 
1504     ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
1505                    500);
1506     if (ret)
1507         return ret;
1508 
1509     ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1510                      sizeof(metadata));
1511     return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
1512 }
1513 
1514 /**
1515  * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
1516  * @port: USB4 port
1517  * @index: Retimer index
1518  * @address: Start offset
1519  *
1520  * Explicitly sets NVM write offset. Normally when writing to NVM this is
1521  * done automatically by usb4_port_retimer_nvm_write().
1522  *
1523  * Returns %0 on success and negative errno if there was a failure.
1524  */
1525 int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
1526                      unsigned int address)
1527 {
1528     u32 metadata, dwaddress;
1529     int ret;
1530 
1531     dwaddress = address / 4;
1532     metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
1533           USB4_NVM_SET_OFFSET_MASK;
1534 
1535     ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1536                       sizeof(metadata));
1537     if (ret)
1538         return ret;
1539 
1540     return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
1541                     500);
1542 }
1543 
1544 struct retimer_info {
1545     struct tb_port *port;
1546     u8 index;
1547 };
1548 
1549 static int usb4_port_retimer_nvm_write_next_block(void *data,
1550     unsigned int dwaddress, const void *buf, size_t dwords)
1551 
1552 {
1553     const struct retimer_info *info = data;
1554     struct tb_port *port = info->port;
1555     u8 index = info->index;
1556     int ret;
1557 
1558     ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
1559                       buf, dwords * 4);
1560     if (ret)
1561         return ret;
1562 
1563     return usb4_port_retimer_op(port, index,
1564             USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
1565 }
1566 
1567 /**
1568  * usb4_port_retimer_nvm_write() - Write to retimer NVM
1569  * @port: USB4 port
1570  * @index: Retimer index
1571  * @address: Byte address where to start the write
1572  * @buf: Data to write
1573  * @size: Number of bytes to write
1574  *
1575  * Writes @size bytes from @buf to the retimer NVM. Used for NVM
1576  * upgrade. Returns %0 if the data was written successfully and negative
1577  * errno in case of failure. Specifically returns %-ENODEV if there is
1578  * no retimer at @index.
1579  */
1580 int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
1581                 const void *buf, size_t size)
1582 {
1583     struct retimer_info info = { .port = port, .index = index };
1584     int ret;
1585 
1586     ret = usb4_port_retimer_nvm_set_offset(port, index, address);
1587     if (ret)
1588         return ret;
1589 
1590     return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
1591                  usb4_port_retimer_nvm_write_next_block, &info);
1592 }
1593 
1594 /**
1595  * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
1596  * @port: USB4 port
1597  * @index: Retimer index
1598  *
1599  * After the new NVM image has been written via usb4_port_retimer_nvm_write()
1600  * this function can be used to trigger the NVM upgrade process. If
1601  * successful, the retimer restarts with the new NVM and may not have its
1602  * index set, so usb4_port_enumerate_retimers() needs to be called to
1603  * force a new index to be assigned.
1604  */
1605 int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
1606 {
1607     u32 val;
1608 
1609     /*
1610      * We need to use the raw operation here because once the
1611      * authentication completes the retimer index is not set anymore
1612      * so we do not get back the status now.
1613      */
1614     val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
1615     return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
1616                   USB4_SB_OPCODE, &val, sizeof(val));
1617 }
1618 
1619 /**
1620  * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
1621  * @port: USB4 port
1622  * @index: Retimer index
1623  * @status: Raw status code read from metadata
1624  *
1625  * This can be called after usb4_port_retimer_nvm_authenticate() and
1626  * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
1627  *
1628  * Returns %0 if the authentication status was successfully read. The
1629  * completion metadata (the result) is then stored into @status. If
1630  * reading the status fails, returns negative errno.
1631  */
1632 int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
1633                           u32 *status)
1634 {
1635     u32 metadata, val;
1636     int ret;
1637 
1638     ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
1639                      sizeof(val));
1640     if (ret)
1641         return ret;
1642 
1643     switch (val) {
1644     case 0:
1645         *status = 0;
1646         return 0;
1647 
1648     case USB4_SB_OPCODE_ERR:
1649         ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
1650                          &metadata, sizeof(metadata));
1651         if (ret)
1652             return ret;
1653 
1654         *status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
1655         return 0;
1656 
1657     case USB4_SB_OPCODE_ONS:
1658         return -EOPNOTSUPP;
1659 
1660     default:
1661         return -EIO;
1662     }
1663 }
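
/*
 * Illustrative sketch, not part of the driver: a retimer NVM upgrade
 * sequence using the helpers above. After authentication the retimer
 * restarts and loses its index, so the retimers are re-enumerated before
 * the status is read. The delay needed for the retimer to restart is
 * glossed over. @image and @image_size are assumed to hold the new image.
 */
static int __maybe_unused usb4_retimer_upgrade_example(struct tb_port *port,
							u8 index,
							const void *image,
							size_t image_size)
{
	u32 auth_status;
	int ret;

	ret = usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate(port, index);
	if (ret)
		return ret;

	/* Force the retimers to pick up their indices again */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate_status(port, index,
							&auth_status);
	if (ret)
		return ret;

	return auth_status ? -EIO : 0;
}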
1664 
1665 static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
1666                         void *buf, size_t dwords)
1667 {
1668     const struct retimer_info *info = data;
1669     struct tb_port *port = info->port;
1670     u8 index = info->index;
1671     u32 metadata;
1672     int ret;
1673 
1674     metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
1675     if (dwords < NVM_DATA_DWORDS)
1676         metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
1677 
1678     ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1679                       sizeof(metadata));
1680     if (ret)
1681         return ret;
1682 
1683     ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
1684     if (ret)
1685         return ret;
1686 
1687     return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
1688                       dwords * 4);
1689 }
1690 
1691 /**
1692  * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
1693  * @port: USB4 port
1694  * @index: Retimer index
1695  * @address: NVM address (in bytes) to start reading
1696  * @buf: Data read from NVM is stored here
1697  * @size: Number of bytes to read
1698  *
1699  * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
1700  * read was successful and negative errno in case of failure.
1701  * Specifically returns %-ENODEV if there is no retimer at @index.
1702  */
1703 int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
1704                    unsigned int address, void *buf, size_t size)
1705 {
1706     struct retimer_info info = { .port = port, .index = index };
1707 
1708     return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
1709                 usb4_port_retimer_nvm_read_block, &info);
1710 }
1711 
1712 /**
1713  * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
1714  * @port: USB3 adapter port
1715  *
1716  * Returns the maximum supported link rate of a USB3 adapter in Mb/s,
1717  * or negative errno in case of error.
1718  */
1719 int usb4_usb3_port_max_link_rate(struct tb_port *port)
1720 {
1721     int ret, lr;
1722     u32 val;
1723 
1724     if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1725         return -EINVAL;
1726 
1727     ret = tb_port_read(port, &val, TB_CFG_PORT,
1728                port->cap_adap + ADP_USB3_CS_4, 1);
1729     if (ret)
1730         return ret;
1731 
1732     lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
1733     return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
1734 }
1735 
1736 /**
1737  * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
1738  * @port: USB3 adapter port
1739  *
1740  * Returns the actual established link rate of a USB3 adapter in Mb/s. If
1741  * the link is not up, returns %0. Returns negative errno in case of failure.
1742  */
1743 int usb4_usb3_port_actual_link_rate(struct tb_port *port)
1744 {
1745     int ret, lr;
1746     u32 val;
1747 
1748     if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1749         return -EINVAL;
1750 
1751     ret = tb_port_read(port, &val, TB_CFG_PORT,
1752                port->cap_adap + ADP_USB3_CS_4, 1);
1753     if (ret)
1754         return ret;
1755 
1756     if (!(val & ADP_USB3_CS_4_ULV))
1757         return 0;
1758 
1759     lr = val & ADP_USB3_CS_4_ALR_MASK;
1760     return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
1761 }
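
/*
 * Illustrative sketch (not part of the original file): comparing the
 * maximum and the currently established USB3 link rate of an adapter.
 * The helper name example_usb3_link_rate_check() is hypothetical.
 */
static void example_usb3_link_rate_check(struct tb_port *port)
{
    int max_rate, actual_rate;

    max_rate = usb4_usb3_port_max_link_rate(port);
    if (max_rate < 0)
        return;

    actual_rate = usb4_usb3_port_actual_link_rate(port);
    if (actual_rate < 0)
        return;

    if (!actual_rate)
        tb_port_dbg(port, "USB3 link is not up\n");
    else
        tb_port_dbg(port, "USB3 link is up at %d of %d Mb/s\n",
                actual_rate, max_rate);
}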
1762 
1763 static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
1764 {
1765     int ret;
1766     u32 val;
1767 
1768     if (!tb_port_is_usb3_down(port))
1769         return -EINVAL;
1770     if (tb_route(port->sw))
1771         return -EINVAL;
1772 
1773     ret = tb_port_read(port, &val, TB_CFG_PORT,
1774                port->cap_adap + ADP_USB3_CS_2, 1);
1775     if (ret)
1776         return ret;
1777 
1778     if (request)
1779         val |= ADP_USB3_CS_2_CMR;
1780     else
1781         val &= ~ADP_USB3_CS_2_CMR;
1782 
1783     ret = tb_port_write(port, &val, TB_CFG_PORT,
1784                 port->cap_adap + ADP_USB3_CS_2, 1);
1785     if (ret)
1786         return ret;
1787 
1788     /*
1789      * We can use val here directly as the CMR bit is in the same place
1790      * as HCA. Just mask out others.
1791      */
1792     val &= ADP_USB3_CS_2_CMR;
1793     return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
1794                       ADP_USB3_CS_1_HCA, val, 1500);
1795 }
1796 
1797 static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
1798 {
1799     return usb4_usb3_port_cm_request(port, true);
1800 }
1801 
1802 static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
1803 {
1804     return usb4_usb3_port_cm_request(port, false);
1805 }
1806 
1807 static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
1808 {
1809     unsigned long uframes;
1810 
1811     uframes = bw * 512UL << scale;
1812     return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
1813 }
1814 
1815 static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
1816 {
1817     unsigned long uframes;
1818 
1819     /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
1820     uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
1821     return DIV_ROUND_UP(uframes, 512UL << scale);
1822 }
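
/*
 * Worked example of the conversion above (added for illustration, assuming
 * scale 0): mbps_to_usb3_bw(900, 0) = DIV_ROUND_UP(900000000 / 8000, 512) =
 * DIV_ROUND_UP(112500, 512) = 220, and converting back usb3_bw_to_mbps(220, 0)
 * = DIV_ROUND_CLOSEST(220 * 512 * 8000, 1000000) = 901 Mb/s, i.e. the
 * requested bandwidth is rounded up to the next 512 << scale granularity step.
 */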
1823 
1824 static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
1825                            int *upstream_bw,
1826                            int *downstream_bw)
1827 {
1828     u32 val, bw, scale;
1829     int ret;
1830 
1831     ret = tb_port_read(port, &val, TB_CFG_PORT,
1832                port->cap_adap + ADP_USB3_CS_2, 1);
1833     if (ret)
1834         return ret;
1835 
1836     ret = tb_port_read(port, &scale, TB_CFG_PORT,
1837                port->cap_adap + ADP_USB3_CS_3, 1);
1838     if (ret)
1839         return ret;
1840 
1841     scale &= ADP_USB3_CS_3_SCALE_MASK;
1842 
1843     bw = val & ADP_USB3_CS_2_AUBW_MASK;
1844     *upstream_bw = usb3_bw_to_mbps(bw, scale);
1845 
1846     bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
1847     *downstream_bw = usb3_bw_to_mbps(bw, scale);
1848 
1849     return 0;
1850 }
1851 
1852 /**
1853  * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
1854  * @port: USB3 adapter port
1855  * @upstream_bw: Allocated upstream bandwidth is stored here
1856  * @downstream_bw: Allocated downstream bandwidth is stored here
1857  *
1858  * Stores currently allocated USB3 bandwidth into @upstream_bw and
1859  * @downstream_bw in Mb/s. Returns %0 in case of success and negative
1860  * errno in case of failure.
1861  */
1862 int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
1863                        int *downstream_bw)
1864 {
1865     int ret;
1866 
1867     ret = usb4_usb3_port_set_cm_request(port);
1868     if (ret)
1869         return ret;
1870 
1871     ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
1872                               downstream_bw);
1873     usb4_usb3_port_clear_cm_request(port);
1874 
1875     return ret;
1876 }
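
/*
 * Illustrative sketch (not part of the original file): how a connection
 * manager might log the bandwidth currently allocated to a USB3 adapter.
 * The helper name example_usb3_log_allocation() is hypothetical.
 */
static void example_usb3_log_allocation(struct tb_port *port)
{
    int up_bw, down_bw;

    if (usb4_usb3_port_allocated_bandwidth(port, &up_bw, &down_bw))
        return;

    tb_port_dbg(port, "USB3 allocated %d/%d Mb/s up/down\n",
            up_bw, down_bw);
}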
1877 
1878 static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
1879                           int *upstream_bw,
1880                           int *downstream_bw)
1881 {
1882     u32 val, bw, scale;
1883     int ret;
1884 
1885     ret = tb_port_read(port, &val, TB_CFG_PORT,
1886                port->cap_adap + ADP_USB3_CS_1, 1);
1887     if (ret)
1888         return ret;
1889 
1890     ret = tb_port_read(port, &scale, TB_CFG_PORT,
1891                port->cap_adap + ADP_USB3_CS_3, 1);
1892     if (ret)
1893         return ret;
1894 
1895     scale &= ADP_USB3_CS_3_SCALE_MASK;
1896 
1897     bw = val & ADP_USB3_CS_1_CUBW_MASK;
1898     *upstream_bw = usb3_bw_to_mbps(bw, scale);
1899 
1900     bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
1901     *downstream_bw = usb3_bw_to_mbps(bw, scale);
1902 
1903     return 0;
1904 }
1905 
1906 static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
1907                             int upstream_bw,
1908                             int downstream_bw)
1909 {
1910     u32 val, ubw, dbw, scale;
1911     int ret;
1912 
1913     /* Read the used scale, hardware default is 0 */
1914     ret = tb_port_read(port, &scale, TB_CFG_PORT,
1915                port->cap_adap + ADP_USB3_CS_3, 1);
1916     if (ret)
1917         return ret;
1918 
1919     scale &= ADP_USB3_CS_3_SCALE_MASK;
1920     ubw = mbps_to_usb3_bw(upstream_bw, scale);
1921     dbw = mbps_to_usb3_bw(downstream_bw, scale);
1922 
1923     ret = tb_port_read(port, &val, TB_CFG_PORT,
1924                port->cap_adap + ADP_USB3_CS_2, 1);
1925     if (ret)
1926         return ret;
1927 
1928     val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
1929     val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
1930     val |= ubw;
1931 
1932     return tb_port_write(port, &val, TB_CFG_PORT,
1933                  port->cap_adap + ADP_USB3_CS_2, 1);
1934 }
1935 
1936 /**
1937  * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
1938  * @port: USB3 adapter port
1939  * @upstream_bw: New upstream bandwidth
1940  * @downstream_bw: New downstream bandwidth
1941  *
1942  * This can be used to set how much bandwidth is allocated for the USB3
1943  * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
1944  * new values programmed to the USB3 adapter allocation registers. If
1945  * the values are lower than what is currently consumed, the allocation
1946  * is set to what is currently consumed instead (consumed bandwidth
1947  * cannot be taken away by the CM). The actual new values are returned in
1948  * @upstream_bw and @downstream_bw.
1949  *
1950  * Returns %0 in case of success and negative errno if there was a
1951  * failure.
1952  */
1953 int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
1954                       int *downstream_bw)
1955 {
1956     int ret, consumed_up, consumed_down, allocate_up, allocate_down;
1957 
1958     ret = usb4_usb3_port_set_cm_request(port);
1959     if (ret)
1960         return ret;
1961 
1962     ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
1963                              &consumed_down);
1964     if (ret)
1965         goto err_request;
1966 
1967     /* Don't allow it to go lower than what is consumed */
1968     allocate_up = max(*upstream_bw, consumed_up);
1969     allocate_down = max(*downstream_bw, consumed_down);
1970 
1971     ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
1972                                allocate_down);
1973     if (ret)
1974         goto err_request;
1975 
1976     *upstream_bw = allocate_up;
1977     *downstream_bw = allocate_down;
1978 
1979 err_request:
1980     usb4_usb3_port_clear_cm_request(port);
1981     return ret;
1982 }
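
/*
 * Illustrative sketch (not part of the original file): requesting a new
 * USB3 allocation and observing that the values may be raised to the
 * currently consumed bandwidth. The helper name example_usb3_allocate()
 * and the 2000 Mb/s figures are hypothetical.
 */
static int example_usb3_allocate(struct tb_port *port)
{
    int up_bw = 2000, down_bw = 2000;
    int ret;

    ret = usb4_usb3_port_allocate_bandwidth(port, &up_bw, &down_bw);
    if (ret)
        return ret;

    /* up_bw/down_bw now hold what was actually programmed */
    tb_port_dbg(port, "USB3 allocation is now %d/%d Mb/s\n",
            up_bw, down_bw);
    return 0;
}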
1983 
1984 /**
1985  * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
1986  * @port: USB3 adapter port
1987  * @upstream_bw: New allocated upstream bandwidth
1988  * @downstream_bw: New allocated downstream bandwidth
1989  *
1990  * Releases USB3 allocated bandwidth down to what is actually consumed.
1991  * The new bandwidth is returned in @upstream_bw and @downstream_bw.
1992  *
1993  * Returns %0 in case of success and negative errno in case of failure.
1994  */
1995 int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
1996                      int *downstream_bw)
1997 {
1998     int ret, consumed_up, consumed_down;
1999 
2000     ret = usb4_usb3_port_set_cm_request(port);
2001     if (ret)
2002         return ret;
2003 
2004     ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
2005                              &consumed_down);
2006     if (ret)
2007         goto err_request;
2008 
2009     /*
2010      * Always keep 1000 Mb/s to make sure xHCI has at least some
2011      * bandwidth available for isochronous traffic.
2012      */
2013     if (consumed_up < 1000)
2014         consumed_up = 1000;
2015     if (consumed_down < 1000)
2016         consumed_down = 1000;
2017 
2018     ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
2019                                consumed_down);
2020     if (ret)
2021         goto err_request;
2022 
2023     *upstream_bw = consumed_up;
2024     *downstream_bw = consumed_down;
2025 
2026 err_request:
2027     usb4_usb3_port_clear_cm_request(port);
2028     return ret;
2029 }
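
/*
 * Illustrative sketch (not part of the original file): shrinking the USB3
 * allocation back to what the link actually consumes (never below the
 * 1000 Mb/s kept for xHCI isochronous traffic). The helper name
 * example_usb3_release() is hypothetical.
 */
static int example_usb3_release(struct tb_port *port)
{
    int up_bw, down_bw;
    int ret;

    ret = usb4_usb3_port_release_bandwidth(port, &up_bw, &down_bw);
    if (ret)
        return ret;

    tb_port_dbg(port, "USB3 allocation released to %d/%d Mb/s\n",
            up_bw, down_bw);
    return 0;
}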