Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Thunderbolt DMA configuration based mailbox support
0004  *
0005  * Copyright (C) 2017, Intel Corporation
0006  * Authors: Michael Jamet <michael.jamet@intel.com>
0007  *          Mika Westerberg <mika.westerberg@linux.intel.com>
0008  */
0009 
0010 #include <linux/delay.h>
0011 #include <linux/slab.h>
0012 
0013 #include "dma_port.h"
0014 #include "tb_regs.h"
0015 
0016 #define DMA_PORT_CAP            0x3e
0017 
0018 #define MAIL_DATA           1
0019 #define MAIL_DATA_DWORDS        16
0020 
0021 #define MAIL_IN             17
0022 #define MAIL_IN_CMD_SHIFT       28
0023 #define MAIL_IN_CMD_MASK        GENMASK(31, 28)
0024 #define MAIL_IN_CMD_FLASH_WRITE     0x0
0025 #define MAIL_IN_CMD_FLASH_UPDATE_AUTH   0x1
0026 #define MAIL_IN_CMD_FLASH_READ      0x2
0027 #define MAIL_IN_CMD_POWER_CYCLE     0x4
0028 #define MAIL_IN_DWORDS_SHIFT        24
0029 #define MAIL_IN_DWORDS_MASK     GENMASK(27, 24)
0030 #define MAIL_IN_ADDRESS_SHIFT       2
0031 #define MAIL_IN_ADDRESS_MASK        GENMASK(23, 2)
0032 #define MAIL_IN_CSS         BIT(1)
0033 #define MAIL_IN_OP_REQUEST      BIT(0)
0034 
0035 #define MAIL_OUT            18
0036 #define MAIL_OUT_STATUS_RESPONSE    BIT(29)
0037 #define MAIL_OUT_STATUS_CMD_SHIFT   4
0038 #define MAIL_OUT_STATUS_CMD_MASK    GENMASK(7, 4)
0039 #define MAIL_OUT_STATUS_MASK        GENMASK(3, 0)
0040 #define MAIL_OUT_STATUS_COMPLETED   0
0041 #define MAIL_OUT_STATUS_ERR_AUTH    1
0042 #define MAIL_OUT_STATUS_ERR_ACCESS  2
0043 
0044 #define DMA_PORT_TIMEOUT        5000 /* ms */
0045 #define DMA_PORT_RETRIES        3
0046 
/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers (config space offset of
 *        the DMA capability, %DMA_PORT_CAP)
 * @buf: Temporary buffer to store a single block of %MAIL_DATA_DWORDS
 *       dwords transferred through the mailbox
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};
0060 
0061 /*
0062  * When the switch is in safe mode it supports very little functionality
0063  * so we don't validate that much here.
0064  */
0065 static bool dma_port_match(const struct tb_cfg_request *req,
0066                const struct ctl_pkg *pkg)
0067 {
0068     u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
0069 
0070     if (pkg->frame.eof == TB_CFG_PKG_ERROR)
0071         return true;
0072     if (pkg->frame.eof != req->response_type)
0073         return false;
0074     if (route != tb_cfg_get_route(req->request))
0075         return false;
0076     if (pkg->frame.size != req->response_size)
0077         return false;
0078 
0079     return true;
0080 }
0081 
0082 static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
0083 {
0084     memcpy(req->response, pkg->buffer, req->response_size);
0085     return true;
0086 }
0087 
0088 static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
0089              u32 port, u32 offset, u32 length, int timeout_msec)
0090 {
0091     struct cfg_read_pkg request = {
0092         .header = tb_cfg_make_header(route),
0093         .addr = {
0094             .seq = 1,
0095             .port = port,
0096             .space = TB_CFG_PORT,
0097             .offset = offset,
0098             .length = length,
0099         },
0100     };
0101     struct tb_cfg_request *req;
0102     struct cfg_write_pkg reply;
0103     struct tb_cfg_result res;
0104 
0105     req = tb_cfg_request_alloc();
0106     if (!req)
0107         return -ENOMEM;
0108 
0109     req->match = dma_port_match;
0110     req->copy = dma_port_copy;
0111     req->request = &request;
0112     req->request_size = sizeof(request);
0113     req->request_type = TB_CFG_PKG_READ;
0114     req->response = &reply;
0115     req->response_size = 12 + 4 * length;
0116     req->response_type = TB_CFG_PKG_READ;
0117 
0118     res = tb_cfg_request_sync(ctl, req, timeout_msec);
0119 
0120     tb_cfg_request_put(req);
0121 
0122     if (res.err)
0123         return res.err;
0124 
0125     memcpy(buffer, &reply.data, 4 * length);
0126     return 0;
0127 }
0128 
0129 static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
0130               u32 port, u32 offset, u32 length, int timeout_msec)
0131 {
0132     struct cfg_write_pkg request = {
0133         .header = tb_cfg_make_header(route),
0134         .addr = {
0135             .seq = 1,
0136             .port = port,
0137             .space = TB_CFG_PORT,
0138             .offset = offset,
0139             .length = length,
0140         },
0141     };
0142     struct tb_cfg_request *req;
0143     struct cfg_read_pkg reply;
0144     struct tb_cfg_result res;
0145 
0146     memcpy(&request.data, buffer, length * 4);
0147 
0148     req = tb_cfg_request_alloc();
0149     if (!req)
0150         return -ENOMEM;
0151 
0152     req->match = dma_port_match;
0153     req->copy = dma_port_copy;
0154     req->request = &request;
0155     req->request_size = 12 + 4 * length;
0156     req->request_type = TB_CFG_PKG_WRITE;
0157     req->response = &reply;
0158     req->response_size = sizeof(reply);
0159     req->response_type = TB_CFG_PKG_WRITE;
0160 
0161     res = tb_cfg_request_sync(ctl, req, timeout_msec);
0162 
0163     tb_cfg_request_put(req);
0164 
0165     return res.err;
0166 }
0167 
0168 static int dma_find_port(struct tb_switch *sw)
0169 {
0170     static const int ports[] = { 3, 5, 7 };
0171     int i;
0172 
0173     /*
0174      * The DMA (NHI) port is either 3, 5 or 7 depending on the
0175      * controller. Try all of them.
0176      */
0177     for (i = 0; i < ARRAY_SIZE(ports); i++) {
0178         u32 type;
0179         int ret;
0180 
0181         ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
0182                     2, 1, DMA_PORT_TIMEOUT);
0183         if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
0184             return ports[i];
0185     }
0186 
0187     return -ENODEV;
0188 }
0189 
0190 /**
0191  * dma_port_alloc() - Finds DMA control port from a switch pointed by route
0192  * @sw: Switch from where find the DMA port
0193  *
0194  * Function checks if the switch NHI port supports DMA configuration
0195  * based mailbox capability and if it does, allocates and initializes
0196  * DMA port structure. Returns %NULL if the capabity was not found.
0197  *
0198  * The DMA control port is functional also when the switch is in safe
0199  * mode.
0200  */
0201 struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
0202 {
0203     struct tb_dma_port *dma;
0204     int port;
0205 
0206     port = dma_find_port(sw);
0207     if (port < 0)
0208         return NULL;
0209 
0210     dma = kzalloc(sizeof(*dma), GFP_KERNEL);
0211     if (!dma)
0212         return NULL;
0213 
0214     dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
0215     if (!dma->buf) {
0216         kfree(dma);
0217         return NULL;
0218     }
0219 
0220     dma->sw = sw;
0221     dma->port = port;
0222     dma->base = DMA_PORT_CAP;
0223 
0224     return dma;
0225 }
0226 
0227 /**
0228  * dma_port_free() - Release DMA control port structure
0229  * @dma: DMA control port
0230  */
0231 void dma_port_free(struct tb_dma_port *dma)
0232 {
0233     if (dma) {
0234         kfree(dma->buf);
0235         kfree(dma);
0236     }
0237 }
0238 
0239 static int dma_port_wait_for_completion(struct tb_dma_port *dma,
0240                     unsigned int timeout)
0241 {
0242     unsigned long end = jiffies + msecs_to_jiffies(timeout);
0243     struct tb_switch *sw = dma->sw;
0244 
0245     do {
0246         int ret;
0247         u32 in;
0248 
0249         ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
0250                     dma->base + MAIL_IN, 1, 50);
0251         if (ret) {
0252             if (ret != -ETIMEDOUT)
0253                 return ret;
0254         } else if (!(in & MAIL_IN_OP_REQUEST)) {
0255             return 0;
0256         }
0257 
0258         usleep_range(50, 100);
0259     } while (time_before(jiffies, end));
0260 
0261     return -ETIMEDOUT;
0262 }
0263 
0264 static int status_to_errno(u32 status)
0265 {
0266     switch (status & MAIL_OUT_STATUS_MASK) {
0267     case MAIL_OUT_STATUS_COMPLETED:
0268         return 0;
0269     case MAIL_OUT_STATUS_ERR_AUTH:
0270         return -EINVAL;
0271     case MAIL_OUT_STATUS_ERR_ACCESS:
0272         return -EACCES;
0273     }
0274 
0275     return -EIO;
0276 }
0277 
0278 static int dma_port_request(struct tb_dma_port *dma, u32 in,
0279                 unsigned int timeout)
0280 {
0281     struct tb_switch *sw = dma->sw;
0282     u32 out;
0283     int ret;
0284 
0285     ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
0286                  dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
0287     if (ret)
0288         return ret;
0289 
0290     ret = dma_port_wait_for_completion(dma, timeout);
0291     if (ret)
0292         return ret;
0293 
0294     ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
0295                 dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
0296     if (ret)
0297         return ret;
0298 
0299     return status_to_errno(out);
0300 }
0301 
0302 static int dma_port_flash_read_block(void *data, unsigned int dwaddress,
0303                      void *buf, size_t dwords)
0304 {
0305     struct tb_dma_port *dma = data;
0306     struct tb_switch *sw = dma->sw;
0307     int ret;
0308     u32 in;
0309 
0310     in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
0311     if (dwords < MAIL_DATA_DWORDS)
0312         in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
0313     in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
0314     in |= MAIL_IN_OP_REQUEST;
0315 
0316     ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
0317     if (ret)
0318         return ret;
0319 
0320     return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
0321                  dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
0322 }
0323 
0324 static int dma_port_flash_write_block(void *data, unsigned int dwaddress,
0325                       const void *buf, size_t dwords)
0326 {
0327     struct tb_dma_port *dma = data;
0328     struct tb_switch *sw = dma->sw;
0329     int ret;
0330     u32 in;
0331 
0332     /* Write the block to MAIL_DATA registers */
0333     ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
0334                 dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
0335     if (ret)
0336         return ret;
0337 
0338     in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
0339 
0340     /* CSS header write is always done to the same magic address */
0341     if (dwaddress >= DMA_PORT_CSS_ADDRESS)
0342         in |= MAIL_IN_CSS;
0343 
0344     in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
0345     in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
0346     in |= MAIL_IN_OP_REQUEST;
0347 
0348     return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
0349 }
0350 
/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read
 * @size: Size of the buffer in bytes
 *
 * The transfer is delegated to tb_nvm_read_data() which drives
 * dma_port_flash_read_block() with retry count %DMA_PORT_RETRIES.
 * Returns %0 on success, negative errno otherwise.
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size)
{
	return tb_nvm_read_data(address, buf, size, DMA_PORT_RETRIES,
				dma_port_flash_read_block, dma);
}
0364 
0365 /**
0366  * dma_port_flash_write() - Write to non-active flash region
0367  * @dma: DMA control port
0368  * @address: Address relative to the start of non-active region
0369  * @buf: Data to write
0370  * @size: Size of the buffer
0371  *
0372  * Writes block of data to the non-active flash region of the switch. If
0373  * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
0374  * using CSS command.
0375  */
0376 int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
0377              const void *buf, size_t size)
0378 {
0379     if (address >= DMA_PORT_CSS_ADDRESS && size > DMA_PORT_CSS_MAX_SIZE)
0380         return -E2BIG;
0381 
0382     return tb_nvm_write_data(address, buf, size, DMA_PORT_RETRIES,
0383                  dma_port_flash_write_block, dma);
0384 }
0385 
0386 /**
0387  * dma_port_flash_update_auth() - Starts flash authenticate cycle
0388  * @dma: DMA control port
0389  *
0390  * Starts the flash update authentication cycle. If the image in the
0391  * non-active area was valid, the switch starts upgrade process where
0392  * active and non-active area get swapped in the end. Caller should call
0393  * dma_port_flash_update_auth_status() to get status of this command.
0394  * This is because if the switch in question is root switch the
0395  * thunderbolt host controller gets reset as well.
0396  */
0397 int dma_port_flash_update_auth(struct tb_dma_port *dma)
0398 {
0399     u32 in;
0400 
0401     in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
0402     in |= MAIL_IN_OP_REQUEST;
0403 
0404     return dma_port_request(dma, in, 150);
0405 }
0406 
0407 /**
0408  * dma_port_flash_update_auth_status() - Reads status of update auth command
0409  * @dma: DMA control port
0410  * @status: Status code of the operation
0411  *
0412  * The function checks if there is status available from the last update
0413  * auth command. Returns %0 if there is no status and no further
0414  * action is required. If there is status, %1 is returned instead and
0415  * @status holds the failure code.
0416  *
0417  * Negative return means there was an error reading status from the
0418  * switch.
0419  */
0420 int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
0421 {
0422     struct tb_switch *sw = dma->sw;
0423     u32 out, cmd;
0424     int ret;
0425 
0426     ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
0427                 dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
0428     if (ret)
0429         return ret;
0430 
0431     /* Check if the status relates to flash update auth */
0432     cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
0433     if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
0434         if (status)
0435             *status = out & MAIL_OUT_STATUS_MASK;
0436 
0437         /* Reset is needed in any case */
0438         return 1;
0439     }
0440 
0441     return 0;
0442 }
0443 
0444 /**
0445  * dma_port_power_cycle() - Power cycles the switch
0446  * @dma: DMA control port
0447  *
0448  * Triggers power cycle to the switch.
0449  */
0450 int dma_port_power_cycle(struct tb_dma_port *dma)
0451 {
0452     u32 in;
0453 
0454     in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
0455     in |= MAIL_IN_OP_REQUEST;
0456 
0457     return dma_port_request(dma, in, 150);
0458 }