0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/device.h>
0011 #include <linux/delay.h>
0012 #include <linux/kmod.h>
0013 #include <linux/module.h>
0014 #include <linux/pm_runtime.h>
0015 #include <linux/prandom.h>
0016 #include <linux/utsname.h>
0017 #include <linux/uuid.h>
0018 #include <linux/workqueue.h>
0019
0020 #include "tb.h"
0021
/* Timeouts are in milliseconds (passed to timeout_msec / msecs_to_jiffies()) */
#define XDOMAIN_SHORT_TIMEOUT 100
#define XDOMAIN_DEFAULT_TIMEOUT 1000
#define XDOMAIN_BONDING_TIMEOUT 10000
/* How many times a discovery request is retried before giving up */
#define XDOMAIN_RETRIES 10
/* Used when the remote does not announce "maxhopid" in its properties */
#define XDOMAIN_DEFAULT_MAX_HOPID 15
0027
/*
 * States of the XDomain discovery/bonding state machine. Keep in sync
 * with state_names[] below.
 */
enum {
	XDOMAIN_STATE_INIT,
	XDOMAIN_STATE_UUID,
	XDOMAIN_STATE_LINK_STATUS,
	XDOMAIN_STATE_LINK_STATE_CHANGE,
	XDOMAIN_STATE_LINK_STATUS2,
	XDOMAIN_STATE_BONDING_UUID_LOW,
	XDOMAIN_STATE_BONDING_UUID_HIGH,
	XDOMAIN_STATE_PROPERTIES,
	XDOMAIN_STATE_ENUMERATED,
	XDOMAIN_STATE_ERROR,
};
0040
/* Human readable state names for debug logging, indexed by the state enum */
static const char * const state_names[] = {
	[XDOMAIN_STATE_INIT] = "INIT",
	[XDOMAIN_STATE_UUID] = "UUID",
	[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
	[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
	[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
	[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
	[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
	[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
	[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
	[XDOMAIN_STATE_ERROR] = "ERROR",
};
0053
/*
 * Carries a received XDomain request packet to process context.
 * @work: Work item that runs tb_xdp_handle_request()
 * @pkg: Copy of the received packet (owned here, freed by the handler)
 * @tb: Domain the packet arrived on (reference held until handled)
 */
struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};
0059
static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/*
 * Serializes access to the global property directory and the protocol
 * handler list below. Lock ordering: take xdomain_lock before xd->lock
 * (see update_property_block()).
 */
static DEFINE_MUTEX(xdomain_lock);

/* Global property directory exposed to remote domains, and its generation */
static struct tb_property_dir *xdomain_property_dir;
static u32 xdomain_property_block_gen;

/* Handlers registered through tb_register_protocol_handler() */
static LIST_HEAD(protocol_handlers);

/* UUID identifying the XDomain discovery protocol itself */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
0082
0083 bool tb_is_xdomain_enabled(void)
0084 {
0085 return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
0086 }
0087
/*
 * Matches an incoming control channel packet against an outstanding
 * XDomain request: error packets always match; an XDomain response
 * matches when it is large enough and its route and protocol UUID
 * agree with the request.
 */
static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		/* frame.size is in dwords; reject responses that are too short */
		if (pkg->frame.size < req->response_size / 4)
			return false;

		/*
		 * Route must match the request. The topmost bit of
		 * route_hi is masked off before comparing (presumably
		 * set by the responder — NOTE(review): confirm against
		 * the protocol spec).
		 */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Protocol UUID must match the request as well */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}
0120
0121 static bool tb_xdomain_copy(struct tb_cfg_request *req,
0122 const struct ctl_pkg *pkg)
0123 {
0124 memcpy(req->response, pkg->buffer, req->response_size);
0125 req->result.err = 0;
0126 return true;
0127 }
0128
/* Completion callback: drops the reference taken when the response was queued */
static void response_ready(void *data)
{
	struct tb_cfg_request *req = data;

	tb_cfg_request_put(req);
}
0133
/*
 * Queues an XDomain response packet on the control channel. The request
 * structure reference is released by response_ready() once the control
 * layer is done with it.
 */
static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	/* From the control layer's point of view the outgoing packet is the request */
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}
0151
/**
 * tb_xdomain_response() - Send a XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send a XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
0170
/*
 * Sends an XDomain request and synchronously waits for the matching
 * response or until @timeout_msec expires. Note that the control layer
 * reports one class of failure with res.err == 1; that is translated to
 * -EIO here, other errors are passed through.
 */
static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}
0198
/**
 * tb_xdomain_request() - Send a XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or when timeout triggers. Whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type,
	void *response, size_t response_size,
	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
0226
0227 static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
0228 u8 sequence, enum tb_xdp_type type, size_t size)
0229 {
0230 u32 length_sn;
0231
0232 length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
0233 length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;
0234
0235 hdr->xd_hdr.route_hi = upper_32_bits(route);
0236 hdr->xd_hdr.route_lo = lower_32_bits(route);
0237 hdr->xd_hdr.length_sn = length_sn;
0238 hdr->type = type;
0239 memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
0240 }
0241
0242 static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
0243 {
0244 if (res->hdr.type != ERROR_RESPONSE)
0245 return 0;
0246
0247 switch (res->error) {
0248 case ERROR_UNKNOWN_PACKET:
0249 case ERROR_UNKNOWN_DOMAIN:
0250 return -EIO;
0251 case ERROR_NOT_SUPPORTED:
0252 return -ENOTSUPP;
0253 case ERROR_NOT_READY:
0254 return -EAGAIN;
0255 default:
0256 break;
0257 }
0258
0259 return 0;
0260 }
0261
/*
 * Asks the remote host for its UUID. On success copies the remote UUID
 * to @uuid and the route string reported by the remote end to
 * @remote_route.
 */
static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid, u64 *remote_route)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	/* Sequence number cycles 0..3 as retries are made */
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;

	return 0;
}
0290
0291 static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
0292 const uuid_t *uuid)
0293 {
0294 struct tb_xdp_uuid_response res;
0295
0296 memset(&res, 0, sizeof(res));
0297 tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
0298 sizeof(res));
0299
0300 uuid_copy(&res.src_uuid, uuid);
0301 res.src_route_hi = upper_32_bits(route);
0302 res.src_route_lo = lower_32_bits(route);
0303
0304 return __tb_xdomain_response(ctl, &res, sizeof(res),
0305 TB_CFG_PKG_XDOMAIN_RESP);
0306 }
0307
0308 static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
0309 enum tb_xdp_error error)
0310 {
0311 struct tb_xdp_error_response res;
0312
0313 memset(&res, 0, sizeof(res));
0314 tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
0315 sizeof(res));
0316 res.error = error;
0317
0318 return __tb_xdomain_response(ctl, &res, sizeof(res),
0319 TB_CFG_PKG_XDOMAIN_RESP);
0320 }
0321
/*
 * Reads the remote property block. The block is transferred in chunks
 * of at most TB_XDP_PROPERTIES_MAX_DATA_LENGTH dwords; this loops
 * increasing req.offset until the whole advertised block has been
 * received. On success returns the block length in dwords, stores the
 * allocated block in @block (caller frees) and its generation in
 * @generation.
 */
static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	/* Response buffer big enough for a maximum sized data chunk */
	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->err);
		if (ret)
			goto err;

		/*
		 * The length field counts dwords following the basic
		 * header, so it must at least cover the fixed part of
		 * the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		/* Convert to the number of data dwords in this chunk */
		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		/* The responder must echo back the offset we asked for */
		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * On the first chunk, allocate a buffer large enough to
		 * hold the whole property block based on the advertised
		 * total data length.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}
0413
/*
 * Answers a remote properties request with a chunk of our local
 * property block starting at the requested offset. Holds xd->lock
 * while snapshotting the block so it cannot change underneath us.
 */
static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * We only serve properties for our own local UUID; anything
	 * else gets an "unknown domain" error (not a failure for us,
	 * hence return 0).
	 */
	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, xd->route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xd->lock);

	/* Requested offset must fall inside the current block */
	if (req->offset >= xd->local_property_block_len) {
		mutex_unlock(&xd->lock);
		return -EINVAL;
	}

	/* Clamp the chunk to the protocol's per-packet maximum (dwords) */
	len = xd->local_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xd->lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xd->local_property_block_gen;
	res->data_length = xd->local_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, xd->local_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);

	mutex_unlock(&xd->lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}
0467
0468 static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
0469 int retry, const uuid_t *uuid)
0470 {
0471 struct tb_xdp_properties_changed_response res;
0472 struct tb_xdp_properties_changed req;
0473 int ret;
0474
0475 memset(&req, 0, sizeof(req));
0476 tb_xdp_fill_header(&req.hdr, route, retry % 4,
0477 PROPERTIES_CHANGED_REQUEST, sizeof(req));
0478 uuid_copy(&req.src_uuid, uuid);
0479
0480 memset(&res, 0, sizeof(res));
0481 ret = __tb_xdomain_request(ctl, &req, sizeof(req),
0482 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
0483 TB_CFG_PKG_XDOMAIN_RESP,
0484 XDOMAIN_DEFAULT_TIMEOUT);
0485 if (ret)
0486 return ret;
0487
0488 return tb_xdp_handle_error(&res.err);
0489 }
0490
0491 static int
0492 tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
0493 {
0494 struct tb_xdp_properties_changed_response res;
0495
0496 memset(&res, 0, sizeof(res));
0497 tb_xdp_fill_header(&res.hdr, route, sequence,
0498 PROPERTIES_CHANGED_RESPONSE, sizeof(res));
0499 return __tb_xdomain_response(ctl, &res, sizeof(res),
0500 TB_CFG_PKG_XDOMAIN_RESP);
0501 }
0502
/*
 * Queries the remote lane adapter's link state. On success fills in the
 * supported link width/speed (@slw/@sls) and the target link
 * width/speed (@tlw/@tls) reported by the remote end.
 */
static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 *slw, u8 *tlw,
					    u8 *sls, u8 *tls)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_xdp_link_state_status req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	/* Non-zero status means the remote end failed the request */
	if (res.status != 0)
		return -EREMOTEIO;

	*slw = res.slw;
	*tlw = res.tlw;
	*sls = res.sls;
	*tls = res.tls;

	return 0;
}
0536
/*
 * Answers a link state status request by reading our local lane
 * adapter's LANE_ADP_CS_0/1 registers and reporting the supported and
 * target width/speed fields to the remote end.
 */
static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
					     struct tb_xdomain *xd, u8 sequence)
{
	struct tb_switch *sw = tb_to_switch(xd->dev.parent);
	struct tb_xdp_link_state_status_response res;
	struct tb_port *port = tb_port_at(xd->route, sw);
	u32 val[2];
	int ret;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, xd->route, sequence,
			   LINK_STATE_STATUS_RESPONSE, sizeof(res));

	/* Read both lane adapter config dwords in one go */
	ret = tb_port_read(port, val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
	if (ret)
		return ret;

	res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
	res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
	res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
	res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
0566
/*
 * Asks the remote end to change its target link width (@tlw) and speed
 * (@tls). Returns -EREMOTEIO when the remote end rejects the change.
 */
static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 tlw, u8 tls)
{
	struct tb_xdp_link_state_change_response res;
	struct tb_xdp_link_state_change req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
			   sizeof(req));
	req.tlw = tlw;
	req.tls = tls;

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	return res.status != 0 ? -EREMOTEIO : 0;
}
0593
0594 static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
0595 u8 sequence, u32 status)
0596 {
0597 struct tb_xdp_link_state_change_response res;
0598
0599 memset(&res, 0, sizeof(res));
0600 tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
0601 sizeof(res));
0602
0603 res.status = status;
0604
0605 return __tb_xdomain_response(ctl, &res, sizeof(res),
0606 TB_CFG_PKG_XDOMAIN_RESP);
0607 }
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618 int tb_register_protocol_handler(struct tb_protocol_handler *handler)
0619 {
0620 if (!handler->uuid || !handler->callback)
0621 return -EINVAL;
0622 if (uuid_equal(handler->uuid, &tb_xdp_uuid))
0623 return -EINVAL;
0624
0625 mutex_lock(&xdomain_lock);
0626 list_add_tail(&handler->list, &protocol_handlers);
0627 mutex_unlock(&xdomain_lock);
0628
0629 return 0;
0630 }
0631 EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
0632
/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
0646
/*
 * (Re)generates the local property block for @xd from the global
 * property directory, adding the per-domain standard properties
 * (deviceid, maxhopid). Takes xdomain_lock then xd->lock — this lock
 * order must match all other users.
 */
static void update_property_block(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);

	/*
	 * Only regenerate if we have no block yet or the global
	 * properties have changed since our copy was made.
	 */
	if (!xd->local_property_block ||
	    xd->local_property_block_gen < xdomain_property_block_gen) {
		struct tb_property_dir *dir;
		int ret, block_len;
		u32 *block;

		dir = tb_property_copy_dir(xdomain_property_dir);
		if (!dir) {
			dev_warn(&xd->dev, "failed to copy properties\n");
			goto out_unlock;
		}

		/* Fill in the standard per-domain properties */
		tb_property_add_text(dir, "deviceid", utsname()->nodename);
		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);

		/* First pass computes the required block length in dwords */
		ret = tb_property_format_dir(dir, NULL, 0);
		if (ret < 0) {
			dev_warn(&xd->dev, "local property block creation failed\n");
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		block_len = ret;
		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
		if (!block) {
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		/* Second pass serializes the directory into the block */
		ret = tb_property_format_dir(dir, block, block_len);
		if (ret) {
			dev_warn(&xd->dev, "property block generation failed\n");
			tb_property_free_dir(dir);
			kfree(block);
			goto out_unlock;
		}

		tb_property_free_dir(dir);

		/* Swap in the new block and record the generation it matches */
		kfree(xd->local_property_block);

		xd->local_property_block = block;
		xd->local_property_block_len = block_len;
		xd->local_property_block_gen = xdomain_property_block_gen;
	}

out_unlock:
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);
}
0706
/*
 * Work function that dispatches a received XDomain request packet to
 * the appropriate response handler. Consumes the packet copy and the
 * domain reference taken in tb_xdp_schedule_request().
 */
static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	struct tb_xdomain *xd;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	/* Extract route (top bit masked off) and sequence number from the header */
	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	/* Domain not fully up yet — tell the sender to try again later */
	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	/* Takes a reference to xd if found; released at the end */
	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (xd)
		update_property_block(xd);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties request\n", route);
		if (xd) {
			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
				(const struct tb_xdp_properties *)pkg);
		}
		break;

	case PROPERTIES_CHANGED_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties changed request\n",
		       route);

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Remote properties changed — kick the state machine so
		 * the xdomain for this connection gets re-enumerated.
		 */
		if (xd && device_is_registered(&xd->dev))
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		break;

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		break;

	case LINK_STATE_STATUS_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state status request\n",
		       route);

		if (xd) {
			ret = tb_xdp_link_state_status_response(tb, ctl, xd,
								sequence);
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	case LINK_STATE_CHANGE_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state change request\n",
		       route);

		/* Only valid while we are waiting in the bonding handshake */
		if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
			const struct tb_xdp_link_state_change *lsc =
				(const struct tb_xdp_link_state_change *)pkg;

			ret = tb_xdp_link_state_change_response(ctl, route,
								sequence, 0);
			xd->target_link_width = lsc->tlw;
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	default:
		tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	tb_xdomain_put(xd);

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}
0823
0824 static bool
0825 tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
0826 size_t size)
0827 {
0828 struct xdomain_request_work *xw;
0829
0830 xw = kmalloc(sizeof(*xw), GFP_KERNEL);
0831 if (!xw)
0832 return false;
0833
0834 INIT_WORK(&xw->work, tb_xdp_handle_request);
0835 xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
0836 if (!xw->pkg) {
0837 kfree(xw);
0838 return false;
0839 }
0840 xw->tb = tb_domain_get(tb);
0841
0842 schedule_work(&xw->work);
0843 return true;
0844 }
0845
/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers new service driver from @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
0858
/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
0870
/* sysfs: service directory key from the remote property block */
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * The key comes from the remote end so escape non-printable
	 * characters (%pE) instead of trusting it blindly.
	 */
	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
0883
/*
 * Formats the modalias string ("tbsvc:k<key>p<id>v<vers>r<revs>") into
 * @buf. Returns the snprintf() result (may indicate truncation).
 */
static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
0889
0890 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
0891 char *buf)
0892 {
0893 struct tb_service *svc = container_of(dev, struct tb_service, dev);
0894
0895
0896 get_modalias(svc, buf, PAGE_SIZE - 2);
0897 return strlen(strcat(buf, "\n"));
0898 }
0899 static DEVICE_ATTR_RO(modalias);
0900
/* sysfs: protocol identifier of the service */
static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);
0909
/* sysfs: protocol version of the service */
static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);
0918
/* sysfs: protocol revision of the service */
static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);
0927
/* sysfs: protocol settings bitmask of the service */
static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);
0936
/* sysfs attributes exposed for every tb_service device */
static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};
0955
/* Adds MODALIAS to the uevent environment so userspace can match drivers */
static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}
0964
/*
 * Device release callback: frees the service and returns its ID to the
 * parent xdomain's IDA. Runs when the last device reference is dropped.
 */
static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}
0975
/* Device type shared by all XDomain services on the Thunderbolt bus */
struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);
0983
0984 static int remove_missing_service(struct device *dev, void *data)
0985 {
0986 struct tb_xdomain *xd = data;
0987 struct tb_service *svc;
0988
0989 svc = tb_to_service(dev);
0990 if (!svc)
0991 return 0;
0992
0993 if (!tb_property_find(xd->remote_properties, svc->key,
0994 TB_PROPERTY_TYPE_DIRECTORY))
0995 device_unregister(dev);
0996
0997 return 0;
0998 }
0999
1000 static int find_service(struct device *dev, void *data)
1001 {
1002 const struct tb_property *p = data;
1003 struct tb_service *svc;
1004
1005 svc = tb_to_service(dev);
1006 if (!svc)
1007 return 0;
1008
1009 return !strcmp(svc->key, p->key);
1010 }
1011
1012 static int populate_service(struct tb_service *svc,
1013 struct tb_property *property)
1014 {
1015 struct tb_property_dir *dir = property->value.dir;
1016 struct tb_property *p;
1017
1018
1019 p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
1020 if (p)
1021 svc->prtcid = p->value.immediate;
1022 p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
1023 if (p)
1024 svc->prtcvers = p->value.immediate;
1025 p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
1026 if (p)
1027 svc->prtcrevs = p->value.immediate;
1028 p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
1029 if (p)
1030 svc->prtcstns = p->value.immediate;
1031
1032 svc->key = kstrdup(property->key, GFP_KERNEL);
1033 if (!svc->key)
1034 return -ENOMEM;
1035
1036 return 0;
1037 }
1038
/*
 * Synchronizes the child service devices of @xd with the current remote
 * property block: removes services that disappeared and registers new
 * ones found in the block.
 */
static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then create devices for any new service directories */
	tb_property_for_each(xd->remote_properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service already exists we are done with it */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		/* On failure put_device() releases svc via tb_service_release() */
		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}
1093
/*
 * Extracts the standard remote properties from @dir into @xd. The
 * numeric deviceid and vendorid are mandatory; names and maxhopid are
 * optional. Returns -EINVAL if a mandatory property is missing.
 */
static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Mandatory properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
	/*
	 * If the remote does not announce maxhopid fall back to the
	 * default (15) — presumably for compatibility with older
	 * implementations; TODO confirm against the inter-domain spec.
	 */
	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;

	/* Drop previous names before (possibly) replacing them */
	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}
1133
/* Returns the switch the XDomain connection is attached to */
static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
	return tb_to_switch(xd->dev.parent);
}
1138
1139 static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
1140 {
1141 bool change = false;
1142 struct tb_port *port;
1143 int ret;
1144
1145 port = tb_port_at(xd->route, tb_xdomain_parent(xd));
1146
1147 ret = tb_port_get_link_speed(port);
1148 if (ret < 0)
1149 return ret;
1150
1151 if (xd->link_speed != ret)
1152 change = true;
1153
1154 xd->link_speed = ret;
1155
1156 ret = tb_port_get_link_width(port);
1157 if (ret < 0)
1158 return ret;
1159
1160 if (xd->link_width != ret)
1161 change = true;
1162
1163 xd->link_width = ret;
1164
1165 if (change)
1166 kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
1167
1168 return 0;
1169 }
1170
/*
 * State machine step: queries the remote host UUID. Returns -EAGAIN to
 * request a retry, -ENODEV when the remote host has been replaced,
 * -ENOMEM on allocation failure and 0 on success.
 */
static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	uuid_t uuid;
	u64 route;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
				  &route);
	if (ret < 0) {
		/* Retry until the retry budget is exhausted */
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			return -EAGAIN;
		} else {
			dev_dbg(&xd->dev, "failed to read remote UUID\n");
		}
		return ret;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	if (uuid_equal(&uuid, xd->local_uuid)) {
		if (route == xd->route)
			dev_dbg(&xd->dev, "loop back detected\n");
		else
			dev_dbg(&xd->dev, "intra-domain loop detected\n");

		/* Lane bonding is not done over loops within the same domain */
		xd->bonding_possible = false;
	}

	/*
	 * If the remote UUID changed, the other host was replaced by
	 * another one. Mark this connection unplugged so it gets torn
	 * down and rediscovered.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return -ENODEV;
	}

	/* First time, store the remote UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return -ENOMEM;
	}

	return 0;
}
1224
/*
 * State machine step: asks the remote end for its link state and checks
 * whether it supports dual lane operation (needed for bonding). Returns
 * -EAGAIN to request a retry, -EOPNOTSUPP when the remote end cannot
 * bond, other negative errno on failure.
 */
static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	u8 slw, tlw, sls, tls;
	int ret;

	dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
		xd->remote_uuid);

	ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
					       xd->state_retries, &slw, &tlw, &sls,
					       &tls);
	if (ret) {
		/* -EOPNOTSUPP means the remote does not support this — don't retry */
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote link status, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to receive remote link status\n");
		return ret;
	}

	dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);

	/* Bonding needs the remote adapter to support dual lanes */
	if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
		dev_dbg(&xd->dev, "remote adapter is single lane only\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
1256
/*
 * Ask the remote host to change its lane adapter target link width.
 * @width: 1 for single lane, 2 for dual lane (anything else is -EINVAL).
 *
 * The target speed sent along with the request is taken from our own lane
 * adapter's current LANE_ADP_CS_1 setting. Returns 0 on success, -EAGAIN
 * when the caller should retry, or a negative errno on failure.
 */
static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
					unsigned int width)
{
	struct tb_switch *sw = tb_to_switch(xd->dev.parent);
	struct tb_port *port = tb_port_at(xd->route, sw);
	struct tb *tb = xd->tb;
	u8 tlw, tls;
	u32 val;
	int ret;

	if (width == 2)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
	else if (width == 1)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
	else
		return -EINVAL;

	/* Use the current target speed of our own lane adapter */
	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;
	tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;

	dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
		tlw, tls);

	ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
					       xd->state_retries, tlw, tls);
	if (ret) {
		/* -EOPNOTSUPP is final, no point in retrying it */
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to change remote link state, retrying\n");
			return -EAGAIN;
		}
		dev_err(&xd->dev, "failed request link state change, aborting\n");
		return ret;
	}

	dev_dbg(&xd->dev, "received link state change response\n");
	return 0;
}
1298
/*
 * Runs on the side with the higher UUID: wait until the other (lower
 * UUID) side has sent us a link state change request — presumably
 * recorded into @xd->target_link_width by the XDP request handler
 * elsewhere in this file (TODO confirm) — then program both local lane
 * adapters to the requested width.
 *
 * Returns 0 on success, -EAGAIN while still waiting (retries remaining),
 * -ETIMEDOUT when the request never arrived, or a negative errno.
 */
static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
	struct tb_port *port;
	int ret, width;

	if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
		width = 1;
	} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
		width = 2;
	} else {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"link state change request not received yet, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "timeout waiting for link change request\n");
		return -ETIMEDOUT;
	}

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));

	/*
	 * Set the target width on both local lane adapters (second lane
	 * first) and then wait for the link to actually come up with the
	 * requested width.
	 */
	ret = tb_port_set_link_width(port->dual_link_port, width);
	if (ret) {
		tb_port_warn(port->dual_link_port,
			     "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_set_link_width(port, width);
	if (ret) {
		tb_port_warn(port, "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		dev_warn(&xd->dev, "error waiting for link width to become %d\n",
			 width);
		return ret;
	}

	/* Mark both lane adapters (un)bonded accordingly */
	port->bonded = width == 2;
	port->dual_link_port->bonded = width == 2;

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding %sabled\n", width == 2 ? "en" : "dis");
	return 0;
}
1355
/*
 * Fetch the remote property block over XDP, parse it, and either add the
 * XDomain device to the bus (first successful enumeration) or send a
 * change uevent (property update). Returns 0 on success, -EAGAIN when the
 * caller should retry, or a negative errno on failure.
 */
static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->state_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			return -EAGAIN;
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}

		return ret;
	}

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
		ret = 0;
		goto err_free_block;
	}

	/* On success @ret is the property block length passed to the parser */
	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		ret = -ENOMEM;
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->remote_properties) {
		tb_property_free_dir(xd->remote_properties);
		update = true;
	}

	xd->remote_properties = dir;
	xd->remote_property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus. On later (update) rounds just notify userspace that the
	 * properties changed.
	 */
	if (!update) {
		struct tb_port *port;

		/* If the link is not bonded the second lane stays unused */
		port = tb_port_at(xd->route, tb_xdomain_parent(xd));
		if (!port->bonded)
			tb_port_disable(port->dual_link_port);

		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return -ENODEV;
		}
		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
			 xd->vendor, xd->device);
		if (xd->vendor_name && xd->device_name)
			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
				 xd->device_name);
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return 0;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);

	return ret;
}
1458
1459 static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
1460 {
1461 xd->state = XDOMAIN_STATE_UUID;
1462 xd->state_retries = XDOMAIN_RETRIES;
1463 queue_delayed_work(xd->tb->wq, &xd->state_work,
1464 msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
1465 }
1466
1467 static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
1468 {
1469 xd->state = XDOMAIN_STATE_LINK_STATUS;
1470 xd->state_retries = XDOMAIN_RETRIES;
1471 queue_delayed_work(xd->tb->wq, &xd->state_work,
1472 msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1473 }
1474
1475 static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
1476 {
1477 xd->state = XDOMAIN_STATE_LINK_STATUS2;
1478 xd->state_retries = XDOMAIN_RETRIES;
1479 queue_delayed_work(xd->tb->wq, &xd->state_work,
1480 msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1481 }
1482
1483 static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
1484 {
1485 if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
1486 dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
1487 xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
1488 } else {
1489 dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
1490 xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
1491 }
1492
1493 xd->state_retries = XDOMAIN_RETRIES;
1494 queue_delayed_work(xd->tb->wq, &xd->state_work,
1495 msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1496 }
1497
1498 static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
1499 {
1500 xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
1501 xd->state_retries = XDOMAIN_RETRIES;
1502 queue_delayed_work(xd->tb->wq, &xd->state_work,
1503 msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1504 }
1505
1506 static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
1507 {
1508 xd->state = XDOMAIN_STATE_PROPERTIES;
1509 xd->state_retries = XDOMAIN_RETRIES;
1510 queue_delayed_work(xd->tb->wq, &xd->state_work,
1511 msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1512 }
1513
1514 static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
1515 {
1516 xd->properties_changed_retries = XDOMAIN_RETRIES;
1517 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
1518 msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
1519 }
1520
/*
 * Discovery state machine worker. Each state either advances to the next
 * state by queueing the work again, or asks for a re-run of the same
 * state via the retry_state label when the state handler returned
 * -EAGAIN.
 */
static void tb_xdomain_state_work(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
	int ret, state = xd->state;

	if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
			 state > XDOMAIN_STATE_ERROR))
		return;

	dev_dbg(&xd->dev, "running state %s\n", state_names[state]);

	switch (state) {
	case XDOMAIN_STATE_INIT:
		if (xd->needs_uuid) {
			tb_xdomain_queue_uuid(xd);
		} else {
			/* Remote UUID already known, go straight to properties */
			tb_xdomain_queue_properties_changed(xd);
			tb_xdomain_queue_properties(xd);
		}
		break;

	case XDOMAIN_STATE_UUID:
		ret = tb_xdomain_get_uuid(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			xd->state = XDOMAIN_STATE_ERROR;
		} else {
			tb_xdomain_queue_properties_changed(xd);
			if (xd->bonding_possible)
				tb_xdomain_queue_link_status(xd);
			else
				tb_xdomain_queue_properties(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATUS:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;

			/*
			 * If any of the lane bonding states fail we skip
			 * bonding completely and try to enumerate the
			 * host directly instead.
			 */
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATE_CHANGE:
		ret = tb_xdomain_link_state_change(xd, 2);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_link_status2(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATUS2:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding_uuid_low(xd);
		}
		break;

	case XDOMAIN_STATE_BONDING_UUID_LOW:
		/* Bonding failure is not fatal; continue enumeration anyway */
		tb_xdomain_lane_bonding_enable(xd);
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_BONDING_UUID_HIGH:
		if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
			goto retry_state;
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_PROPERTIES:
		ret = tb_xdomain_get_properties(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			xd->state = XDOMAIN_STATE_ERROR;
		} else {
			xd->state = XDOMAIN_STATE_ENUMERATED;
		}
		break;

	case XDOMAIN_STATE_ENUMERATED:
		/* Re-read properties when the work is queued again */
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_ERROR:
		break;

	default:
		dev_warn(&xd->dev, "unexpected state %d\n", state);
		break;
	}

	return;

retry_state:
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
1636
1637 static void tb_xdomain_properties_changed(struct work_struct *work)
1638 {
1639 struct tb_xdomain *xd = container_of(work, typeof(*xd),
1640 properties_changed_work.work);
1641 int ret;
1642
1643 dev_dbg(&xd->dev, "sending properties changed notification\n");
1644
1645 ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
1646 xd->properties_changed_retries, xd->local_uuid);
1647 if (ret) {
1648 if (xd->properties_changed_retries-- > 0) {
1649 dev_dbg(&xd->dev,
1650 "failed to send properties changed notification, retrying\n");
1651 queue_delayed_work(xd->tb->wq,
1652 &xd->properties_changed_work,
1653 msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
1654 }
1655 dev_err(&xd->dev, "failed to send properties changed notification\n");
1656 return;
1657 }
1658
1659 xd->properties_changed_retries = XDOMAIN_RETRIES;
1660 }
1661
1662 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1663 char *buf)
1664 {
1665 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1666
1667 return sprintf(buf, "%#x\n", xd->device);
1668 }
1669 static DEVICE_ATTR_RO(device);
1670
1671 static ssize_t
1672 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1673 {
1674 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1675 int ret;
1676
1677 if (mutex_lock_interruptible(&xd->lock))
1678 return -ERESTARTSYS;
1679 ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
1680 mutex_unlock(&xd->lock);
1681
1682 return ret;
1683 }
1684 static DEVICE_ATTR_RO(device_name);
1685
1686 static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
1687 char *buf)
1688 {
1689 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1690
1691 return sprintf(buf, "%d\n", xd->remote_max_hopid);
1692 }
1693 static DEVICE_ATTR_RO(maxhopid);
1694
1695 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1696 char *buf)
1697 {
1698 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1699
1700 return sprintf(buf, "%#x\n", xd->vendor);
1701 }
1702 static DEVICE_ATTR_RO(vendor);
1703
1704 static ssize_t
1705 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1706 {
1707 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1708 int ret;
1709
1710 if (mutex_lock_interruptible(&xd->lock))
1711 return -ERESTARTSYS;
1712 ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
1713 mutex_unlock(&xd->lock);
1714
1715 return ret;
1716 }
1717 static DEVICE_ATTR_RO(vendor_name);
1718
1719 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1720 char *buf)
1721 {
1722 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1723
1724 return sprintf(buf, "%pUb\n", xd->remote_uuid);
1725 }
1726 static DEVICE_ATTR_RO(unique_id);
1727
1728 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1729 char *buf)
1730 {
1731 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1732
1733 return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
1734 }
1735
1736 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1737 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1738
1739 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1740 char *buf)
1741 {
1742 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1743
1744 return sprintf(buf, "%u\n", xd->link_width);
1745 }
1746
1747 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1748 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1749
/* sysfs attributes exposed for every XDomain device */
static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_maxhopid.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static const struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

/* Assigned to xd->dev.groups in tb_xdomain_alloc() */
static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};
1772
/*
 * Device release callback: frees everything tb_xdomain_alloc() and the
 * discovery state machine allocated, and drops the parent reference taken
 * in tb_xdomain_alloc(). Runs when the last reference to the device goes
 * away.
 */
static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	kfree(xd->local_property_block);
	tb_property_free_dir(xd->remote_properties);
	ida_destroy(&xd->out_hopids);
	ida_destroy(&xd->in_hopids);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}
1791
/* Kick off the discovery state machine from its initial state */
static void start_handshake(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_INIT;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
1798
/* Cancel any pending discovery work and drop the retry budgets */
static void stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->properties_changed_work);
	cancel_delayed_work_sync(&xd->state_work);
	xd->properties_changed_retries = 0;
	xd->state_retries = 0;
}
1806
/* System sleep: stop the handshake machinery while suspended */
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}
1812
/* System resume: re-run the whole handshake from the beginning */
static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	start_handshake(tb_to_xdomain(dev));
	return 0;
}
1818
static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

/* Device type shared by all XDomain (host-to-host) devices */
struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (usually the switch whose port points to the
 *	    other host)
 * @route: Route string used to reach the other host
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other host, or %NULL if not yet known (it is
 *		 then queried during the handshake)
 *
 * Initializes the object but does not add it to the bus; call
 * tb_xdomain_add() for that. Returns the allocated object or %NULL on
 * allocation failure.
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_switch *parent_sw = tb_to_switch(parent);
	struct tb_xdomain *xd;
	struct tb_port *down;

	/* Unlock the downstream port so the handshake can proceed */
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	xd->local_max_hopid = down->config.max_in_hop_id;
	ida_init(&xd->service_ids);
	ida_init(&xd->in_hopids);
	ida_init(&xd->out_hopids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		/* UUID is fetched during the handshake instead */
		xd->needs_uuid = true;
		xd->bonding_possible = !!down->dual_link_port;
	}

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
	if (remote_uuid)
		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);

	/*
	 * Mark the device runtime-active and hold a usage count so the
	 * domain stays powered while we have a connection to the other
	 * host; the count is dropped in tb_xdomain_remove().
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * Starts the handshake; the device itself is added to the bus once the
 * remote properties have been successfully read (see
 * tb_xdomain_get_properties()).
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}
1926
/* device_for_each_child() callback: unregister one service device */
static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}
1932
1933
1934
1935
1936
1937
1938
1939
1940
/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * Stops the handshake, unregisters all child services and removes the
 * device from the bus (or just drops the reference if it was never
 * successfully added).
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo the runtime PM setup done in tb_xdomain_alloc()
	 * explicitly here: the device may never have been added to the
	 * bus, in which case device_unregister() would not run for it.
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev)) {
		put_device(&xd->dev);
	} else {
		dev_info(&xd->dev, "host disconnected\n");
		device_unregister(&xd->dev);
	}
}
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
/**
 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain link
 * @xd: XDomain connection
 *
 * Enables the second lane adapter, waits for it to come up, programs lane
 * bonding and waits (up to %XDOMAIN_BONDING_TIMEOUT ms) for the link to
 * reach dual-lane width. Returns %0 on success, -ENODEV when there is no
 * second lane, -ENOTCONN when the second lane did not come up, or another
 * negative errno.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (!port->dual_link_port)
		return -ENODEV;

	ret = tb_port_enable(port->dual_link_port);
	if (ret)
		return ret;

	/* tb_wait_for_port() returns > 0 when the port is connected */
	ret = tb_wait_for_port(port->dual_link_port, true);
	if (ret < 0)
		return ret;
	if (!ret)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(port);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding enabled\n");
	return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
2012
2013
2014
2015
2016
2017
2018
2019
/**
 * tb_xdomain_lane_bonding_disable() - Disable lane bonding on XDomain link
 * @xd: XDomain connection
 *
 * Disables lane bonding, waits briefly for the link to fall back to
 * single-lane width and disables the second lane adapter. No-op when
 * there is no second lane.
 */
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
	struct tb_port *port;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (port->dual_link_port) {
		tb_port_lane_bonding_disable(port);
		if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
			tb_port_warn(port, "timeout disabling lane bonding\n");
		tb_port_disable(port->dual_link_port);
		tb_port_update_credits(port);
		tb_xdomain_update_link_attributes(xd);

		dev_dbg(&xd->dev, "lane bonding disabled\n");
	}
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048 int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
2049 {
2050 if (hopid < 0)
2051 hopid = TB_PATH_MIN_HOPID;
2052 if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
2053 return -EINVAL;
2054
2055 return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
2056 GFP_KERNEL);
2057 }
2058 EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070 int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
2071 {
2072 if (hopid < 0)
2073 hopid = TB_PATH_MIN_HOPID;
2074 if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
2075 return -EINVAL;
2076
2077 return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
2078 GFP_KERNEL);
2079 }
2080 EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
2081
2082
2083
2084
2085
2086
/**
 * tb_xdomain_release_in_hopid() - Release input HopID
 * @xd: XDomain connection
 * @hopid: HopID to release (previously returned by
 *	   tb_xdomain_alloc_in_hopid())
 */
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->in_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
2092
2093
2094
2095
2096
2097
/**
 * tb_xdomain_release_out_hopid() - Release output HopID
 * @xd: XDomain connection
 * @hopid: HopID to release (previously returned by
 *	   tb_xdomain_alloc_out_hopid())
 */
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->out_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Thin wrapper that forwards to the connection manager implementation.
 * Returns %0 on success or a negative errno.
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring)
{
	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
					       transmit_ring, receive_path,
					       receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we were using to send out packets
 * @transmit_ring: DMA ring we were using to send out packets
 * @receive_path: HopID the other end was using to send packets to us
 * @receive_ring: DMA ring we were using to receive packets
 *
 * Thin wrapper that forwards to the connection manager implementation.
 * Returns %0 on success or a negative errno.
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring)
{
	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
						  transmit_ring, receive_path,
						  receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
2153
/*
 * Lookup key for switch_find_xdomain(). Exactly one of @uuid, @link (with
 * @depth) or @route is expected to be set; the others stay zeroed.
 */
struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};
2160
/*
 * Recursively walk @sw and everything below it, returning the first
 * XDomain matching @lookup (by UUID, by link/depth, or by route — in
 * that precedence order) or %NULL when none matches. No reference is
 * taken; callers use tb_xdomain_get() for that.
 */
static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
					      const struct tb_xdomain_lookup *lookup)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_xdomain *xd;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (xd->remote_uuid &&
				    uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (tb_port_has_remote(port)) {
			/* Descend into the connected switch */
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209 struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2210 {
2211 struct tb_xdomain_lookup lookup;
2212 struct tb_xdomain *xd;
2213
2214 memset(&lookup, 0, sizeof(lookup));
2215 lookup.uuid = uuid;
2216
2217 xd = switch_find_xdomain(tb->root_switch, &lookup);
2218 return tb_xdomain_get(xd);
2219 }
2220 EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238 struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
2239 u8 depth)
2240 {
2241 struct tb_xdomain_lookup lookup;
2242 struct tb_xdomain *xd;
2243
2244 memset(&lookup, 0, sizeof(lookup));
2245 lookup.link = link;
2246 lookup.depth = depth;
2247
2248 xd = switch_find_xdomain(tb->root_switch, &lookup);
2249 return tb_xdomain_get(xd);
2250 }
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267 struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
2268 {
2269 struct tb_xdomain_lookup lookup;
2270 struct tb_xdomain *xd;
2271
2272 memset(&lookup, 0, sizeof(lookup));
2273 lookup.route = route;
2274
2275 xd = switch_find_xdomain(tb->root_switch, &lookup);
2276 return tb_xdomain_get(xd);
2277 }
2278 EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
2279
/*
 * Entry point for incoming XDomain packets. Returns %true when the packet
 * was consumed (including malformed packets that are silently dropped)
 * and %false otherwise.
 */
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* Sanity-check the advertised length against the actual packet size */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. Other
	 * protocols are dispatched to registered protocol handlers based
	 * on the UUID in the header.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		/* Drop the lock around the callback to avoid re-entry issues */
		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}
2322
2323 static int update_xdomain(struct device *dev, void *data)
2324 {
2325 struct tb_xdomain *xd;
2326
2327 xd = tb_to_xdomain(dev);
2328 if (xd) {
2329 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
2330 msecs_to_jiffies(50));
2331 }
2332
2333 return 0;
2334 }
2335
/* Notify all connected hosts that our property block changed */
static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}
2340
2341 static bool remove_directory(const char *key, const struct tb_property_dir *dir)
2342 {
2343 struct tb_property *p;
2344
2345 p = tb_property_find(xdomain_property_dir, key,
2346 TB_PROPERTY_TYPE_DIRECTORY);
2347 if (p && p->value.dir == dir) {
2348 tb_property_remove(p);
2349 return true;
2350 }
2351 return false;
2352 }
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key under which the directory is registered (at most 8
 *	 characters)
 * @dir: Directory to register
 *
 * Adds @dir under @key in the global XDomain property directory, bumps
 * the property block generation and notifies all connected hosts.
 * Returns %0 on success, -EEXIST when @key is already registered, or
 * another negative errno.
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	xdomain_property_block_gen++;

	mutex_unlock(&xdomain_lock);
	/* Tell connected hosts our property block changed */
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407 void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
2408 {
2409 int ret = 0;
2410
2411 mutex_lock(&xdomain_lock);
2412 if (remove_directory(key, dir))
2413 xdomain_property_block_gen++;
2414 mutex_unlock(&xdomain_lock);
2415
2416 if (!ret)
2417 update_all_xdomains();
2418 }
2419 EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
2420
/*
 * Module init for the XDomain support: create the global property
 * directory with the standard base properties. Service drivers add their
 * own directories on top via tb_register_property_dir().
 */
int tb_xdomain_init(void)
{
	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Populate the standard base properties. Note "vendorid" appears
	 * twice on purpose: once as an immediate value and once as a
	 * text entry — the two are distinct property types sharing the
	 * same key.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	/* Random start so peers treat our first block as a new generation */
	xdomain_property_block_gen = prandom_u32();
	return 0;
}
2444
/* Module exit: release the global property directory */
void tb_xdomain_exit(void)
{
	tb_property_free_dir(xdomain_property_dir);
}