// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX	6

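/*
 * Read callback for the active NVM part. The retimer is runtime
 * resumed for the duration of the access, and the domain lock is taken
 * with mutex_trylock() so that a concurrent holder restarts the
 * syscall instead of blocking.
 */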
static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

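/*
 * Write callback for the non-active NVM part. Writes are only buffered
 * with tb_nvm_write_buf() here; the image is flushed to the retimer
 * later from tb_retimer_nvm_validate_and_write().
 */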
static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
				size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

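/*
 * Reads the NVM version and flash size from the retimer and registers
 * the active and non-active NVM parts. The non-active (writable) part
 * is always sized NVM_MAX_SIZE.
 */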
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	u32 val, nvm_size;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
					 sizeof(val));
	if (ret)
		goto err_nvm;

	nvm->major = val >> 16;
	nvm->minor = val >> 8;

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
					 &val, sizeof(val));
	if (ret)
		goto err_nvm;

	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - SZ_16K) / 2;

	ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}

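/*
 * Performs basic sanity checks on the buffered NVM image (size, FARB
 * pointer, digital section, device ID) and then writes the payload,
 * with the headers stripped, to the retimer.
 */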
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = rt->nvm->buf;
	u16 ds_size, device;
	int ret;

	image_size = rt->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	buf += hdr_size;
	image_size -= hdr_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					  image_size);
	if (!ret)
		rt->nvm->flushed = true;

	return ret;
}

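/*
 * Triggers NVM authentication. When @auth_only is set only the already
 * flashed image is authenticated (the NVM write offset is reset
 * first). Losing access to the retimer right after the request is
 * expected, so a failure to read back the status is not treated as an
 * error.
 */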
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * Check the status now if we still can access the retimer. It
	 * is expected that this fails after the NVM authentication
	 * image has been written.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sprintf(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

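/*
 * Writing to nvm_authenticate starts the NVM upgrade: WRITE_ONLY only
 * flushes the buffered image to the retimer, WRITE_AND_AUTHENTICATE
 * flushes it and then authenticates, and AUTHENTICATE_ONLY
 * authenticates the image already flashed to the retimer. The image
 * itself is expected to have been written beforehand through the
 * non-active NVMem device registered in tb_retimer_nvm_add().
 */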
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	rt->auth_status = 0;

	if (val) {
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sprintf(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

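/*
 * Probes the retimer at @index behind @port: reads the vendor and
 * device IDs over the sideband, checks that the NVM format is
 * supported (Intel only for now) and that the NVM sector size can be
 * read, then registers the retimer device with its NVM and enables
 * runtime PM for it.
 */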
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
		return ret;
	}

	if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
		tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
			     vendor);
		return -EOPNOTSUPP;
	}

	/*
	 * Check that it supports NVM operations. If not then don't add
	 * the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}

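/* Tears down the retimer NVM and unregisters the device. */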
static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

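/*
 * Returns the registered retimer at @index behind @port, or NULL if
 * there is none. device_find_child() takes a reference to the device
 * which the caller must drop with put_device().
 */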
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (!usb4)
		return 0;

	pm_runtime_get_sync(&usb4->dev);

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		goto out;

	/*
	 * Enable sideband channel for each retimer. We can do this
	 * regardless whether there is device connected or not.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	if (!last_idx) {
		ret = 0;
		goto out;
	}

	/* Add on-board retimers if they do not exist already */
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i]);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

out:
	pm_runtime_mark_last_busy(&usb4->dev);
	pm_runtime_put_autosuspend(&usb4->dev);

	return ret;
}

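/*
 * device_for_each_child_reverse() callback that removes the retimer if
 * it sits behind the given port.
 */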
static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}