// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Time Management Unit (TMU) support
 */

#include <linux/delay.h>

#include "tb.h"

static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_rate rate)
{
	u32 freq_meas_wind[2] = { 30, 800 };
	u32 avg_const[2] = { 4, 8 };
	u32 freq, avg, val;
	int ret;

	if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
		freq = freq_meas_wind[0];
		avg = avg_const[0];
	} else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
		freq = freq_meas_wind[1];
		avg = avg_const[1];
	} else {
		return 0;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_15, 1);
}

static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
{
	bool root_switch = !tb_route(sw);

	switch (sw->tmu.rate) {
	case TB_SWITCH_TMU_RATE_OFF:
		return "off";

	case TB_SWITCH_TMU_RATE_HIFI:
		/* Root switch does not have upstream directionality */
		if (root_switch)
			return "HiFi";
		if (sw->tmu.unidirectional)
			return "uni-directional, HiFi";
		return "bi-directional, HiFi";

	case TB_SWITCH_TMU_RATE_NORMAL:
		if (root_switch)
			return "normal";
		return "uni-directional, normal";

	default:
		return "unknown";
	}
}

static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return false;

	return !!(val & TMU_RTR_CS_0_UCAP);
}

static int tb_switch_tmu_rate_read(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
	return val;
}

static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_3, 1);
}

static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
			     u32 value)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
	if (ret)
		return ret;

	data &= ~mask;
	data |= value;

	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_tmu + offset, 1);
}

static int tb_port_tmu_set_unidirectional(struct tb_port *port,
					  bool unidirectional)
{
	u32 val;

	if (!port->sw->tmu.has_ucap)
		return 0;

	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
}

static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}

static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}

static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_3, 1);
	if (ret)
		return false;

	return val & TMU_ADP_CS_3_UDM;
}

/*
 * TMU_ADP_CS_6_DTS is a "Disable Time Sync" bit: setting it stops time
 * sync propagation through the adapter. Hence _disable() passes true
 * (set the bit) and _enable() passes false (clear it).
 */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}

static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}

static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}

static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 val, offset, bit;
	int ret;

	if (tb_switch_is_usb4(sw)) {
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (set)
		val |= bit;
	else
		val &= ~bit;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
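
/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */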
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_switch_is_icm(sw))
		return 0;

	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;

	sw->tmu.rate = ret;

	sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
	if (sw->tmu.has_ucap) {
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");

		if (tb_route(sw)) {
			struct tb_port *up = tb_upstream_port(sw);

			sw->tmu.unidirectional =
				tb_port_tmu_is_unidirectional(up);
		}
	} else {
		sw->tmu.unidirectional = false;
	}

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
	return 0;
}
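
/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using the time posting procedure.
 */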
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grandmaster time */
	if (!root_switch->tmu.cap)
		return 0;

	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 *    the Post Time High register.
	 * 2) writing 0 to the Post Time High register and then waiting
	 *    for the Post Time register to become 0.
	 * This forces the new switch to update its local time value.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
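
/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 */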
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/*
	 * No need to disable TMU on devices that don't support CLx since
	 * on these devices e.g. Alpine Ridge and earlier, the TMU mode
	 * HiFi bi-directional is enabled by default.
	 */
	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/* Already disabled? */
	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
		return 0;

	if (tb_route(sw)) {
		bool unidirectional = sw->tmu.unidirectional;
		struct tb_switch *parent = tb_switch_parent(sw);
		struct tb_port *down, *up;
		int ret;

		down = tb_port_at(tb_route(sw), parent);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, the TMU handshake
		 * is initiated by the upstream router, so the rate is
		 * turned off in the parent router. In case of
		 * bi-directional time sync, the handshake is initiated by
		 * this router, so the rate is turned off here.
		 */
		if (unidirectional)
			tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
		else
			tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		if (unidirectional) {
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
		}
	} else {
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
	}

	sw->tmu.unidirectional = false;
	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}

static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down, *up;

	down = tb_port_at(tb_route(sw), parent);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);
	if (unidirectional)
		tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
	else
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
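
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF.
 */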
static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_off(sw, false);
	return ret;
}

static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
{
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
}

static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);

	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
}
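
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF.
 */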
static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_off(sw, true);
	return ret;
}

static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down, *up;

	down = tb_port_at(tb_route(sw), parent);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when changing mode,
	 * get back to the TMU configurations in the previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
	if (sw->tmu.unidirectional_request)
		tb_switch_tmu_rate_write(parent, sw->tmu.rate);
	else
		tb_switch_tmu_rate_write(sw, sw->tmu.rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
}

static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	if (sw->tmu.unidirectional_request)
		ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
	else
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
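
/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables TMU of a router to be in uni-directional Normal/HiFi or
 * bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
 * required before calling this function to select the mode (Normal/HiFi)
 * and directionality (uni-directional/bi-directional). Uni-directional
 * mode is required for CLx (Link Low-Power) to work.
 */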
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	bool unidirectional = sw->tmu.unidirectional_request;
	int ret;

	if (unidirectional && !sw->tmu.has_ucap)
		return -EOPNOTSUPP;

	/*
	 * No need to enable TMU on devices that don't support CLx since
	 * on these devices e.g. Alpine Ridge and earlier, the TMU mode
	 * HiFi bi-directional is enabled by default.
	 */
	if (!tb_switch_is_clx_supported(sw))
		return 0;

	if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	if (tb_switch_is_titan_ridge(sw) && unidirectional) {
		/*
		 * Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are
		 * enabled and supported together.
		 */
		if (!tb_switch_is_clx_enabled(sw, TB_CL1))
			return -EOPNOTSUPP;

		ret = tb_switch_tmu_objection_mask(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_unidirectional_enable(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The used mode changes are from OFF to
		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
		 * HiFi-Uni.
		 */
		if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
			if (unidirectional)
				ret = __tb_switch_tmu_enable_unidirectional(sw);
			else
				ret = __tb_switch_tmu_enable_bidirectional(sw);
			if (ret)
				return ret;
		} else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
			ret = __tb_switch_tmu_change_mode(sw);
			if (ret)
				return ret;
		}
		sw->tmu.unidirectional = unidirectional;
	} else {
		/*
		 * Host router port configurations are written as part of
		 * the configurations for the downstream port of the parent
		 * of the child node - see above.
		 * Here only the host router's rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
		if (ret)
			return ret;
	}

	sw->tmu.rate = sw->tmu.rate_request;

	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
	return tb_switch_tmu_set_time_disruption(sw, false);
}
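
/**
 * tb_switch_tmu_configure() - Configure the TMU rate and directionality
 * @sw: Router whose mode to change
 * @rate: Rate to configure Off/Normal/HiFi
 * @unidirectional: Unidirectionality selection
 *
 * Selects the rate of the TMU and directionality (uni-directional or
 * bi-directional). Must be called before tb_switch_tmu_enable().
 */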
void tb_switch_tmu_configure(struct tb_switch *sw,
			     enum tb_switch_tmu_rate rate, bool unidirectional)
{
	sw->tmu.unidirectional_request = unidirectional;
	sw->tmu.rate_request = rate;
}

static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
					tb_switch_is_clx_enabled(sw, TB_CL1));
		if (tb_switch_tmu_enable(sw))
			tb_sw_dbg(sw, "failed to switch TMU mode for 1st depth router\n");
	}

	return 0;
}
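
/**
 * tb_switch_enable_tmu_1st_child() - Configure and enable TMU for 1st depth
 *				      child routers
 * @sw: The router whose child routers to configure and enable
 * @rate: Rate of the TMU to configure the child routers to
 *
 * Configures and enables the TMU mode of the 1st depth children of the
 * specified router to the specified rate.
 */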
void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
				    enum tb_switch_tmu_rate rate)
{
	device_for_each_child(&sw->dev, &rate,
			      tb_switch_tmu_config_enable);
}