// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit tests for Thunderbolt/USB4 path and tunnel handling
 */

#include <kunit/test.h>
#include <linux/idr.h>

#include "tb.h"
#include "tunnel.h"

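/*
 * Helpers that register a fake port's HopID IDAs as KUnit managed
 * resources so they are destroyed automatically when the test exits.
 */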
static int __ida_init(struct kunit_resource *res, void *context)
{
	struct ida *ida = context;

	ida_init(ida);
	res->data = ida;
	return 0;
}

static void __ida_destroy(struct kunit_resource *res)
{
	struct ida *ida = res->data;

	ida_destroy(ida);
}

static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}

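/*
 * Allocates a fake switch with the given route, upstream port number
 * and max_port_number + 1 ports. HopID IDAs are set up for all ports
 * except port 0.
 */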
static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
				      u8 upstream_port, u8 max_port_number)
{
	struct tb_switch *sw;
	size_t size;
	int i;

	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;
	sw->config.max_port_number = max_port_number;

	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
	if (!sw->ports)
		return NULL;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
		sw->ports[i].config.port_number = i;
		if (i) {
			kunit_ida_init(test, &sw->ports[i].in_hopids);
			kunit_ida_init(test, &sw->ports[i].out_hopids);
		}
	}

	return sw;
}

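/*
 * Fake host router: lane adapters 1-4, DP IN adapters 5 and 6, the NHI
 * (7), PCIe downstream adapters 8 and 9 and USB3 downstream adapters
 * 12 and 13. Ports 10 and 11 are disabled.
 */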
static struct tb_switch *alloc_host(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_switch(test, 0, 7, 13);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x9a1b;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 7;
	sw->ports[0].config.max_out_hop_id = 7;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[5].config.max_in_hop_id = 9;
	sw->ports[5].config.max_out_hop_id = 9;
	sw->ports[5].cap_adap = -1;

	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[6].config.max_in_hop_id = 9;
	sw->ports[6].config.max_out_hop_id = 9;
	sw->ports[6].cap_adap = -1;

	sw->ports[7].config.type = TB_TYPE_NHI;
	sw->ports[7].config.max_in_hop_id = 11;
	sw->ports[7].config.max_out_hop_id = 11;
	sw->ports[7].config.nfc_credits = 0x41800000;

	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[8].config.max_in_hop_id = 8;
	sw->ports[8].config.max_out_hop_id = 8;

	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].disabled = true;
	sw->ports[11].disabled = true;

	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[13].config.max_in_hop_id = 8;
	sw->ports[13].config.max_out_hop_id = 8;

	return sw;
}

static struct tb_switch *alloc_host_usb4(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_host(test);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 32;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 64;
	sw->max_dma_credits = 14;

	return sw;
}

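/*
 * Fake device router: lane adapters 1-8, PCIe upstream adapter 9 and
 * downstream adapters 10-12, DP OUT adapters 13 and 14, USB3 upstream
 * adapter 16 and downstream adapters 17-19. When @parent is given the
 * device is linked to it at @route and the lanes are optionally bonded.
 */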
static struct tb_switch *alloc_dev_default(struct kunit *test,
					   struct tb_switch *parent,
					   u64 route, bool bonded)
{
	struct tb_port *port, *upstream_port;
	struct tb_switch *sw;

	sw = alloc_switch(test, route, 1, 19);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x15ef;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 8;
	sw->ports[0].config.max_out_hop_id = 8;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_PORT;
	sw->ports[5].config.max_in_hop_id = 19;
	sw->ports[5].config.max_out_hop_id = 19;
	sw->ports[5].total_credits = 60;
	sw->ports[5].ctl_credits = 2;
	sw->ports[5].dual_link_port = &sw->ports[6];

	sw->ports[6].config.type = TB_TYPE_PORT;
	sw->ports[6].config.max_in_hop_id = 19;
	sw->ports[6].config.max_out_hop_id = 19;
	sw->ports[6].total_credits = 60;
	sw->ports[6].ctl_credits = 2;
	sw->ports[6].dual_link_port = &sw->ports[5];
	sw->ports[6].link_nr = 1;

	sw->ports[7].config.type = TB_TYPE_PORT;
	sw->ports[7].config.max_in_hop_id = 19;
	sw->ports[7].config.max_out_hop_id = 19;
	sw->ports[7].total_credits = 60;
	sw->ports[7].ctl_credits = 2;
	sw->ports[7].dual_link_port = &sw->ports[8];

	sw->ports[8].config.type = TB_TYPE_PORT;
	sw->ports[8].config.max_in_hop_id = 19;
	sw->ports[8].config.max_out_hop_id = 19;
	sw->ports[8].total_credits = 60;
	sw->ports[8].ctl_credits = 2;
	sw->ports[8].dual_link_port = &sw->ports[7];
	sw->ports[8].link_nr = 1;

	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[10].config.max_in_hop_id = 8;
	sw->ports[10].config.max_out_hop_id = 8;

	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[11].config.max_in_hop_id = 8;
	sw->ports[11].config.max_out_hop_id = 8;

	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;
	sw->ports[13].cap_adap = -1;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;
	sw->ports[14].cap_adap = -1;

	sw->ports[15].disabled = true;

	sw->ports[16].config.type = TB_TYPE_USB3_UP;
	sw->ports[16].config.max_in_hop_id = 8;
	sw->ports[16].config.max_out_hop_id = 8;

	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[17].config.max_in_hop_id = 8;
	sw->ports[17].config.max_out_hop_id = 8;

	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[18].config.max_in_hop_id = 8;
	sw->ports[18].config.max_out_hop_id = 8;

	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[19].config.max_in_hop_id = 8;
	sw->ports[19].config.max_out_hop_id = 8;

	if (!parent)
		return sw;

	/* Link the device to its parent at the given route */
	upstream_port = tb_upstream_port(sw);
	port = tb_port_at(route, parent);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;

		if (bonded) {
			/* Bonding is used so the link gets all the credits */
			port->bonded = true;
			port->total_credits *= 2;
			port->dual_link_port->bonded = true;
			port->dual_link_port->total_credits = 0;
			upstream_port->bonded = true;
			upstream_port->total_credits *= 2;
			upstream_port->dual_link_port->bonded = true;
			upstream_port->dual_link_port->total_credits = 0;
		}
	}

	return sw;
}

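/* Same as alloc_dev_default() but the DP adapters are DP IN instead */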
static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
					     struct tb_switch *parent,
					     u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;

	return sw;
}

static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
					      struct tb_switch *parent,
					      u64 route, bool bonded)
{
	struct tb_switch *sw;
	int i;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;
	/*
	 * Device with:
	 *
	 * 2x USB4 adapters (adapters 1,2 and 3,4),
	 * 1x PCIe upstream (adapter 9),
	 * 1x PCIe downstream (adapter 10),
	 * 1x USB3 upstream (adapter 16),
	 * 1x USB3 downstream (adapter 17)
	 *
	 * and no DP adapters.
	 */
	for (i = 5; i <= 8; i++)
		sw->ports[i].disabled = true;

	for (i = 11; i <= 14; i++)
		sw->ports[i].disabled = true;

	sw->ports[13].cap_adap = 0;
	sw->ports[14].cap_adap = 0;

	for (i = 18; i <= 19; i++)
		sw->ports[i].disabled = true;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 109;
	sw->min_dp_aux_credits = 0;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 30;
	sw->max_dma_credits = 1;

	return sw;
}

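/* Same as alloc_dev_default() but with USB4 credit allocation parameters set */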
static struct tb_switch *alloc_dev_usb4(struct kunit *test,
					struct tb_switch *parent,
					u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 14;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 18;
	sw->max_pcie_credits = 32;
	sw->max_dma_credits = 14;

	return sw;
}

static void tb_test_path_basic(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host;

	host = alloc_host(test);

	src_port = &host->ports[5];
	dst_port = src_port;

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

static void tb_test_path_not_connected_walk(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;

	host = alloc_host(test);

	/* The device is not connected to the host */
	dev = alloc_dev_default(test, NULL, 3, true);

	src_port = &host->ports[12];
	dst_port = &dev->ports[16];

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, src_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);

	/* Check the other direction */

	p = tb_next_port_on_path(dst_port, src_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

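/* Expected values for one port visited by tb_for_each_port_on_path() */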
struct port_expectation {
	u64 route;
	u8 port;
	enum tb_port_type type;
};

static void tb_test_path_single_hop_walk(struct kunit *test)
{
	/*
	 * Walks from Host PCIe downstream port to Device #1 PCIe
	 * upstream port.
	 *
	 *   [Host]
	 *    1 |
	 *    1 |
	 *  [Device]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 1, true);

	src_port = &host->ports[8];
	dst_port = &dev->ports[9];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_daisy_chain_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #2 DP OUT.
	 *
	 *   [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    3 |
	 *    1 |
	 *  [Device #2]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev2;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);

	src_port = &host->ports[5];
	dst_port = &dev2->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_simple_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #3 DP OUT.
	 *
	 *            [Host]
	 *             1 |
	 *             1 |
	 *           [Device #1]
	 *          3 /   | 5  \ 7
	 *         1 /    | 1   \ 1
	 * [Device #2] [Device #3] [Device #4]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev3;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev1, 0x501, true);
	alloc_dev_default(test, dev1, 0x701, true);

	src_port = &host->ports[5];
	dst_port = &dev3->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_complex_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Device #3 DP IN to Device #9 DP OUT through a tree
	 * of routers:
	 *
	 *   Host - Device #1 (0x1)
	 *   Device #1 - Device #2 (0x301), Device #4 (0x501), Device #5 (0x701)
	 *   Device #2 - Device #3 (0x50301, has DP IN)
	 *   Device #5 - Device #6 (0x70701)
	 *   Device #6 - Device #7 (0x3070701)
	 *   Device #7 - Device #8 (0x303070701), Device #9 (0x503070701)
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev5 = alloc_dev_default(test, dev1, 0x701, true);
	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
	alloc_dev_default(test, dev7, 0x303070701, true);
	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);

	src_port = &dev3->ports[13];
	dst_port = &dev9->ports[14];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_max_length_walk(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	/*
	 * Walks from Device #6 DP IN to Device #12 DP OUT. The two
	 * devices sit at the ends of two six-deep chains hanging off
	 * the host:
	 *
	 *   Host - Device #1 (0x1) - Device #2 (0x301) - Device #3 (0x30301) -
	 *          Device #4 (0x3030301) - Device #5 (0x303030301) -
	 *          Device #6 (0x30303030301, has DP IN)
	 *
	 *   Host - Device #7 (0x3) - Device #8 (0x303) - Device #9 (0x30303) -
	 *          Device #10 (0x3030303) - Device #11 (0x303030303) -
	 *          Device #12 (0x30303030303)
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	src_port = &dev6->ports[13];
	dst_port = &dev12->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_not_connected(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *down, *up;
	struct tb_path *path;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, false);
	/* Not connected to anything */
	dev2 = alloc_dev_default(test, NULL, 0x303, false);

	down = &dev1->ports[10];
	up = &dev2->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
}

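/* Expected in and out adapters for a single hop of an allocated tb_path */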
struct hop_expectation {
	u64 route;
	u8 in_port;
	enum tb_port_type in_type;
	u8 out_port;
	enum tb_port_type out_type;
};

static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
	/*
	 * PCIe path from host to device using lane 0. The link between
	 * the two is not bonded.
	 *
	 *   [Host]
	 *   3 |: 4
	 *   1 |: 2
	 *  [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 9,
			.in_type = TB_TYPE_PCIE_DOWN,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 9,
			.out_type = TB_TYPE_PCIE_UP,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *down, *up;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, false);

	down = &host->ports[9];
	up = &dev->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
	/*
	 * DP Video path from host to device using lane 1. The link is
	 * not bonded so the path is routed over the second lane
	 * adapters (host port 2 <-> device port 2).
	 *
	 *   [Host]
	 *   1 :| 2
	 *   1 :| 2
	 *  [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	in = &host->ports[5];
	out = &dev->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 3 using lane 1. None of the
	 * links are bonded so every hop uses the second lane adapter of
	 * the dual-link pair.
	 *
	 *   Host - Device #1 (0x1)
	 *   Device #1 - Device #2 (0x701)
	 *   Device #2 - Device #3 (0x50701)
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);

	in = &host->ports[5];
	out = &dev3->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 3 back to the host using lane 1.
	 * Same topology as tb_test_path_not_bonded_lane1_chain() but
	 * the path is built in the opposite direction (device 3 has a
	 * DP IN adapter).
	 *
	 *   Host - Device #1 (0x1)
	 *   Device #1 - Device #2 (0x701)
	 *   Device #2 - Device #3 (0x50701)
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x50701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);

	in = &dev3->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 4 where the first and last
	 * links are bonded and the two in the middle are not.
	 *
	 *   Host - Device #1 (0x1, bonded)
	 *   Device #1 - Device #2 (0x701, not bonded)
	 *   Device #2 - Device #3 (0x50701, not bonded)
	 *   Device #3 - Device #4 (0x3050701, bonded)
	 *
	 * Bonded links use the primary (lane 0) adapter, unbonded links
	 * use lane 1.
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3050701,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &host->ports[5];
	out = &dev4->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
	/*
	 * Same topology as tb_test_path_mixed_chain() but the DP Video
	 * path is built from device 4 back to the host.
	 *
	 *   Host - Device #1 (0x1, bonded)
	 *   Device #1 - Device #2 (0x701, not bonded)
	 *   Device #2 - Device #3 (0x50701, not bonded)
	 *   Device #3 - Device #4 (0x3050701, bonded)
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x3050701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_OUT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 3,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &dev4->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_tunnel_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create PCIe tunnel between host and device 1 and between
	 * device 1 and device 2.
	 *
	 *   [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    5 |
	 *    1 |
	 *  [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x501, true);

	down = &host->ports[8];
	up = &dev1->ports[9];
	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[10];
	up = &dev2->ports[9];
	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel between Host and Device.
	 *
	 *   [Host]
	 *    3 |
	 *    1 |
	 *  [Device]
	 */
	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, true);

	in = &host->ports[5];
	out = &dev->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev4;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
	 *
	 *            [Host]
	 *             1 |
	 *             1 |
	 *           [Device #1]
	 *          3 /   | 5  \ 7
	 *         1 /    | 1   \ 1
	 * [Device #2] [Device #3] [Device #4]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev4 = alloc_dev_default(test, dev1, 0x701, true);

	in = &host->ports[5];
	out = &dev4->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_tree(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
	 *
	 *            [Host]
	 *             3 |
	 *             1 |
	 *           [Device #1]
	 *          3 /   | 5  \ 7
	 *         1 /    | 1   \ 1
	 * [Device #2] [Device #3] [Device #4]
	 *                | 5
	 *                | 1
	 *            [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Creates DP tunnel from Device #6 DP IN to Device #12 DP OUT.
	 * The two devices sit at the ends of two six-deep chains
	 * hanging off the host:
	 *
	 *   Host - Device #1 (0x1) - Device #2 (0x301) - Device #3 (0x30301) -
	 *          Device #4 (0x3030301) - Device #5 (0x303030301) -
	 *          Device #6 (0x30303030301, has DP IN)
	 *
	 *   Host - Device #7 (0x3) - Device #8 (0x303) - Device #9 (0x30303) -
	 *          Device #10 (0x3030303) - Device #11 (0x303030303) -
	 *          Device #12 (0x30303030303)
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	in = &dev6->ports[13];
	out = &dev12->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
	/* First hop */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	/* In the middle, crossing the host router */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
			    &host->ports[3]);
	/* Last hop */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_usb3(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create USB3 tunnel between host and device 1 and between
	 * device 1 and device 2.
	 *
	 *   [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    7 |
	 *    1 |
	 *  [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	down = &host->ports[12];
	up = &dev1->ports[16];
	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[17];
	up = &dev2->ports[16];
	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_port_on_path(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
	struct tb_port *in, *out, *port;
	struct tb_tunnel *dp_tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT and
	 * check that tb_tunnel_port_on_path() reports only the ports
	 * that are actually part of the tunnel.
	 *
	 *            [Host]
	 *             3 |
	 *             1 |
	 *           [Device #1]
	 *          3 /   | 5  \ 7
	 *         1 /    | 1   \ 1
	 * [Device #2] [Device #3] [Device #4]
	 *                | 5
	 *                | 1
	 *            [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	dev4 = alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);

	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));

	port = &host->ports[8];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &host->ports[3];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[3];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[5];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[7];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev3->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev5->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev4->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	tb_tunnel_free(dp_tunnel);
}

static void tb_test_tunnel_dma(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create a DMA tunnel from the host NHI (port 7) to lane
	 * adapter port 1. Both directions use HopID 8 on the lane
	 * adapter and HopID 1 (the ring) on the NHI.
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_rx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create an RX-only DMA tunnel (the transmit path and ring are
	 * -1). A single path from lane adapter port 1 (HopID 15) to
	 * the NHI (ring 2) is expected.
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_tx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create a TX-only DMA tunnel (the receive path and ring are
	 * -1). A single path from the NHI (ring 2) to lane adapter
	 * port 1 (HopID 15) is expected.
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;

	/*
	 * Create a DMA tunnel from the host NHI to a lane adapter of
	 * device 2 two hops away:
	 *
	 *   Host - Device #1 (0x1) - Device #2 (0x701)
	 *
	 * The tunnel ends at Device #2 port 3 and uses HopID 8 there
	 * and ring 1 on the NHI.
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	nhi = &host->ports[7];
	port = &dev2->ports[3];
	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
			    &dev2->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
			    &dev1->ports[7]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
			    &dev1->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
			    &dev1->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
			    &dev1->ports[7]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
			    &dev2->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_match(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);

	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);
}

1941 static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
1942 {
1943 struct tb_switch *host, *dev;
1944 struct tb_port *up, *down;
1945 struct tb_tunnel *tunnel;
1946 struct tb_path *path;
1947
1948 host = alloc_host(test);
1949 dev = alloc_dev_default(test, host, 0x1, false);
1950
1951 down = &host->ports[8];
1952 up = &dev->ports[9];
1953 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1954 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1955 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1956
1957 path = tunnel->paths[0];
1958 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1959 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1960 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1961 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1962 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1963
1964 path = tunnel->paths[1];
1965 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1966 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1967 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1968 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1969 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1970
1971 tb_tunnel_free(tunnel);
1972 }
1973
1974 static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
1975 {
1976 struct tb_switch *host, *dev;
1977 struct tb_port *up, *down;
1978 struct tb_tunnel *tunnel;
1979 struct tb_path *path;
1980
1981 host = alloc_host(test);
1982 dev = alloc_dev_default(test, host, 0x1, true);
1983
1984 down = &host->ports[8];
1985 up = &dev->ports[9];
1986 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1987 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1988 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1989
1990 path = tunnel->paths[0];
1991 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1992 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1993 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1994 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1995 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
1996
1997 path = tunnel->paths[1];
1998 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1999 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2000 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2001 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2002 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2003
2004 tb_tunnel_free(tunnel);
2005 }
2006
2007 static void tb_test_credit_alloc_pcie(struct kunit *test)
2008 {
2009 struct tb_switch *host, *dev;
2010 struct tb_port *up, *down;
2011 struct tb_tunnel *tunnel;
2012 struct tb_path *path;
2013
2014 host = alloc_host_usb4(test);
2015 dev = alloc_dev_usb4(test, host, 0x1, true);
2016
2017 down = &host->ports[8];
2018 up = &dev->ports[9];
2019 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2020 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2021 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2022
2023 path = tunnel->paths[0];
2024 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2025 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2026 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2027 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2028 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2029
2030 path = tunnel->paths[1];
2031 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2032 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2033 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2034 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2035 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2036
2037 tb_tunnel_free(tunnel);
2038 }
2039
2040 static void tb_test_credit_alloc_without_dp(struct kunit *test)
2041 {
2042 struct tb_switch *host, *dev;
2043 struct tb_port *up, *down;
2044 struct tb_tunnel *tunnel;
2045 struct tb_path *path;
2046
2047 host = alloc_host_usb4(test);
2048 dev = alloc_dev_without_dp(test, host, 0x1, true);
2049
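/*
 * The device has no DP adapters, so only a PCIe tunnel is created and
 * its per-hop credit allocation is checked in both directions.
 */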
2065 down = &host->ports[8];
2066 up = &dev->ports[9];
2067 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2068 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2069 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2070
2072 path = tunnel->paths[0];
2073 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2074 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2075 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2076 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2077 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
2078
2080 path = tunnel->paths[1];
2081 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2082 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2083 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2084 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2085 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2086
2087 tb_tunnel_free(tunnel);
2088 }
2089
2090 static void tb_test_credit_alloc_dp(struct kunit *test)
2091 {
2092 struct tb_switch *host, *dev;
2093 struct tb_port *in, *out;
2094 struct tb_tunnel *tunnel;
2095 struct tb_path *path;
2096
2097 host = alloc_host_usb4(test);
2098 dev = alloc_dev_usb4(test, host, 0x1, true);
2099
2100 in = &host->ports[5];
2101 out = &dev->ports[14];
2102
2103 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2104 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2105 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
2106
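/* Video (main DP) path */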
2108 path = tunnel->paths[0];
2109 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2110 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2111 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2112 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2113 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2114
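/* AUX TX path */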
2116 path = tunnel->paths[1];
2117 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2118 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2119 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2120 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2121 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2122
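/* AUX RX path */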
2124 path = tunnel->paths[2];
2125 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2126 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2127 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2128 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2129 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2130
2131 tb_tunnel_free(tunnel);
2132 }
2133
2134 static void tb_test_credit_alloc_usb3(struct kunit *test)
2135 {
2136 struct tb_switch *host, *dev;
2137 struct tb_port *up, *down;
2138 struct tb_tunnel *tunnel;
2139 struct tb_path *path;
2140
2141 host = alloc_host_usb4(test);
2142 dev = alloc_dev_usb4(test, host, 0x1, true);
2143
2144 down = &host->ports[12];
2145 up = &dev->ports[16];
2146 tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2147 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2148 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2149
2150 path = tunnel->paths[0];
2151 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2152 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2153 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2154 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2155 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2156
2157 path = tunnel->paths[1];
2158 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2159 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2160 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2161 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2162 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2163
2164 tb_tunnel_free(tunnel);
2165 }
2166
2167 static void tb_test_credit_alloc_dma(struct kunit *test)
2168 {
2169 struct tb_switch *host, *dev;
2170 struct tb_port *nhi, *port;
2171 struct tb_tunnel *tunnel;
2172 struct tb_path *path;
2173
2174 host = alloc_host_usb4(test);
2175 dev = alloc_dev_usb4(test, host, 0x1, true);
2176
2177 nhi = &host->ports[7];
2178 port = &dev->ports[3];
2179
2180 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2181 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2182 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2183
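/* DMA RX path (device -> host NHI) */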
2185 path = tunnel->paths[0];
2186 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2187 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2188 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2189 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2190 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2191
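/* DMA TX path (host NHI -> device) */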
2193 path = tunnel->paths[1];
2194 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2195 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2196 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2197 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2198 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2199
2200 tb_tunnel_free(tunnel);
2201 }
2202
2203 static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
2204 {
2205 struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
2206 struct tb_switch *host, *dev;
2207 struct tb_port *nhi, *port;
2208 struct tb_path *path;
2209
2210 host = alloc_host_usb4(test);
2211 dev = alloc_dev_usb4(test, host, 0x1, true);
2212
2213 nhi = &host->ports[7];
2214 port = &dev->ports[3];
2215
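/*
 * Create two DMA tunnels that together consume the available buffer
 * space. Allocating a third one is then expected to fail because no
 * more credits can be allocated.
 */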
2234 tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2235 KUNIT_ASSERT_NOT_NULL(test, tunnel1);
2236 KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
2237
2238 path = tunnel1->paths[0];
2239 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2240 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2241 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2242 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2243 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2244
2245 path = tunnel1->paths[1];
2246 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2247 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2248 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2249 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2250 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2251
2252 tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2253 KUNIT_ASSERT_NOT_NULL(test, tunnel2);
2254 KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
2255
2256 path = tunnel2->paths[0];
2257 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2258 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2259 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2260 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2261 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2262
2263 path = tunnel2->paths[1];
2264 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2265 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2266 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2267 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2268 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2269
2270 tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2271 KUNIT_ASSERT_NULL(test, tunnel3);
2272
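/*
 * Freeing the first tunnel releases its buffers, so allocating the
 * third tunnel should now succeed.
 */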
2277 tb_tunnel_free(tunnel1);
2278
2279 tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2280 KUNIT_ASSERT_NOT_NULL(test, tunnel3);
2281
2282 path = tunnel3->paths[0];
2283 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2284 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2285 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2286 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2287 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2288
2289 path = tunnel3->paths[1];
2290 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2291 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2292 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2293 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2294 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2295
2296 tb_tunnel_free(tunnel3);
2297 tb_tunnel_free(tunnel2);
2298 }
2299
2300 static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
2301 struct tb_switch *host, struct tb_switch *dev)
2302 {
2303 struct tb_port *up, *down;
2304 struct tb_tunnel *pcie_tunnel;
2305 struct tb_path *path;
2306
2307 down = &host->ports[8];
2308 up = &dev->ports[9];
2309 pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2310 KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
2311 KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);
2312
2313 path = pcie_tunnel->paths[0];
2314 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2315 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2316 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2317 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2318 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2319
2320 path = pcie_tunnel->paths[1];
2321 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2322 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2323 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2324 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2325 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2326
2327 return pcie_tunnel;
2328 }
2329
2330 static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
2331 struct tb_switch *host, struct tb_switch *dev)
2332 {
2333 struct tb_port *in, *out;
2334 struct tb_tunnel *dp_tunnel1;
2335 struct tb_path *path;
2336
2337 in = &host->ports[5];
2338 out = &dev->ports[13];
2339 dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2340 KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
2341 KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
2342
2343 path = dp_tunnel1->paths[0];
2344 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2345 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2346 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2347 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2348 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2349
2350 path = dp_tunnel1->paths[1];
2351 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2352 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2353 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2354 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2355 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2356
2357 path = dp_tunnel1->paths[2];
2358 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2359 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2360 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2361 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2362 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2363
2364 return dp_tunnel1;
2365 }
2366
2367 static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
2368 struct tb_switch *host, struct tb_switch *dev)
2369 {
2370 struct tb_port *in, *out;
2371 struct tb_tunnel *dp_tunnel2;
2372 struct tb_path *path;
2373
2374 in = &host->ports[6];
2375 out = &dev->ports[14];
2376 dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2377 KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
2378 KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
2379
2380 path = dp_tunnel2->paths[0];
2381 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2382 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2383 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2384 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2385 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2386
2387 path = dp_tunnel2->paths[1];
2388 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2389 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2390 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2391 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2392 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2393
2394 path = dp_tunnel2->paths[2];
2395 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2396 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2397 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2398 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2399 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2400
2401 return dp_tunnel2;
2402 }
2403
2404 static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
2405 struct tb_switch *host, struct tb_switch *dev)
2406 {
2407 struct tb_port *up, *down;
2408 struct tb_tunnel *usb3_tunnel;
2409 struct tb_path *path;
2410
2411 down = &host->ports[12];
2412 up = &dev->ports[16];
2413 usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2414 KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
2415 KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);
2416
2417 path = usb3_tunnel->paths[0];
2418 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2419 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2420 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2421 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2422 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2423
2424 path = usb3_tunnel->paths[1];
2425 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2426 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2427 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2428 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2429 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2430
2431 return usb3_tunnel;
2432 }
2433
2434 static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
2435 struct tb_switch *host, struct tb_switch *dev)
2436 {
2437 struct tb_port *nhi, *port;
2438 struct tb_tunnel *dma_tunnel1;
2439 struct tb_path *path;
2440
2441 nhi = &host->ports[7];
2442 port = &dev->ports[3];
2443 dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2444 KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
2445 KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
2446
2447 path = dma_tunnel1->paths[0];
2448 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2449 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2450 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2451 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2452 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2453
2454 path = dma_tunnel1->paths[1];
2455 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2456 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2457 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2458 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2459 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2460
2461 return dma_tunnel1;
2462 }
2463
2464 static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
2465 struct tb_switch *host, struct tb_switch *dev)
2466 {
2467 struct tb_port *nhi, *port;
2468 struct tb_tunnel *dma_tunnel2;
2469 struct tb_path *path;
2470
2471 nhi = &host->ports[7];
2472 port = &dev->ports[3];
2473 dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2474 KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
2475 KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
2476
2477 path = dma_tunnel2->paths[0];
2478 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2479 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2480 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2481 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2482 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2483
2484 path = dma_tunnel2->paths[1];
2485 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2486 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2487 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2488 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2489 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2490
2491 return dma_tunnel2;
2492 }
2493
2494 static void tb_test_credit_alloc_all(struct kunit *test)
2495 {
2496 struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
2497 struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
2498 struct tb_switch *host, *dev;
2499
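/*
 * Allocate PCIe, two DP, USB3 and two DMA tunnels over the same link
 * and verify the per-hop credit allocation of each using the helpers
 * above.
 */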
2506 host = alloc_host_usb4(test);
2507 dev = alloc_dev_usb4(test, host, 0x1, true);
2508
2509 pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
2510 dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
2511 dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
2512 usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
2513 dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
2514 dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
2515
2516 tb_tunnel_free(dma_tunnel2);
2517 tb_tunnel_free(dma_tunnel1);
2518 tb_tunnel_free(usb3_tunnel);
2519 tb_tunnel_free(dp_tunnel2);
2520 tb_tunnel_free(dp_tunnel1);
2521 tb_tunnel_free(pcie_tunnel);
2522 }
2523
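/*
 * Property block used by the property parsing tests below. It holds
 * vendorid/deviceid entries and a "network" sub-directory.
 */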
2524 static const u32 root_directory[] = {
2525 0x55584401,
2526 0x00000018,
2527 0x76656e64,
2528 0x6f726964,
2529 0x76000001,
2530 0x00000a27,
2531 0x76656e64,
2532 0x6f726964,
2533 0x74000003,
2534 0x0000001a,
2535 0x64657669,
2536 0x63656964,
2537 0x76000001,
2538 0x0000000a,
2539 0x64657669,
2540 0x63656964,
2541 0x74000003,
2542 0x0000001d,
2543 0x64657669,
2544 0x63657276,
2545 0x76000001,
2546 0x80000100,
2547 0x6e657477,
2548 0x6f726b00,
2549 0x44000014,
2550 0x00000021,
2551 0x4170706c,
2552 0x6520496e,
2553 0x632e0000,
2554 0x4d616369,
2555 0x6e746f73,
2556 0x68000000,
2557 0x00000000,
2558 0xca8961c6,
2559 0x9541ce1c,
2560 0x5949b8bd,
2561 0x4f5a5f2e,
2562 0x70727463,
2563 0x69640000,
2564 0x76000001,
2565 0x00000001,
2566 0x70727463,
2567 0x76657273,
2568 0x76000001,
2569 0x00000001,
2570 0x70727463,
2571 0x72657673,
2572 0x76000001,
2573 0x00000001,
2574 0x70727463,
2575 0x73746e73,
2576 0x76000001,
2577 0x00000000,
2578 };
2579
2580 static const uuid_t network_dir_uuid =
2581 UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
2582 0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
2583
2584 static void tb_test_property_parse(struct kunit *test)
2585 {
2586 struct tb_property_dir *dir, *network_dir;
2587 struct tb_property *p;
2588
2589 dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2590 KUNIT_ASSERT_NOT_NULL(test, dir);
2591
2592 p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
2593 KUNIT_ASSERT_NULL(test, p);
2594
2595 p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
2596 KUNIT_ASSERT_NOT_NULL(test, p);
2597 KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
2598
2599 p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
2600 KUNIT_ASSERT_NOT_NULL(test, p);
2601 KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);
2602
2603 p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2604 KUNIT_ASSERT_NOT_NULL(test, p);
2605 KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
2606
2607 p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2608 KUNIT_ASSERT_NOT_NULL(test, p);
2609 KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);
2610
2611 p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
2612 KUNIT_ASSERT_NULL(test, p);
2613
2614 p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
2615 KUNIT_ASSERT_NOT_NULL(test, p);
2616
2617 network_dir = p->value.dir;
2618 KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
2619
2620 p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
2621 KUNIT_ASSERT_NOT_NULL(test, p);
2622 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2623
2624 p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
2625 KUNIT_ASSERT_NOT_NULL(test, p);
2626 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2627
2628 p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
2629 KUNIT_ASSERT_NOT_NULL(test, p);
2630 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2631
2632 p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
2633 KUNIT_ASSERT_NOT_NULL(test, p);
2634 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);
2635
2636 p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2637 KUNIT_EXPECT_NULL(test, p);
2638 p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2639 KUNIT_EXPECT_NULL(test, p);
2640
2641 tb_property_free_dir(dir);
2642 }
2643
2644 static void tb_test_property_format(struct kunit *test)
2645 {
2646 struct tb_property_dir *dir;
2647 ssize_t block_len;
2648 u32 *block;
2649 int ret, i;
2650
2651 dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2652 KUNIT_ASSERT_NOT_NULL(test, dir);
2653
2654 ret = tb_property_format_dir(dir, NULL, 0);
2655 KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2656
2657 block_len = ret;
2658
2659 block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
2660 KUNIT_ASSERT_NOT_NULL(test, block);
2661
2662 ret = tb_property_format_dir(dir, block, block_len);
2663 KUNIT_EXPECT_EQ(test, ret, 0);
2664
2665 for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2666 KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2667
2668 tb_property_free_dir(dir);
2669 }
2670
2671 static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
2672 struct tb_property_dir *d2)
2673 {
2674 struct tb_property *p1, *p2, *tmp;
2675 int n1, n2, i;
2676
2677 if (d1->uuid) {
2678 KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
2679 KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
2680 } else {
2681 KUNIT_ASSERT_NULL(test, d2->uuid);
2682 }
2683
2684 n1 = 0;
2685 tb_property_for_each(d1, tmp)
2686 n1++;
2687 KUNIT_ASSERT_NE(test, n1, 0);
2688
2689 n2 = 0;
2690 tb_property_for_each(d2, tmp)
2691 n2++;
2692 KUNIT_ASSERT_NE(test, n2, 0);
2693
2694 KUNIT_ASSERT_EQ(test, n1, n2);
2695
2696 p1 = NULL;
2697 p2 = NULL;
2698 for (i = 0; i < n1; i++) {
2699 p1 = tb_property_get_next(d1, p1);
2700 KUNIT_ASSERT_NOT_NULL(test, p1);
2701 p2 = tb_property_get_next(d2, p2);
2702 KUNIT_ASSERT_NOT_NULL(test, p2);
2703
2704 KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
2705 KUNIT_ASSERT_EQ(test, p1->type, p2->type);
2706 KUNIT_ASSERT_EQ(test, p1->length, p2->length);
2707
2708 switch (p1->type) {
2709 case TB_PROPERTY_TYPE_DIRECTORY:
2710 KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
2711 KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
2712 compare_dirs(test, p1->value.dir, p2->value.dir);
2713 break;
2714
2715 case TB_PROPERTY_TYPE_DATA:
2716 KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
2717 KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
2718 KUNIT_ASSERT_TRUE(test,
2719 !memcmp(p1->value.data, p2->value.data,
2720 p1->length * 4));
2722 break;
2723
2724 case TB_PROPERTY_TYPE_TEXT:
2725 KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
2726 KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
2727 KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
2728 break;
2729
2730 case TB_PROPERTY_TYPE_VALUE:
2731 KUNIT_ASSERT_EQ(test, p1->value.immediate,
2732 p2->value.immediate);
2733 break;
2734 default:
2735 KUNIT_FAIL(test, "unexpected property type");
2736 break;
2737 }
2738 }
2739 }
2740
2741 static void tb_test_property_copy(struct kunit *test)
2742 {
2743 struct tb_property_dir *src, *dst;
2744 u32 *block;
2745 int ret, i;
2746
2747 src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2748 KUNIT_ASSERT_NOT_NULL(test, src);
2749
2750 dst = tb_property_copy_dir(src);
2751 KUNIT_ASSERT_NOT_NULL(test, dst);
2752
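/* The copy should contain the same properties as the source */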
2754 compare_dirs(test, src, dst);
2755
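/* Formatting the copy should reproduce the original block */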
2757 ret = tb_property_format_dir(dst, NULL, 0);
2758 KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2759
2760 block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
2761 KUNIT_ASSERT_NOT_NULL(test, block);
2762
2763 ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
2764 KUNIT_EXPECT_EQ(test, ret, 0);
2765
2766 for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2767 KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2768
2769 tb_property_free_dir(dst);
2770 tb_property_free_dir(src);
2771 }
2772
2773 static struct kunit_case tb_test_cases[] = {
2774 KUNIT_CASE(tb_test_path_basic),
2775 KUNIT_CASE(tb_test_path_not_connected_walk),
2776 KUNIT_CASE(tb_test_path_single_hop_walk),
2777 KUNIT_CASE(tb_test_path_daisy_chain_walk),
2778 KUNIT_CASE(tb_test_path_simple_tree_walk),
2779 KUNIT_CASE(tb_test_path_complex_tree_walk),
2780 KUNIT_CASE(tb_test_path_max_length_walk),
2781 KUNIT_CASE(tb_test_path_not_connected),
2782 KUNIT_CASE(tb_test_path_not_bonded_lane0),
2783 KUNIT_CASE(tb_test_path_not_bonded_lane1),
2784 KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
2785 KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
2786 KUNIT_CASE(tb_test_path_mixed_chain),
2787 KUNIT_CASE(tb_test_path_mixed_chain_reverse),
2788 KUNIT_CASE(tb_test_tunnel_pcie),
2789 KUNIT_CASE(tb_test_tunnel_dp),
2790 KUNIT_CASE(tb_test_tunnel_dp_chain),
2791 KUNIT_CASE(tb_test_tunnel_dp_tree),
2792 KUNIT_CASE(tb_test_tunnel_dp_max_length),
2793 KUNIT_CASE(tb_test_tunnel_port_on_path),
2794 KUNIT_CASE(tb_test_tunnel_usb3),
2795 KUNIT_CASE(tb_test_tunnel_dma),
2796 KUNIT_CASE(tb_test_tunnel_dma_rx),
2797 KUNIT_CASE(tb_test_tunnel_dma_tx),
2798 KUNIT_CASE(tb_test_tunnel_dma_chain),
2799 KUNIT_CASE(tb_test_tunnel_dma_match),
2800 KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
2801 KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
2802 KUNIT_CASE(tb_test_credit_alloc_pcie),
2803 KUNIT_CASE(tb_test_credit_alloc_without_dp),
2804 KUNIT_CASE(tb_test_credit_alloc_dp),
2805 KUNIT_CASE(tb_test_credit_alloc_usb3),
2806 KUNIT_CASE(tb_test_credit_alloc_dma),
2807 KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
2808 KUNIT_CASE(tb_test_credit_alloc_all),
2809 KUNIT_CASE(tb_test_property_parse),
2810 KUNIT_CASE(tb_test_property_format),
2811 KUNIT_CASE(tb_test_property_copy),
2812 { }
2813 };
2814
2815 static struct kunit_suite tb_test_suite = {
2816 .name = "thunderbolt",
2817 .test_cases = tb_test_cases,
2818 };
2819
2820 kunit_test_suite(tb_test_suite);