#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}
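
/*
 * The DP_STR() macro builds a sparse lookup table with designated
 * initializers, so each entry is indexed by its raw sideband value; e.g.
 * DP_STR(LINK_ADDRESS) expands to [DP_LINK_ADDRESS] = "LINK_ADDRESS".
 * That is why the lookups above and below bounds-check with ARRAY_SIZE()
 * and test for NULL holes before returning an entry.
 */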

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & 0xf;
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}
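
/*
 * A packed RAD stores one 4-bit port number per hop; the loop above expands
 * each nibble into its own byte so the "%*phC" specifier can print the whole
 * relative address as a hex string for debug output.
 */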

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
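
/*
 * Both CRC helpers above are plain bitwise long division: the header CRC
 * uses the 4-bit polynomial 0x13 (x^4 + x + 1) over nibbles, the data CRC
 * the 8-bit polynomial 0xd5 over whole bytes, matching the sideband MSG
 * transport definitions. Illustrative use (hypothetical buffer):
 *
 *	u8 body[4] = { 0x01, 0x02, 0x03, 0x04 };
 *	u8 crc = drm_dp_msg_data_crc4(body, sizeof(body));
 */
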
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
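
/*
 * Worked example (hypothetical header): with lct = 1, lcr = 0, msg_len = 10,
 * somt = eomt = 1 and seqno = 0, the encoder above emits three bytes:
 *
 *	buf[0] = 0x10		LCT in the high nibble, LCR in the low
 *	buf[1] = 0x0a		broadcast/path_msg flags ORed with msg_len
 *	buf[2] = 0xc0 | crc4	SOMT/EOMT/seqno, plus the 4-bit CRC computed
 *				over the first five nibbles in the low bits
 */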

static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;
		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);
		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
		}
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
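
/*
 * Example of driving the encoder above - a minimal sketch mirroring what the
 * build_*() helpers later in this file do (error handling omitted):
 *
 *	struct drm_dp_sideband_msg_req_body req = {
 *		.req_type = DP_REMOTE_DPCD_READ,
 *		.u.dpcd_read.port_number = 1,
 *		.u.dpcd_read.dpcd_address = DP_DPCD_REV,
 *		.u.dpcd_read.num_bytes = 1,
 *	};
 *	struct drm_dp_sideband_msg_tx txmsg = {};
 *
 *	drm_dp_encode_sideband_req(&req, &txmsg);
 *	(txmsg.msg now holds the five encoded request-body bytes)
 */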

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;

	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool
drm_dp_sideband_parse_query_stream_enc_status(
	struct drm_dp_sideband_msg_rx *raw,
	struct drm_dp_sideband_msg_reply_body *repmsg)
{
	struct drm_dp_query_stream_enc_status_ack_reply *reply;

	reply = &repmsg->u.enc_status;

	reply->stream_id = raw->msg[3];

	reply->reply_signed = raw->msg[2] & BIT(0);

	/*
	 * NOTE: the device-present flags are packed into raw->msg[2]; the
	 * HDCP 1.x flag sits in the bit above the HDCP 2.x flag, hence the
	 * seemingly reversed order below.
	 */
	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);

	reply->query_capable_device_present = raw->msg[2] & BIT(5);
	reply->legacy_device_present = raw->msg[2] & BIT(6);
	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);

	reply->auth_completed = !!(raw->msg[1] & BIT(3));
	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
	reply->repeater_present = !!(raw->msg[1] & BIT(5));
	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;

	return true;
}

static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(mgr, raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true;
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true;
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool
drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
		    idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
							 struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
			      u8 *q_id)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
	req.u.enc_status.stream_id = stream_id;
	memcpy(req.u.enc_status.client_id, q_id,
	       sizeof(req.u.enc_status.client_id));
	req.u.enc_status.stream_event = 0;
	req.u.enc_status.valid_stream_event = false;
	req.u.enc_status.stream_behavior = 0;
	req.u.enc_status.valid_stream_behavior = false;

	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
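
/*
 * A note on the ID arithmetic above (a sketch based on how the masks are
 * initialized elsewhere in this file): bit 0 of payload_mask is reserved
 * when MST is enabled, so the first free payload bit is already the 1-based
 * payload ID and proposed_vcpis[] is indexed with ret - 1. VCPI numbers are
 * likewise biased (vcpi_ret + 1) so that VCPI 0 keeps meaning "no payload"
 * on the wire.
 */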

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the
	 * two cases we check here are terminal states. For those the
	 * barriers provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a way to probe for a missed HPD
		 * IRQ, poll every 50 msec instead of sleeping for the whole
		 * timeout, so an overlooked MST reply interrupt doesn't
		 * stall us until the 4 second timeout expires.
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
						msecs_to_jiffies(50) :
						wait_timeout);

		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",
			    txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
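
/*
 * Typical caller pattern for the helper above - a condensed sketch of what
 * the drm_dp_send_*() functions in this file do (error handling trimmed):
 *
 *	struct drm_dp_sideband_msg_tx *txmsg;
 *
 *	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 *	txmsg->dst = mstb;
 *	build_link_address(txmsg);
 *	drm_dp_queue_down_tx(mgr, txmsg);
 *	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
 *	kfree(txmsg);
 */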

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual: both objects carry two different
 * refcounts, a topology refcount and a malloc refcount.
 *
 * Topology refcounts (&drm_dp_mst_branch.topology_kref,
 * &drm_dp_mst_port.topology_kref) keep the object reachable in the
 * in-memory topology. Once the last topology reference is dropped, the
 * object is removed from the topology and its associated resources
 * (connectors, AUX buses, child branch devices) are torn down.
 *
 * Malloc refcounts (&drm_dp_mst_branch.malloc_kref,
 * &drm_dp_mst_port.malloc_kref) only keep the memory allocation itself
 * alive, so that driver-private pointers stay valid even after the object
 * has disappeared from the topology. Holding a topology reference implies
 * holding a malloc reference for the same lifetime.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Once the last malloc reference is dropped the branch device's memory is
 * freed.
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Keeps @port allocated even after it has been removed from the topology;
 * see the DOC section above for how this differs from a topology reference.
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Once the last malloc reference is dropped the port's memory is freed.
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
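
/*
 * Usage sketch for the malloc reference pair above: a driver stashing a port
 * pointer for deferred work keeps the memory (not the topology connection)
 * alive across the handoff. "my_state" here is hypothetical driver state:
 *
 *	drm_dp_mst_get_port_malloc(port);
 *	my_state->port = port;
 *	...
 *	drm_dp_mst_put_port_malloc(my_state->port);
 */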

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/*
	 * First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);

		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	kfree(history->entries);
out:
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}
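
/*
 * Sketch of the expected calling convention (mirroring how this file uses
 * the helper elsewhere): a "try" get validates that the branch is still in
 * the topology before use, and every successful try must be balanced by a
 * put:
 *
 *	if (!drm_dp_mst_topology_try_get_mstb(mstb))
 *		return -ENODEV;
 *	...
 *	drm_dp_mst_topology_put_mstb(mstb);
 */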

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether the
 * refcount is already zero, so the caller must already hold at least one
 * topology reference to @mstb.
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb; once the refcount hits zero the
 * branch device is queued for destruction.
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	drm_dp_mst_dump_port_topology_history(port);

	/* There's nothing that needs locking to destroy an input port yet */
	if (port->input) {
		drm_dp_mst_put_port_malloc(port);
		return;
	}

	kfree(port->cached_edid);

	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID retrieval
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&port->next, &mgr->destroy_port_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether the
 * refcount is already zero, so the caller must already hold at least one
 * topology reference to @port.
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);

	topology_ref_history_unlock(port->mgr);
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port; once the refcount hits zero the
 * port is queued for destruction.
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(port->mgr);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}

/*
 * Calculate a new RAD for this MST branch device:
 * if the parent has an LCT of 2 then it has 1 nibble of RAD,
 * if the parent has an LCT of 3 then it has 2 nibbles of RAD, and so on.
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
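
/*
 * Worked example (hypothetical topology): a port numbered 3 directly below
 * the primary branch (parent lct 1) yields rad[0] = 0x30 and lct 2, i.e.
 * the first hop lands in the high nibble. A port numbered 2 one level
 * further down (parent lct 2) inherits that byte and fills the low nibble:
 * rad[0] = 0x32, lct 3.
 */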

static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
{
	switch (pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		return true;
	case DP_PEER_DEVICE_MST_BRANCHING:
		/* For sst branch device */
		if (!mcs)
			return true;

		return false;
	}
	return true;
}

static int
drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
		    bool new_mcs)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	struct drm_dp_mst_branch *mstb;
	u8 rad[8], lct;
	int ret = 0;

	if (port->pdt == new_pdt && port->mcs == new_mcs)
		return 0;

	/* Teardown the old pdt, if there is one */
	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/*
			 * If the new PDT would also have an i2c bus,
			 * don't bother with reregistering it
			 */
			if (new_pdt != DP_PEER_DEVICE_NONE &&
			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
				port->pdt = new_pdt;
				port->mcs = new_mcs;
				return 0;
			}

			/* remove i2c over sideband */
			drm_dp_mst_unregister_i2c_bus(port);
		} else {
			mutex_lock(&mgr->lock);
			drm_dp_mst_topology_put_mstb(port->mstb);
			port->mstb = NULL;
			mutex_unlock(&mgr->lock);
		}
	}

	port->pdt = new_pdt;
	port->mcs = new_mcs;

	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/* add i2c over sideband */
			ret = drm_dp_mst_register_i2c_bus(port);
		} else {
			lct = drm_dp_calculate_rad(port, rad);
			mstb = drm_dp_add_mst_branch_device(lct, rad);
			if (!mstb) {
				ret = -ENOMEM;
				drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
				goto out;
			}

			mutex_lock(&mgr->lock);
			port->mstb = mstb;
			mstb->mgr = port->mgr;
			mstb->port_parent = port;

			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);
			mutex_unlock(&mgr->lock);

			/* And make sure we send a link address for this */
			ret = 1;
		}
	}

out:
	if (ret < 0)
		port->pdt = DP_PEER_DEVICE_NONE;
	return ret;
}

/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via sideband messaging
 * as drm_dp_dpcd_read() does for local devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}
2188
2189 /**
2190  * drm_dp_mst_dpcd_write() - write a hunk of bytes on an MST-connected device
2191  * @aux: Fake sideband AUX CH
2192  * @offset: address of the (first) register to write
2193  * @buffer: buffer containing the values to write
2194  * @size: number of bytes in @buffer
2195  *
2196  * Performs the same functionality for remote devices via
2197  * sideband messaging as drm_dp_dpcd_write() does for local
2198  * devices via actual AUX CH.
2199  *
2200  * Return: number of bytes written on success, negative error code on failure.
2201  */
2202 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2203 unsigned int offset, void *buffer, size_t size)
2204 {
2205 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2206 aux);
2207
2208 return drm_dp_send_dpcd_write(port->mgr, port,
2209 offset, size, buffer);
2210 }
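
/*
 * Usage sketch (illustrative, not part of this file): drivers rarely call
 * the two helpers above directly. The core DPCD accessors check
 * &drm_dp_aux.is_remote and route remote ports here automatically, so a
 * remote register read looks just like a local one:
 *
 *	u8 rev;
 *
 *	if (drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &rev) == 1)
 *		drm_dbg_kms(port->mgr->dev, "remote DPCD rev %02x\n", rev);
 */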
2211
2212 static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2213 {
2214 int ret = 0;
2215
2216 memcpy(mstb->guid, guid, 16);
2217
2218 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2219 if (mstb->port_parent) {
2220 ret = drm_dp_send_dpcd_write(mstb->mgr,
2221 mstb->port_parent,
2222 DP_GUID, 16, mstb->guid);
2223 } else {
2224 ret = drm_dp_dpcd_write(mstb->mgr->aux,
2225 DP_GUID, mstb->guid, 16);
2226 }
2227 }
2228
2229 if (ret < 16 && ret > 0)
2230 return -EPROTO;
2231
2232 return ret == 16 ? 0 : ret;
2233 }
2234
2235 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2236 int pnum,
2237 char *proppath,
2238 size_t proppath_size)
2239 {
2240 int i;
2241 char temp[8];
2242
2243 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2244 for (i = 0; i < (mstb->lct - 1); i++) {
2245 int shift = (i % 2) ? 0 : 4;
2246 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2247
2248 snprintf(temp, sizeof(temp), "-%d", port_num);
2249 strlcat(proppath, temp, proppath_size);
2250 }
2251 snprintf(temp, sizeof(temp), "-%d", pnum);
2252 strlcat(proppath, temp, proppath_size);
2253 }
2254
2255 /**
2256  * drm_dp_mst_connector_late_register() - Late MST connector registration
2257  * @connector: The MST connector
2258  * @port: The MST port for this connector
2259  *
2260  * Helper to register the remote aux device for this MST port. Drivers should
2261  * call this from their mst connector's late_register hook to enable MST aux
2262  * devices.
2263  *
2264  * Return: 0 on success, negative error code on failure.
2265  */
2266 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2267 struct drm_dp_mst_port *port)
2268 {
2269 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
2270 port->aux.name, connector->kdev->kobj.name);
2271
2272 port->aux.dev = connector->kdev;
2273 return drm_dp_aux_register_devnode(&port->aux);
2274 }
2275 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2276
2277 /**
2278  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2279  * @connector: The MST connector
2280  * @port: The MST port for this connector
2281  *
2282  * Helper to unregister the remote aux device for this MST port, registered by
2283  * drm_dp_mst_connector_late_register(). Drivers should call this from their
2284  * mst connector's early_unregister hook.
2285  */
2286 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2287 struct drm_dp_mst_port *port)
2288 {
2289 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
2290 port->aux.name, connector->kdev->kobj.name);
2291 drm_dp_aux_unregister_devnode(&port->aux);
2292 }
2293 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
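
/*
 * Usage sketch (names hypothetical): the register/unregister pair above is
 * normally wired into a driver's MST connector functions:
 *
 *	static int my_mst_connector_late_register(struct drm_connector *conn)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(conn);
 *
 *		return drm_dp_mst_connector_late_register(conn, c->port);
 *	}
 *
 *	static void my_mst_connector_early_unregister(struct drm_connector *conn)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(conn);
 *
 *		drm_dp_mst_connector_early_unregister(conn, c->port);
 *	}
 */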
2294
2295 static void
2296 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2297 struct drm_dp_mst_port *port)
2298 {
2299 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2300 char proppath[255];
2301 int ret;
2302
2303 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2304 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2305 if (!port->connector) {
2306 ret = -ENOMEM;
2307 goto error;
2308 }
2309
2310 if (port->pdt != DP_PEER_DEVICE_NONE &&
2311 drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
2312 port->port_num >= DP_MST_LOGICAL_PORT_0)
2313 port->cached_edid = drm_get_edid(port->connector,
2314 &port->aux.ddc);
2315
2316 drm_connector_register(port->connector);
2317 return;
2318
2319 error:
2320 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
2321 }
2322
2323 /*
2324  * Drop a topology reference, and unlink the port from the in-memory topology
2325  * layout
2326  */
2327 static void
2328 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2329 struct drm_dp_mst_port *port)
2330 {
2331 mutex_lock(&mgr->lock);
2332 port->parent->num_ports--;
2333 list_del(&port->next);
2334 mutex_unlock(&mgr->lock);
2335 drm_dp_mst_topology_put_port(port);
2336 }
2337
2338 static struct drm_dp_mst_port *
2339 drm_dp_mst_add_port(struct drm_device *dev,
2340 struct drm_dp_mst_topology_mgr *mgr,
2341 struct drm_dp_mst_branch *mstb, u8 port_number)
2342 {
2343 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2344
2345 if (!port)
2346 return NULL;
2347
2348 kref_init(&port->topology_kref);
2349 kref_init(&port->malloc_kref);
2350 port->parent = mstb;
2351 port->port_num = port_number;
2352 port->mgr = mgr;
2353 port->aux.name = "DPMST";
2354 port->aux.dev = dev->dev;
2355 port->aux.is_remote = true;
2356
2357 	/* initialize the MST downstream port's AUX channel */
2358 port->aux.drm_dev = dev;
2359 drm_dp_remote_aux_init(&port->aux);
2360
2361 	/*
2362 	 * Make sure the memory allocation for our parent branch stays
2363 	 * around until our own memory allocation is released
2364 	 */
2365 drm_dp_mst_get_mstb_malloc(mstb);
2366
2367 return port;
2368 }
2369
2370 static int
2371 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2372 struct drm_device *dev,
2373 struct drm_dp_link_addr_reply_port *port_msg)
2374 {
2375 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2376 struct drm_dp_mst_port *port;
2377 int old_ddps = 0, ret;
2378 u8 new_pdt = DP_PEER_DEVICE_NONE;
2379 	bool new_mcs = false;
2380 bool created = false, send_link_addr = false, changed = false;
2381
2382 port = drm_dp_get_port(mstb, port_msg->port_number);
2383 if (!port) {
2384 port = drm_dp_mst_add_port(dev, mgr, mstb,
2385 port_msg->port_number);
2386 if (!port)
2387 return -ENOMEM;
2388 created = true;
2389 changed = true;
2390 } else if (!port->input && port_msg->input_port && port->connector) {
2391 		/* Since port->connector can't be changed here, we create a
2392 		 * new port if input_port changes from 0 to 1
2393 		 */
2394 drm_dp_mst_topology_unlink_port(mgr, port);
2395 drm_dp_mst_topology_put_port(port);
2396 port = drm_dp_mst_add_port(dev, mgr, mstb,
2397 port_msg->port_number);
2398 if (!port)
2399 return -ENOMEM;
2400 changed = true;
2401 created = true;
2402 } else if (port->input && !port_msg->input_port) {
2403 changed = true;
2404 } else if (port->connector) {
2405 		/*
2406 		 * Locking is only needed if the port's exposed to userspace
2407 		 */
2408 drm_modeset_lock(&mgr->base.lock, NULL);
2409
2410 old_ddps = port->ddps;
2411 changed = port->ddps != port_msg->ddps ||
2412 (port->ddps &&
2413 (port->ldps != port_msg->legacy_device_plug_status ||
2414 port->dpcd_rev != port_msg->dpcd_revision ||
2415 port->mcs != port_msg->mcs ||
2416 port->pdt != port_msg->peer_device_type ||
2417 port->num_sdp_stream_sinks !=
2418 port_msg->num_sdp_stream_sinks));
2419 }
2420
2421 port->input = port_msg->input_port;
2422 if (!port->input)
2423 new_pdt = port_msg->peer_device_type;
2424 new_mcs = port_msg->mcs;
2425 port->ddps = port_msg->ddps;
2426 port->ldps = port_msg->legacy_device_plug_status;
2427 port->dpcd_rev = port_msg->dpcd_revision;
2428 port->num_sdp_streams = port_msg->num_sdp_streams;
2429 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2430
2431 	/* manage mstb port lists with mgr lock - take a reference
2432 	   for this list */
2433 if (created) {
2434 mutex_lock(&mgr->lock);
2435 drm_dp_mst_topology_get_port(port);
2436 list_add(&port->next, &mstb->ports);
2437 mstb->num_ports++;
2438 mutex_unlock(&mgr->lock);
2439 }
2440
2441 	/*
2442 	 * Reprobe PBN caps on both hotplug, and when re-probing the link
2443 	 * for our parent mstb
2444 	 */
2445 if (old_ddps != port->ddps || !created) {
2446 if (port->ddps && !port->input) {
2447 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2448 port);
2449 if (ret == 1)
2450 changed = true;
2451 } else {
2452 port->full_pbn = 0;
2453 }
2454 }
2455
2456 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2457 if (ret == 1) {
2458 send_link_addr = true;
2459 } else if (ret < 0) {
2460 drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);
2461 goto fail;
2462 }
2463
2464 	/*
2465 	 * If this port wasn't just created, then we're reprobing because
2466 	 * we're coming out of suspend. In this case, always resend the link
2467 	 * address if there's an MSTB on this port
2468 	 */
2469 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2470 port->mcs)
2471 send_link_addr = true;
2472
2473 if (port->connector)
2474 drm_modeset_unlock(&mgr->base.lock);
2475 else if (!port->input)
2476 drm_dp_mst_port_add_connector(mstb, port);
2477
2478 if (send_link_addr && port->mstb) {
2479 ret = drm_dp_send_link_address(mgr, port->mstb);
2480 if (ret == 1)
2481 changed = true;
2482 else if (ret < 0)
2483 goto fail_put;
2484 }
2485
2486 	/* put reference to this port */
2487 drm_dp_mst_topology_put_port(port);
2488 return changed;
2489
2490 fail:
2491 drm_dp_mst_topology_unlink_port(mgr, port);
2492 if (port->connector)
2493 drm_modeset_unlock(&mgr->base.lock);
2494 fail_put:
2495 drm_dp_mst_topology_put_port(port);
2496 return ret;
2497 }
2498
2499 static void
2500 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2501 struct drm_dp_connection_status_notify *conn_stat)
2502 {
2503 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2504 struct drm_dp_mst_port *port;
2505 int old_ddps, ret;
2506 u8 new_pdt;
2507 bool new_mcs;
2508 bool dowork = false, create_connector = false;
2509
2510 port = drm_dp_get_port(mstb, conn_stat->port_number);
2511 if (!port)
2512 return;
2513
2514 if (port->connector) {
2515 if (!port->input && conn_stat->input_port) {
2516 			/*
2517 			 * We can't remove a connector from an already exposed
2518 			 * port, so just throw the port out and make sure we
2519 			 * reprobe the link address of its parent MSTB
2520 			 */
2521 drm_dp_mst_topology_unlink_port(mgr, port);
2522 mstb->link_address_sent = false;
2523 dowork = true;
2524 goto out;
2525 }
2526
2527 		/* Locking is only needed if the port's exposed to userspace */
2528 drm_modeset_lock(&mgr->base.lock, NULL);
2529 } else if (port->input && !conn_stat->input_port) {
2530 create_connector = true;
2531 		/* Reprobe link address so we get num_sdp_streams */
2532 mstb->link_address_sent = false;
2533 dowork = true;
2534 }
2535
2536 old_ddps = port->ddps;
2537 port->input = conn_stat->input_port;
2538 port->ldps = conn_stat->legacy_device_plug_status;
2539 port->ddps = conn_stat->displayport_device_plug_status;
2540
2541 if (old_ddps != port->ddps) {
2542 if (port->ddps && !port->input)
2543 drm_dp_send_enum_path_resources(mgr, mstb, port);
2544 else
2545 port->full_pbn = 0;
2546 }
2547
2548 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2549 new_mcs = conn_stat->message_capability_status;
2550 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2551 if (ret == 1) {
2552 dowork = true;
2553 } else if (ret < 0) {
2554 drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
2555 dowork = false;
2556 }
2557
2558 if (port->connector)
2559 drm_modeset_unlock(&mgr->base.lock);
2560 else if (create_connector)
2561 drm_dp_mst_port_add_connector(mstb, port);
2562
2563 out:
2564 drm_dp_mst_topology_put_port(port);
2565 if (dowork)
2566 queue_work(system_long_wq, &mstb->mgr->work);
2567 }
2568
2569 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2570 u8 lct, u8 *rad)
2571 {
2572 struct drm_dp_mst_branch *mstb;
2573 struct drm_dp_mst_port *port;
2574 int i, ret;
2575
2576 	/* find the port by iterating down */
2577 mutex_lock(&mgr->lock);
2578 mstb = mgr->mst_primary;
2579
2580 if (!mstb)
2581 goto out;
2582
2583 for (i = 0; i < lct - 1; i++) {
2584 int shift = (i % 2) ? 0 : 4;
2585 int port_num = (rad[i / 2] >> shift) & 0xf;
2586
2587 list_for_each_entry(port, &mstb->ports, next) {
2588 if (port->port_num == port_num) {
2589 mstb = port->mstb;
2590 if (!mstb) {
2591 drm_err(mgr->dev,
2592 "failed to lookup MSTB with lct %d, rad %02x\n",
2593 lct, rad[0]);
2594 goto out;
2595 }
2596
2597 break;
2598 }
2599 }
2600 }
2601 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2602 if (!ret)
2603 mstb = NULL;
2604 out:
2605 mutex_unlock(&mgr->lock);
2606 return mstb;
2607 }
2608
2609 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2610 struct drm_dp_mst_branch *mstb,
2611 const uint8_t *guid)
2612 {
2613 struct drm_dp_mst_branch *found_mstb;
2614 struct drm_dp_mst_port *port;
2615
2616 if (memcmp(mstb->guid, guid, 16) == 0)
2617 return mstb;
2618
2619
2620 list_for_each_entry(port, &mstb->ports, next) {
2621 if (!port->mstb)
2622 continue;
2623
2624 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2625
2626 if (found_mstb)
2627 return found_mstb;
2628 }
2629
2630 return NULL;
2631 }
2632
2633 static struct drm_dp_mst_branch *
2634 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2635 const uint8_t *guid)
2636 {
2637 struct drm_dp_mst_branch *mstb;
2638 int ret;
2639
2640 	/* walk the whole topology, starting from the primary branch device */
2641 mutex_lock(&mgr->lock);
2642
2643 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2644 if (mstb) {
2645 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2646 if (!ret)
2647 mstb = NULL;
2648 }
2649
2650 mutex_unlock(&mgr->lock);
2651 return mstb;
2652 }
2653
2654 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2655 struct drm_dp_mst_branch *mstb)
2656 {
2657 struct drm_dp_mst_port *port;
2658 int ret;
2659 bool changed = false;
2660
2661 if (!mstb->link_address_sent) {
2662 ret = drm_dp_send_link_address(mgr, mstb);
2663 if (ret == 1)
2664 changed = true;
2665 else if (ret < 0)
2666 return ret;
2667 }
2668
2669 list_for_each_entry(port, &mstb->ports, next) {
2670 if (port->input || !port->ddps || !port->mstb)
2671 continue;
2672
2673 ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
2674 if (ret == 1)
2675 changed = true;
2676 else if (ret < 0)
2677 return ret;
2678 }
2679
2680 return changed;
2681 }
2682
2683 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2684 {
2685 struct drm_dp_mst_topology_mgr *mgr =
2686 container_of(work, struct drm_dp_mst_topology_mgr, work);
2687 struct drm_device *dev = mgr->dev;
2688 struct drm_dp_mst_branch *mstb;
2689 int ret;
2690 bool clear_payload_id_table;
2691
2692 mutex_lock(&mgr->probe_lock);
2693
2694 mutex_lock(&mgr->lock);
2695 clear_payload_id_table = !mgr->payload_id_table_cleared;
2696 mgr->payload_id_table_cleared = true;
2697
2698 mstb = mgr->mst_primary;
2699 if (mstb) {
2700 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2701 if (!ret)
2702 mstb = NULL;
2703 }
2704 mutex_unlock(&mgr->lock);
2705 if (!mstb) {
2706 mutex_unlock(&mgr->probe_lock);
2707 return;
2708 }
2709
2710 	/*
2711 	 * Certain branch devices seem to incorrectly report an available_pbn
2712 	 * of 0 on downstream sinks, even after clearing the
2713 	 * DP_PAYLOAD_ALLOCATE_* registers in
2714 	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2715 	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2716 	 * things work again.
2717 	 */
2718 if (clear_payload_id_table) {
2719 drm_dbg_kms(dev, "Clearing payload ID table\n");
2720 drm_dp_send_clear_payload_id_table(mgr, mstb);
2721 }
2722
2723 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2724 drm_dp_mst_topology_put_mstb(mstb);
2725
2726 mutex_unlock(&mgr->probe_lock);
2727 if (ret > 0)
2728 drm_kms_helper_hotplug_event(dev);
2729 }
2730
2731 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2732 u8 *guid)
2733 {
2734 u64 salt;
2735
2736 if (memchr_inv(guid, 0, 16))
2737 return true;
2738
2739 salt = get_jiffies_64();
2740
2741 memcpy(&guid[0], &salt, sizeof(u64));
2742 memcpy(&guid[8], &salt, sizeof(u64));
2743
2744 return false;
2745 }
2746
2747 static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2748 u8 port_num, u32 offset, u8 num_bytes)
2749 {
2750 struct drm_dp_sideband_msg_req_body req;
2751
2752 req.req_type = DP_REMOTE_DPCD_READ;
2753 req.u.dpcd_read.port_number = port_num;
2754 req.u.dpcd_read.dpcd_address = offset;
2755 req.u.dpcd_read.num_bytes = num_bytes;
2756 drm_dp_encode_sideband_req(&req, msg);
2757 }
2758
2759 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2760 bool up, u8 *msg, int len)
2761 {
2762 int ret;
2763 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2764 int tosend, total, offset;
2765 int retries = 0;
2766
2767 retry:
2768 total = len;
2769 offset = 0;
2770 do {
2771 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2772
2773 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2774 &msg[offset],
2775 tosend);
2776 if (ret != tosend) {
2777 if (ret == -EIO && retries < 5) {
2778 retries++;
2779 goto retry;
2780 }
2781 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
2782
2783 return -EIO;
2784 }
2785 offset += tosend;
2786 total -= tosend;
2787 } while (total > 0);
2788 return 0;
2789 }
2790
2791 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2792 struct drm_dp_sideband_msg_tx *txmsg)
2793 {
2794 struct drm_dp_mst_branch *mstb = txmsg->dst;
2795 u8 req_type;
2796
2797 req_type = txmsg->msg[0] & 0x7f;
2798 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2799 req_type == DP_RESOURCE_STATUS_NOTIFY ||
2800 req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
2801 hdr->broadcast = 1;
2802 else
2803 hdr->broadcast = 0;
2804 hdr->path_msg = txmsg->path_msg;
2805 if (hdr->broadcast) {
2806 hdr->lct = 1;
2807 hdr->lcr = 6;
2808 } else {
2809 hdr->lct = mstb->lct;
2810 hdr->lcr = mstb->lct - 1;
2811 }
2812
2813 memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
2814
2815 return 0;
2816 }
2817
2818
2819
2820 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2821 struct drm_dp_sideband_msg_tx *txmsg,
2822 bool up)
2823 {
2824 u8 chunk[48];
2825 struct drm_dp_sideband_msg_hdr hdr;
2826 int len, space, idx, tosend;
2827 int ret;
2828
2829 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2830 return 0;
2831
2832 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2833
2834 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2835 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2836
2837 	/* make hdr from dst mst */
2838 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2839 if (ret < 0)
2840 return ret;
2841
2842 	/* amount left to send in this message */
2843 len = txmsg->cur_len - txmsg->cur_offset;
2844
2845 	/* 48 - sideband msg size - 1 byte for crc */
2846 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2847
2848 tosend = min(len, space);
2849 if (len == txmsg->cur_len)
2850 hdr.somt = 1;
2851 if (space >= len)
2852 hdr.eomt = 1;
2853
2854
2855 hdr.msg_len = tosend + 1;
2856 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2857 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2858
2859 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2860 idx += tosend + 1;
2861
2862 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2863 if (ret) {
2864 if (drm_debug_enabled(DRM_UT_DP)) {
2865 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2866
2867 drm_printf(&p, "sideband msg failed to send\n");
2868 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2869 }
2870 return ret;
2871 }
2872
2873 txmsg->cur_offset += tosend;
2874 if (txmsg->cur_offset == txmsg->cur_len) {
2875 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2876 return 1;
2877 }
2878 return 0;
2879 }
2880
2881 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2882 {
2883 struct drm_dp_sideband_msg_tx *txmsg;
2884 int ret;
2885
2886 WARN_ON(!mutex_is_locked(&mgr->qlock));
2887
2888 	/* construct a chunk from the first msg in the tx_msg queue */
2889 if (list_empty(&mgr->tx_msg_downq))
2890 return;
2891
2892 txmsg = list_first_entry(&mgr->tx_msg_downq,
2893 struct drm_dp_sideband_msg_tx, next);
2894 ret = process_single_tx_qlock(mgr, txmsg, false);
2895 if (ret < 0) {
2896 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
2897 list_del(&txmsg->next);
2898 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2899 wake_up_all(&mgr->tx_waitq);
2900 }
2901 }
2902
2903 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2904 struct drm_dp_sideband_msg_tx *txmsg)
2905 {
2906 mutex_lock(&mgr->qlock);
2907 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2908
2909 if (drm_debug_enabled(DRM_UT_DP)) {
2910 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2911
2912 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2913 }
2914
2915 if (list_is_singular(&mgr->tx_msg_downq))
2916 process_single_down_tx_qlock(mgr);
2917 mutex_unlock(&mgr->qlock);
2918 }
2919
2920 static void
2921 drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
2922 struct drm_dp_link_address_ack_reply *reply)
2923 {
2924 struct drm_dp_link_addr_reply_port *port_reply;
2925 int i;
2926
2927 for (i = 0; i < reply->nports; i++) {
2928 port_reply = &reply->ports[i];
2929 drm_dbg_kms(mgr->dev,
2930 "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2931 i,
2932 port_reply->input_port,
2933 port_reply->peer_device_type,
2934 port_reply->port_number,
2935 port_reply->dpcd_revision,
2936 port_reply->mcs,
2937 port_reply->ddps,
2938 port_reply->legacy_device_plug_status,
2939 port_reply->num_sdp_streams,
2940 port_reply->num_sdp_stream_sinks);
2941 }
2942 }
2943
2944 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2945 struct drm_dp_mst_branch *mstb)
2946 {
2947 struct drm_dp_sideband_msg_tx *txmsg;
2948 struct drm_dp_link_address_ack_reply *reply;
2949 struct drm_dp_mst_port *port, *tmp;
2950 int i, ret, port_mask = 0;
2951 bool changed = false;
2952
2953 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2954 if (!txmsg)
2955 return -ENOMEM;
2956
2957 txmsg->dst = mstb;
2958 build_link_address(txmsg);
2959
2960 mstb->link_address_sent = true;
2961 drm_dp_queue_down_tx(mgr, txmsg);
2962
2963 	/* FIXME: Actually do some real error handling here */
2964 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2965 if (ret <= 0) {
2966 drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
2967 goto out;
2968 }
2969 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2970 drm_err(mgr->dev, "link address NAK received\n");
2971 ret = -EIO;
2972 goto out;
2973 }
2974
2975 reply = &txmsg->reply.u.link_addr;
2976 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
2977 drm_dp_dump_link_address(mgr, reply);
2978
2979 ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2980 if (ret) {
2981 char buf[64];
2982
2983 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2984 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
2985 goto out;
2986 }
2987
2988 for (i = 0; i < reply->nports; i++) {
2989 port_mask |= BIT(reply->ports[i].port_number);
2990 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2991 &reply->ports[i]);
2992 if (ret == 1)
2993 changed = true;
2994 else if (ret < 0)
2995 goto out;
2996 }
2997
2998 	/* Prune any ports that are currently a part of mstb in our in-memory
2999 	 * topology, but were not seen in this link address. Usually this
3000 	 * means that they were removed while the topology was out of sync,
3001 	 * e.g. during suspend/resume
3002 	 */
3003 mutex_lock(&mgr->lock);
3004 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
3005 if (port_mask & BIT(port->port_num))
3006 continue;
3007
3008 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
3009 port->port_num);
3010 list_del(&port->next);
3011 drm_dp_mst_topology_put_port(port);
3012 changed = true;
3013 }
3014 mutex_unlock(&mgr->lock);
3015
3016 out:
3017 if (ret <= 0)
3018 mstb->link_address_sent = false;
3019 kfree(txmsg);
3020 return ret < 0 ? ret : changed;
3021 }
3022
3023 static void
3024 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
3025 struct drm_dp_mst_branch *mstb)
3026 {
3027 struct drm_dp_sideband_msg_tx *txmsg;
3028 int ret;
3029
3030 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3031 if (!txmsg)
3032 return;
3033
3034 txmsg->dst = mstb;
3035 build_clear_payload_id_table(txmsg);
3036
3037 drm_dp_queue_down_tx(mgr, txmsg);
3038
3039 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3040 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3041 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
3042
3043 kfree(txmsg);
3044 }
3045
3046 static int
3047 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3048 struct drm_dp_mst_branch *mstb,
3049 struct drm_dp_mst_port *port)
3050 {
3051 struct drm_dp_enum_path_resources_ack_reply *path_res;
3052 struct drm_dp_sideband_msg_tx *txmsg;
3053 int ret;
3054
3055 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3056 if (!txmsg)
3057 return -ENOMEM;
3058
3059 txmsg->dst = mstb;
3060 build_enum_path_resources(txmsg, port->port_num);
3061
3062 drm_dp_queue_down_tx(mgr, txmsg);
3063
3064 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3065 if (ret > 0) {
3066 ret = 0;
3067 path_res = &txmsg->reply.u.path_resources;
3068
3069 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3070 drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
3071 } else {
3072 if (port->port_num != path_res->port_number)
3073 DRM_ERROR("got incorrect port in response\n");
3074
3075 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
3076 path_res->port_number,
3077 path_res->full_payload_bw_number,
3078 path_res->avail_payload_bw_number);
3079
3080 			/*
3081 			 * If something changed, make sure we send a
3082 			 * hotplug
3083 			 */
3084 if (port->full_pbn != path_res->full_payload_bw_number ||
3085 port->fec_capable != path_res->fec_capable)
3086 ret = 1;
3087
3088 port->full_pbn = path_res->full_payload_bw_number;
3089 port->fec_capable = path_res->fec_capable;
3090 }
3091 }
3092
3093 kfree(txmsg);
3094 return ret;
3095 }
3096
3097 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3098 {
3099 if (!mstb->port_parent)
3100 return NULL;
3101
3102 if (mstb->port_parent->mstb != mstb)
3103 return mstb->port_parent;
3104
3105 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3106 }
3107
3108 /*
3109  * Searches upwards in the topology starting from mstb to try to find the
3110  * closest available parent of mstb that's still connected to the rest of the
3111  * topology. This can be used in order to perform operations like releasing
3112  * payloads, where the branch device which owned the payload may no longer be
3113  * around and thus would require that the payload on the last living relative
3114  * be freed instead.
3115  */
3116 static struct drm_dp_mst_branch *
3117 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3118 struct drm_dp_mst_branch *mstb,
3119 int *port_num)
3120 {
3121 struct drm_dp_mst_branch *rmstb = NULL;
3122 struct drm_dp_mst_port *found_port;
3123
3124 mutex_lock(&mgr->lock);
3125 if (!mgr->mst_primary)
3126 goto out;
3127
3128 do {
3129 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3130 if (!found_port)
3131 break;
3132
3133 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3134 rmstb = found_port->parent;
3135 *port_num = found_port->port_num;
3136 } else {
3137 			/* Search again, starting from this parent */
3138 mstb = found_port->parent;
3139 }
3140 } while (!rmstb);
3141 out:
3142 mutex_unlock(&mgr->lock);
3143 return rmstb;
3144 }
3145
3146 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3147 struct drm_dp_mst_port *port,
3148 int id,
3149 int pbn)
3150 {
3151 struct drm_dp_sideband_msg_tx *txmsg;
3152 struct drm_dp_mst_branch *mstb;
3153 int ret, port_num;
3154 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3155 int i;
3156
3157 port_num = port->port_num;
3158 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3159 if (!mstb) {
3160 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3161 port->parent,
3162 &port_num);
3163
3164 if (!mstb)
3165 return -EINVAL;
3166 }
3167
3168 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3169 if (!txmsg) {
3170 ret = -ENOMEM;
3171 goto fail_put;
3172 }
3173
3174 for (i = 0; i < port->num_sdp_streams; i++)
3175 sinks[i] = i;
3176
3177 txmsg->dst = mstb;
3178 build_allocate_payload(txmsg, port_num,
3179 id,
3180 pbn, port->num_sdp_streams, sinks);
3181
3182 drm_dp_queue_down_tx(mgr, txmsg);
3183
3184 	/*
3185 	 * FIXME: there is a small chance that between getting the last
3186 	 * connected mstb and sending the payload message, the last connected
3187 	 * mstb could also be removed from the topology. In the future, this
3188 	 * needs to be fixed by restarting the
3189 	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3190 	 * timeout if the topology is still connected to the system.
3191 	 */
3192 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3193 if (ret > 0) {
3194 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3195 ret = -EINVAL;
3196 else
3197 ret = 0;
3198 }
3199 kfree(txmsg);
3200 fail_put:
3201 drm_dp_mst_topology_put_mstb(mstb);
3202 return ret;
3203 }
3204
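/**
 * drm_dp_send_power_updown_phy() - send a power up/down phy request
 * @mgr: MST topology manager to use
 * @port: the port to change the power state of
 * @power_up: %true to power the phy up, %false to power it down
 *
 * Queues a POWER_UP_PHY or POWER_DOWN_PHY sideband message for @port and
 * waits for the reply.
 *
 * Returns: 0 on success, negative error code on failure.
 */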
3205 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3206 struct drm_dp_mst_port *port, bool power_up)
3207 {
3208 struct drm_dp_sideband_msg_tx *txmsg;
3209 int ret;
3210
3211 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3212 if (!port)
3213 return -EINVAL;
3214
3215 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3216 if (!txmsg) {
3217 drm_dp_mst_topology_put_port(port);
3218 return -ENOMEM;
3219 }
3220
3221 txmsg->dst = port->parent;
3222 build_power_updown_phy(txmsg, port->port_num, power_up);
3223 drm_dp_queue_down_tx(mgr, txmsg);
3224
3225 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3226 if (ret > 0) {
3227 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3228 ret = -EINVAL;
3229 else
3230 ret = 0;
3231 }
3232 kfree(txmsg);
3233 drm_dp_mst_topology_put_port(port);
3234
3235 return ret;
3236 }
3237 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3238
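/**
 * drm_dp_send_query_stream_enc_status() - query stream encryption status
 * @mgr: MST topology manager
 * @port: port to run the query on
 * @status: buffer the ACK reply is copied into
 *
 * Sends a QUERY_STREAM_ENC_STATUS message for the VCPI currently allocated
 * to @port and copies the reply into @status.
 *
 * Returns: 0 on success, negative error code on failure.
 */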
3239 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
3240 struct drm_dp_mst_port *port,
3241 struct drm_dp_query_stream_enc_status_ack_reply *status)
3242 {
3243 struct drm_dp_sideband_msg_tx *txmsg;
3244 u8 nonce[7];
3245 int ret;
3246
3247 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3248 if (!txmsg)
3249 return -ENOMEM;
3250
3251 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3252 if (!port) {
3253 ret = -EINVAL;
3254 goto out_get_port;
3255 }
3256
3257 get_random_bytes(nonce, sizeof(nonce));
3258
3259 	/*
3260 	 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
3261 	 *  transaction at the MST Branch device directly connected to the
3262 	 *  Source"
3263 	 */
3264 txmsg->dst = mgr->mst_primary;
3265
3266 build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
3267
3268 drm_dp_queue_down_tx(mgr, txmsg);
3269
3270 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3271 if (ret < 0) {
3272 goto out;
3273 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3274 drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3275 ret = -ENXIO;
3276 goto out;
3277 }
3278
3279 ret = 0;
3280 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3281
3282 out:
3283 drm_dp_mst_topology_put_port(port);
3284 out_get_port:
3285 kfree(txmsg);
3286 return ret;
3287 }
3288 EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
3289
3290 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3291 int id,
3292 struct drm_dp_payload *payload)
3293 {
3294 int ret;
3295
3296 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3297 if (ret < 0) {
3298 payload->payload_state = 0;
3299 return ret;
3300 }
3301 payload->payload_state = DP_PAYLOAD_LOCAL;
3302 return 0;
3303 }
3304
3305 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3306 struct drm_dp_mst_port *port,
3307 int id,
3308 struct drm_dp_payload *payload)
3309 {
3310 int ret;
3311
3312 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3313 if (ret < 0)
3314 return ret;
3315 payload->payload_state = DP_PAYLOAD_REMOTE;
3316 return ret;
3317 }
3318
3319 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3320 struct drm_dp_mst_port *port,
3321 int id,
3322 struct drm_dp_payload *payload)
3323 {
3324 drm_dbg_kms(mgr->dev, "\n");
3325
3326 	if (port)
3327 		drm_dp_payload_send_msg(mgr, port, id, 0);
3329
3330 drm_dp_dpcd_write_payload(mgr, id, payload);
3331 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3332 return 0;
3333 }
3334
3335 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3336 int id,
3337 struct drm_dp_payload *payload)
3338 {
3339 payload->payload_state = 0;
3340 return 0;
3341 }
3342
3343 /**
3344  * drm_dp_update_payload_part1() - Execute payload update part 1
3345  * @mgr: manager to use.
3346  * @start_slot: this is the cur slot
3347  *
3348  * NOTE: start_slot is a temporary workaround for non-atomic drivers,
3349  * this will be removed when non-atomic mst helpers are moved out of the helper
3350  *
3351  * This iterates over all proposed virtual channels, and tries to
3352  * allocate space in the link for them. For 0->slots transitions,
3353  * this step just writes the VCPI to the MST device. For slots->0
3354  * transitions, this writes the updated VCPIs and removes the
3355  * remote VC payloads.
3356  *
3357  * After calling this the driver should generate ACT and payload
3358  * packets.
3359  */
3360 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
3361 {
3362 struct drm_dp_payload req_payload;
3363 struct drm_dp_mst_port *port;
3364 int i, j;
3365 int cur_slots = start_slot;
3366 bool skip;
3367
3368 mutex_lock(&mgr->payload_lock);
3369 for (i = 0; i < mgr->max_payloads; i++) {
3370 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3371 struct drm_dp_payload *payload = &mgr->payloads[i];
3372 bool put_port = false;
3373
3374 		/* solve the current payloads - compare to the hw ones
3375 		   - update the hw view */
3376 req_payload.start_slot = cur_slots;
3377 if (vcpi) {
3378 port = container_of(vcpi, struct drm_dp_mst_port,
3379 vcpi);
3380
3381 mutex_lock(&mgr->lock);
3382 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
3383 mutex_unlock(&mgr->lock);
3384
3385 if (skip) {
3386 drm_dbg_kms(mgr->dev,
3387 "Virtual channel %d is not in current topology\n",
3388 i);
3389 continue;
3390 }
3391
3392 			/* Validated ports don't matter if we're releasing
3393 			 * VCPI */
3394 if (vcpi->num_slots) {
3395 port = drm_dp_mst_topology_get_port_validated(
3396 mgr, port);
3397 if (!port) {
3398 if (vcpi->num_slots == payload->num_slots) {
3399 cur_slots += vcpi->num_slots;
3400 payload->start_slot = req_payload.start_slot;
3401 continue;
3402 } else {
3403 drm_dbg_kms(mgr->dev,
3404 "Fail:set payload to invalid sink");
3405 mutex_unlock(&mgr->payload_lock);
3406 return -EINVAL;
3407 }
3408 }
3409 put_port = true;
3410 }
3411
3412 req_payload.num_slots = vcpi->num_slots;
3413 req_payload.vcpi = vcpi->vcpi;
3414 } else {
3415 port = NULL;
3416 req_payload.num_slots = 0;
3417 }
3418
3419 payload->start_slot = req_payload.start_slot;
3420
3421 if (payload->num_slots != req_payload.num_slots) {
3422
3423 			/* need to push an update for this payload */
3424 if (req_payload.num_slots) {
3425 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3426 &req_payload);
3427 payload->num_slots = req_payload.num_slots;
3428 payload->vcpi = req_payload.vcpi;
3429
3430 } else if (payload->num_slots) {
3431 payload->num_slots = 0;
3432 drm_dp_destroy_payload_step1(mgr, port,
3433 payload->vcpi,
3434 payload);
3435 req_payload.payload_state =
3436 payload->payload_state;
3437 payload->start_slot = 0;
3438 }
3439 payload->payload_state = req_payload.payload_state;
3440 }
3441 cur_slots += req_payload.num_slots;
3442
3443 if (put_port)
3444 drm_dp_mst_topology_put_port(port);
3445 }
3446
3447 for (i = 0; i < mgr->max_payloads; ) {
3448 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3449 i++;
3450 continue;
3451 }
3452
3453 drm_dbg_kms(mgr->dev, "removing payload %d\n", i);
3454 for (j = i; j < mgr->max_payloads - 1; j++) {
3455 mgr->payloads[j] = mgr->payloads[j + 1];
3456 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3457
3458 if (mgr->proposed_vcpis[j] &&
3459 mgr->proposed_vcpis[j]->num_slots) {
3460 set_bit(j + 1, &mgr->payload_mask);
3461 } else {
3462 clear_bit(j + 1, &mgr->payload_mask);
3463 }
3464 }
3465
3466 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3467 sizeof(struct drm_dp_payload));
3468 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3469 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3470 }
3471 mutex_unlock(&mgr->payload_lock);
3472
3473 return 0;
3474 }
3475 EXPORT_SYMBOL(drm_dp_update_payload_part1);
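
/*
 * Usage sketch (legacy, non-atomic payload sequence; driver specifics
 * omitted): after allocating VCPIs, the payload table is committed in two
 * steps with an ACT handshake in between. The start_slot of 1 below is the
 * first usable time slot on 8b/10b links:
 *
 *	drm_dp_update_payload_part1(mgr, 1);
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */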
3476
3477 /**
3478  * drm_dp_update_payload_part2() - Execute payload update part 2
3479  * @mgr: manager to use.
3480  *
3481  * This iterates over all proposed virtual channels, and tries to
3482  * allocate space in the link for them. For 0->slots transitions,
3483  * this step writes the remote VC payload commands. For slots->0
3484  * this just resets some internal state.
3485  */
3486 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3487 {
3488 struct drm_dp_mst_port *port;
3489 int i;
3490 int ret = 0;
3491 bool skip;
3492
3493 mutex_lock(&mgr->payload_lock);
3494 for (i = 0; i < mgr->max_payloads; i++) {
3495
3496 if (!mgr->proposed_vcpis[i])
3497 continue;
3498
3499 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3500
3501 mutex_lock(&mgr->lock);
3502 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
3503 mutex_unlock(&mgr->lock);
3504
3505 if (skip)
3506 continue;
3507
3508 drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state);
3509 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3510 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3511 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3512 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3513 }
3514 if (ret) {
3515 mutex_unlock(&mgr->payload_lock);
3516 return ret;
3517 }
3518 }
3519 mutex_unlock(&mgr->payload_lock);
3520 return 0;
3521 }
3522 EXPORT_SYMBOL(drm_dp_update_payload_part2);
3523
3524 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3525 struct drm_dp_mst_port *port,
3526 int offset, int size, u8 *bytes)
3527 {
3528 int ret = 0;
3529 struct drm_dp_sideband_msg_tx *txmsg;
3530 struct drm_dp_mst_branch *mstb;
3531
3532 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3533 if (!mstb)
3534 return -EINVAL;
3535
3536 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3537 if (!txmsg) {
3538 ret = -ENOMEM;
3539 goto fail_put;
3540 }
3541
3542 build_dpcd_read(txmsg, port->port_num, offset, size);
3543 txmsg->dst = port->parent;
3544
3545 drm_dp_queue_down_tx(mgr, txmsg);
3546
3547 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3548 if (ret < 0)
3549 goto fail_free;
3550
3551 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3552 drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3553 mstb, port->port_num, offset, size);
3554 ret = -EIO;
3555 goto fail_free;
3556 }
3557
3558 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3559 ret = -EPROTO;
3560 goto fail_free;
3561 }
3562
3563 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3564 size);
3565 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3566
3567 fail_free:
3568 kfree(txmsg);
3569 fail_put:
3570 drm_dp_mst_topology_put_mstb(mstb);
3571
3572 return ret;
3573 }
3574
3575 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3576 struct drm_dp_mst_port *port,
3577 int offset, int size, u8 *bytes)
3578 {
3579 int ret;
3580 struct drm_dp_sideband_msg_tx *txmsg;
3581 struct drm_dp_mst_branch *mstb;
3582
3583 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3584 if (!mstb)
3585 return -EINVAL;
3586
3587 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3588 if (!txmsg) {
3589 ret = -ENOMEM;
3590 goto fail_put;
3591 }
3592
3593 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3594 txmsg->dst = mstb;
3595
3596 drm_dp_queue_down_tx(mgr, txmsg);
3597
3598 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3599 if (ret > 0) {
3600 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3601 ret = -EIO;
3602 else
3603 ret = size;
3604 }
3605
3606 kfree(txmsg);
3607 fail_put:
3608 drm_dp_mst_topology_put_mstb(mstb);
3609 return ret;
3610 }
3611
3612 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3613 {
3614 struct drm_dp_sideband_msg_reply_body reply;
3615
3616 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3617 reply.req_type = req_type;
3618 drm_dp_encode_sideband_reply(&reply, msg);
3619 return 0;
3620 }
3621
3622 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3623 struct drm_dp_mst_branch *mstb,
3624 int req_type, bool broadcast)
3625 {
3626 struct drm_dp_sideband_msg_tx *txmsg;
3627
3628 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3629 if (!txmsg)
3630 return -ENOMEM;
3631
3632 txmsg->dst = mstb;
3633 drm_dp_encode_up_ack_reply(txmsg, req_type);
3634
3635 mutex_lock(&mgr->qlock);
3636
3637 process_single_tx_qlock(mgr, txmsg, true);
3638 mutex_unlock(&mgr->qlock);
3639
3640 kfree(txmsg);
3641 return 0;
3642 }
3643
3644 /**
3645  * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
3646  * @mgr: MST topology manager for the port
3647  * @link_rate: link rate in 10kbits/s units
3648  * @link_lane_count: lane count
3649  *
3650  * Calculate the total bandwidth of a MultiStream Transport link. The
3651  * returned value is in units of PBNs/(timeslots/1 MTP).
3652  *
3653  * Returns the BW limitation of the given link.
3654  */
3655 int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
3656 int link_rate, int link_lane_count)
3657 {
3658 if (link_rate == 0 || link_lane_count == 0)
3659 drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
3660 link_rate, link_lane_count);
3661
3662 	/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
3663 return link_rate * link_lane_count / 54000;
3664 }
3665 EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
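
/*
 * Worked example (sketch): for an HBR2 (5.4 Gbit/s) link with 4 lanes,
 * drm_dp_bw_code_to_link_rate() yields 540000, so the result is
 * 540000 * 4 / 54000 = 40 PBN per time slot. With 64 time slots per MTP
 * that corresponds to the expected 2560 PBN total for such a link.
 */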
3666
3667 /**
3668  * drm_dp_read_mst_cap() - check whether or not a sink supports MST
3669  * @aux: The DP AUX channel to use
3670  * @dpcd: A cached copy of the DPCD capabilities for this sink
3671  *
3672  * Returns: %true if the sink supports MST, %false otherwise
3673  */
3674 bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
3675 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
3676 {
3677 u8 mstm_cap;
3678
3679 if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
3680 return false;
3681
3682 if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
3683 return false;
3684
3685 return mstm_cap & DP_MST_CAP;
3686 }
3687 EXPORT_SYMBOL(drm_dp_read_mst_cap);
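
/*
 * Usage sketch (illustrative): a typical detection path reads the DPCD
 * caps, checks for MST support, and flips the topology manager state:
 *
 *	drm_dp_read_dpcd_caps(aux, dpcd);
 *	if (drm_dp_read_mst_cap(aux, dpcd))
 *		ret = drm_dp_mst_topology_mgr_set_mst(mgr, true);
 */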
3688
3689 /**
3690  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3691  * @mgr: manager to set state for
3692  * @mst_state: true to enable MST on this connector - false to disable.
3693  *
3694  * This is called by the driver when it detects an MST capable device plugged
3695  * into a DP MST capable port, or when a DP MST capable device is unplugged.
3696  */
3697 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3698 {
3699 int ret = 0;
3700 struct drm_dp_mst_branch *mstb = NULL;
3701
3702 mutex_lock(&mgr->payload_lock);
3703 mutex_lock(&mgr->lock);
3704 if (mst_state == mgr->mst_state)
3705 goto out_unlock;
3706
3707 mgr->mst_state = mst_state;
3708
3709 if (mst_state) {
3710 struct drm_dp_payload reset_pay;
3711 int lane_count;
3712 int link_rate;
3713
3714 WARN_ON(mgr->mst_primary);
3715
3716
3717 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
3718 if (ret < 0) {
3719 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
3720 mgr->aux->name, ret);
3721 goto out_unlock;
3722 }
3723
3724 lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count);
3725 link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate);
3726 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr,
3727 link_rate,
3728 lane_count);
3729 if (mgr->pbn_div == 0) {
3730 ret = -EINVAL;
3731 goto out_unlock;
3732 }
3733
3734 		/* add initial branch device at LCT 1 */
3735 mstb = drm_dp_add_mst_branch_device(1, NULL);
3736 if (mstb == NULL) {
3737 ret = -ENOMEM;
3738 goto out_unlock;
3739 }
3740 mstb->mgr = mgr;
3741
3742 		/* give this the main reference */
3743 mgr->mst_primary = mstb;
3744 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3745
3746 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3747 DP_MST_EN |
3748 DP_UP_REQ_EN |
3749 DP_UPSTREAM_IS_SRC);
3750 if (ret < 0)
3751 goto out_unlock;
3752
3753 reset_pay.start_slot = 0;
3754 reset_pay.num_slots = 0x3f;
3755 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3756
3757 queue_work(system_long_wq, &mgr->work);
3758
3759 ret = 0;
3760 } else {
3761 		/* disable MST on the device */
3762 mstb = mgr->mst_primary;
3763 mgr->mst_primary = NULL;
3764 		/* this can fail if the device is gone */
3765 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3766 ret = 0;
3767 memset(mgr->payloads, 0,
3768 mgr->max_payloads * sizeof(mgr->payloads[0]));
3769 memset(mgr->proposed_vcpis, 0,
3770 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
3771 mgr->payload_mask = 0;
3772 set_bit(0, &mgr->payload_mask);
3773 mgr->vcpi_mask = 0;
3774 mgr->payload_id_table_cleared = false;
3775 }
3776
3777 out_unlock:
3778 mutex_unlock(&mgr->lock);
3779 mutex_unlock(&mgr->payload_lock);
3780 if (mstb)
3781 drm_dp_mst_topology_put_mstb(mstb);
3782 return ret;
3783
3784 }
3785 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
3786
3787 static void
3788 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3789 {
3790 struct drm_dp_mst_port *port;
3791
3792 	/* The link address will need to be re-sent on resume */
3793 mstb->link_address_sent = false;
3794
3795 list_for_each_entry(port, &mstb->ports, next)
3796 if (port->mstb)
3797 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3798 }
3799
3800 /**
3801  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3802  * @mgr: manager to suspend
3803  *
3804  * This function tells the MST device that we can't handle UP messages
3805  * anymore. This should stop it from sending any since we are suspended.
3806  */
3807 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3808 {
3809 mutex_lock(&mgr->lock);
3810 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3811 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3812 mutex_unlock(&mgr->lock);
3813 flush_work(&mgr->up_req_work);
3814 flush_work(&mgr->work);
3815 flush_work(&mgr->delayed_destroy_work);
3816
3817 mutex_lock(&mgr->lock);
3818 if (mgr->mst_state && mgr->mst_primary)
3819 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3820 mutex_unlock(&mgr->lock);
3821 }
3822 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3823
3824 /**
3825  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3826  * @mgr: manager to resume
3827  * @sync: whether or not to perform topology reprobing synchronously
3828  *
3829  * This will fetch DPCD and see if the device is still there,
3830  * if it is, it will rewrite the MSTM control bits, and return.
3831  *
3832  * If the device fails this returns -1, and the driver should do
3833  * a full MST reprobe, in case we were undocked.
3834  *
3835  * During system resume (where it is assumed that the driver will be calling
3836  * drm_atomic_helper_resume()) this function should be called beforehand with
3837  * sync set to false. If the driver instead needs the topology to be fully
3838  * resynced before continuing, it should pass sync set to true, which makes
3839  * this function flush the link probe work before returning.
3840  *
3841  * Returns: -1 if the MST topology was removed while we were suspended, 0
3842  * otherwise.
3843  */
3844 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3845 bool sync)
3846 {
3847 int ret;
3848 u8 guid[16];
3849
3850 mutex_lock(&mgr->lock);
3851 if (!mgr->mst_primary)
3852 goto out_fail;
3853
3854 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
3855 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3856 goto out_fail;
3857 }
3858
3859 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3860 DP_MST_EN |
3861 DP_UP_REQ_EN |
3862 DP_UPSTREAM_IS_SRC);
3863 if (ret < 0) {
3864 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
3865 goto out_fail;
3866 }
3867
3868 	/* Some hubs forget their guids after they resume */
3869 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3870 if (ret != 16) {
3871 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3872 goto out_fail;
3873 }
3874
3875 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3876 if (ret) {
3877 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
3878 goto out_fail;
3879 }
3880
3881 	/* For the final step of resuming the topology, we need to bring the
3882 	 * state of our in-memory topology back into sync with reality. So,
3883 	 * restart the probing process as if we're probing everything for the
3884 	 * first time.
3885 	 */
3886 queue_work(system_long_wq, &mgr->work);
3887 mutex_unlock(&mgr->lock);
3888
3889 if (sync) {
3890 drm_dbg_kms(mgr->dev,
3891 "Waiting for link probe work to finish re-syncing topology...\n");
3892 flush_work(&mgr->work);
3893 }
3894
3895 return 0;
3896
3897 out_fail:
3898 mutex_unlock(&mgr->lock);
3899 return -1;
3900 }
3901 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
3902
3903 static bool
3904 drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3905 struct drm_dp_mst_branch **mstb)
3906 {
3907 int len;
3908 u8 replyblock[32];
3909 int replylen, curreply;
3910 int ret;
3911 u8 hdrlen;
3912 struct drm_dp_sideband_msg_hdr hdr;
3913 struct drm_dp_sideband_msg_rx *msg =
3914 up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3915 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3916 DP_SIDEBAND_MSG_DOWN_REP_BASE;
3917
3918 if (!up)
3919 *mstb = NULL;
3920
3921 len = min(mgr->max_dpcd_transaction_bytes, 16);
3922 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
3923 if (ret != len) {
3924 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
3925 return false;
3926 }
3927
3928 ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
3929 	if (!ret) {
3930 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3931 1, replyblock, len, false);
3932 drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
3933 return false;
3934 }
3935
3936 if (!up) {
3937 		/* Caller is responsible for giving back this reference */
3938 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3939 if (!*mstb) {
3940 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
3941 return false;
3942 }
3943 }
3944
3945 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3946 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
3947 return false;
3948 }
3949
3950 replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3951 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3952 if (!ret) {
3953 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
3954 return false;
3955 }
3956
3957 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3958 curreply = len;
3959 while (replylen > 0) {
3960 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3961 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3962 replyblock, len);
3963 if (ret != len) {
3964 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
3965 len, ret);
3966 return false;
3967 }
3968
3969 ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3970 if (!ret) {
3971 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
3972 return false;
3973 }
3974
3975 curreply += len;
3976 replylen -= len;
3977 }
3978 return true;
3979 }
3980
3981 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3982 {
3983 struct drm_dp_sideband_msg_tx *txmsg;
3984 struct drm_dp_mst_branch *mstb = NULL;
3985 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3986
3987 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3988 goto out;
3989
3990 	/* Multi-packet message transmission, don't clear the reply */
3991 if (!msg->have_eomt)
3992 goto out;
3993
3994 	/* find the message */
3995 mutex_lock(&mgr->qlock);
3996 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3997 struct drm_dp_sideband_msg_tx, next);
3998 mutex_unlock(&mgr->qlock);
3999
4000 	/* Were we actually expecting a response, and from this mstb? */
4001 if (!txmsg || txmsg->dst != mstb) {
4002 struct drm_dp_sideband_msg_hdr *hdr;
4003
4004 hdr = &msg->initial_hdr;
4005 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
4006 mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
4007 goto out_clear_reply;
4008 }
4009
4010 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
4011
4012 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4013 drm_dbg_kms(mgr->dev,
4014 "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
4015 txmsg->reply.req_type,
4016 drm_dp_mst_req_type_str(txmsg->reply.req_type),
4017 txmsg->reply.u.nak.reason,
4018 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
4019 txmsg->reply.u.nak.nak_data);
4020 }
4021
4022 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
4023 drm_dp_mst_topology_put_mstb(mstb);
4024
4025 mutex_lock(&mgr->qlock);
4026 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
4027 list_del(&txmsg->next);
4028 mutex_unlock(&mgr->qlock);
4029
4030 wake_up_all(&mgr->tx_waitq);
4031
4032 return 0;
4033
4034 out_clear_reply:
4035 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
4036 out:
4037 if (mstb)
4038 drm_dp_mst_topology_put_mstb(mstb);
4039
4040 return 0;
4041 }
4042
4043 static inline bool
4044 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
4045 struct drm_dp_pending_up_req *up_req)
4046 {
4047 struct drm_dp_mst_branch *mstb = NULL;
4048 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
4049 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
4050 bool hotplug = false;
4051
4052 if (hdr->broadcast) {
4053 const u8 *guid = NULL;
4054
4055 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
4056 guid = msg->u.conn_stat.guid;
4057 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
4058 guid = msg->u.resource_stat.guid;
4059
4060 if (guid)
4061 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
4062 } else {
4063 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
4064 }
4065
4066 if (!mstb) {
4067 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
4068 return false;
4069 }
4070
4071 	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
4072 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
4073 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
4074 hotplug = true;
4075 }
4076
4077 drm_dp_mst_topology_put_mstb(mstb);
4078 return hotplug;
4079 }
4080
4081 static void drm_dp_mst_up_req_work(struct work_struct *work)
4082 {
4083 struct drm_dp_mst_topology_mgr *mgr =
4084 container_of(work, struct drm_dp_mst_topology_mgr,
4085 up_req_work);
4086 struct drm_dp_pending_up_req *up_req;
4087 bool send_hotplug = false;
4088
4089 mutex_lock(&mgr->probe_lock);
4090 while (true) {
4091 mutex_lock(&mgr->up_req_lock);
4092 up_req = list_first_entry_or_null(&mgr->up_req_list,
4093 struct drm_dp_pending_up_req,
4094 next);
4095 if (up_req)
4096 list_del(&up_req->next);
4097 mutex_unlock(&mgr->up_req_lock);
4098
4099 if (!up_req)
4100 break;
4101
4102 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
4103 kfree(up_req);
4104 }
4105 mutex_unlock(&mgr->probe_lock);
4106
4107 if (send_hotplug)
4108 drm_kms_helper_hotplug_event(mgr->dev);
4109 }
4110
4111 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
4112 {
4113 struct drm_dp_pending_up_req *up_req;
4114
4115 if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
4116 goto out;
4117
4118 if (!mgr->up_req_recv.have_eomt)
4119 return 0;
4120
4121 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
4122 if (!up_req)
4123 return -ENOMEM;
4124
4125 INIT_LIST_HEAD(&up_req->next);
4126
4127 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
4128
4129 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
4130 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
4131 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
4132 up_req->msg.req_type);
4133 kfree(up_req);
4134 goto out;
4135 }
4136
4137 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
4138 false);
4139
4140 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
4141 const struct drm_dp_connection_status_notify *conn_stat =
4142 &up_req->msg.u.conn_stat;
4143
4144 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
4145 conn_stat->port_number,
4146 conn_stat->legacy_device_plug_status,
4147 conn_stat->displayport_device_plug_status,
4148 conn_stat->message_capability_status,
4149 conn_stat->input_port,
4150 conn_stat->peer_device_type);
4151 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
4152 const struct drm_dp_resource_status_notify *res_stat =
4153 &up_req->msg.u.resource_stat;
4154
4155 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
4156 res_stat->port_number,
4157 res_stat->available_pbn);
4158 }
4159
4160 up_req->hdr = mgr->up_req_recv.initial_hdr;
4161 mutex_lock(&mgr->up_req_lock);
4162 list_add_tail(&up_req->next, &mgr->up_req_list);
4163 mutex_unlock(&mgr->up_req_lock);
4164 queue_work(system_long_wq, &mgr->up_req_work);
4165
4166 out:
4167 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
4168 return 0;
4169 }
4170
4171 /**
4172  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
4173  * @mgr: manager to notify irq for.
4174  * @esi: 4 bytes from SINK_COUNT_ESI
4175  * @handled: whether the hpd interrupt was consumed or not
4176  *
4177  * This should be called from the driver when it detects a short IRQ,
4178  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
4179  * topology manager will process the sideband messages received as a result
4180  * of this.
4181  */
4182 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
4183 {
4184 int ret = 0;
4185 int sc;
4186 *handled = false;
4187 sc = DP_GET_SINK_COUNT(esi[0]);
4188
4189 if (sc != mgr->sink_count) {
4190 mgr->sink_count = sc;
4191 *handled = true;
4192 }
4193
4194 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
4195 ret = drm_dp_mst_handle_down_rep(mgr);
4196 *handled = true;
4197 }
4198
4199 if (esi[1] & DP_UP_REQ_MSG_RDY) {
4200 ret |= drm_dp_mst_handle_up_req(mgr);
4201 *handled = true;
4202 }
4203
4204 drm_dp_mst_kick_tx(mgr);
4205 return ret;
4206 }
4207 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
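
/*
 * Usage sketch (abbreviated short-pulse handler; real drivers loop until
 * the ESI bits stop changing and add error handling):
 *
 *	u8 esi[DP_DPRX_ESI_LEN] = {};
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_writeb(mgr->aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
 *				   esi[1]);
 */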
4208
4209 /**
4210  * drm_dp_mst_detect_port() - get connection status for an MST port
4211  * @connector: DRM connector for this port
4212  * @ctx: The acquisition context to use for grabbing locks
4213  * @mgr: manager for this port
4214  * @port: pointer to a port
4215  *
4216  * This returns the current connection state for a port.
4217  */
4218 int
4219 drm_dp_mst_detect_port(struct drm_connector *connector,
4220 struct drm_modeset_acquire_ctx *ctx,
4221 struct drm_dp_mst_topology_mgr *mgr,
4222 struct drm_dp_mst_port *port)
4223 {
4224 int ret;
4225
4226 	/* we need to search for the port in the mgr in case it's gone */
4227 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4228 if (!port)
4229 return connector_status_disconnected;
4230
4231 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4232 if (ret)
4233 goto out;
4234
4235 ret = connector_status_disconnected;
4236
4237 if (!port->ddps)
4238 goto out;
4239
4240 switch (port->pdt) {
4241 case DP_PEER_DEVICE_NONE:
4242 break;
4243 case DP_PEER_DEVICE_MST_BRANCHING:
4244 if (!port->mcs)
4245 ret = connector_status_connected;
4246 break;
4247
4248 case DP_PEER_DEVICE_SST_SINK:
4249 ret = connector_status_connected;
4250 		/* for logical ports - cache the EDID */
4251 if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
4252 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
4253 break;
4254 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4255 if (port->ldps)
4256 ret = connector_status_connected;
4257 break;
4258 }
4259 out:
4260 drm_dp_mst_topology_put_port(port);
4261 return ret;
4262 }
4263 EXPORT_SYMBOL(drm_dp_mst_detect_port);
4264
4265 /**
4266  * drm_dp_mst_get_edid() - get EDID for an MST port
4267  * @connector: toplevel connector to get EDID for
4268  * @mgr: manager for this port
4269  * @port: unverified pointer to a port
4270  *
4271  * This returns an EDID for the port connected to a connector.
4272  * It validates the pointer still exists so the caller doesn't require a
4273  * reference.
4274  */
4275 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4276 {
4277 struct edid *edid = NULL;
4278
4279 	/* we need to search for the port in the mgr in case it's gone */
4280 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4281 if (!port)
4282 return NULL;
4283
4284 if (port->cached_edid)
4285 edid = drm_edid_duplicate(port->cached_edid);
4286 	else
4287 		edid = drm_get_edid(connector, &port->aux.ddc);
4289 port->has_audio = drm_detect_monitor_audio(edid);
4290 drm_dp_mst_topology_put_port(port);
4291 return edid;
4292 }
4293 EXPORT_SYMBOL(drm_dp_mst_get_edid);
4294
4295 /**
4296  * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4297  * @mgr: manager to use
4298  * @pbn: payload bandwidth to convert into slots.
4299  *
4300  * Calculate the number of VCPI slots that will be required for the given PBN
4301  * value. This function is deprecated, and should not be used in atomic
4302  * drivers; they should use drm_dp_atomic_find_vcpi_slots() instead.
4303  *
4304  * RETURNS:
4305  * The total slots required for this port, or error.
4306  */
4307 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4308 int pbn)
4309 {
4310 int num_slots;
4311
4312 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4313
4314 	/* max. time slots - one slot for MTP header */
4315 if (num_slots > 63)
4316 return -ENOSPC;
4317 return num_slots;
4318 }
4319 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4320
4321 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4322 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4323 {
4324 int ret;
4325
4326 vcpi->pbn = pbn;
4327 vcpi->aligned_pbn = slots * mgr->pbn_div;
4328 vcpi->num_slots = slots;
4329
4330 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4331 if (ret < 0)
4332 return ret;
4333 return 0;
4334 }
4335
4336 /**
4337  * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4338  * @state: global atomic state
4339  * @mgr: MST topology manager for the port
4340  * @port: port to find vcpi slots for
4341  * @pbn: bandwidth required for the mode in PBN
4342  * @pbn_div: divider for DSC mode that takes FEC into account
4343  *
4344  * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4345  * may have had. Any atomic drivers which support MST must call this function
4346  * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4347  * current VCPI allocation for the new state, but only when
4348  * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4349  * to ensure compatibility with userspace applications that still use the
4350  * legacy modesetting UAPI.
4351  *
4352  * Allocations set by this function are not checked against the bandwidth
4353  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4354  *
4355  * Additionally, it is OK to call this function multiple times on the same
4356  * @port as needed. It is not OK however, to call this function and
4357  * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4358  *
4359  * See also:
4360  * drm_dp_atomic_release_vcpi_slots()
4361  * drm_dp_mst_atomic_check()
4362  *
4363  * Returns:
4364  * Total slots in the atomic state assigned for this port, or a negative error
4365  * code if the port no longer exists
4366  */
4367 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4368 struct drm_dp_mst_topology_mgr *mgr,
4369 struct drm_dp_mst_port *port, int pbn,
4370 int pbn_div)
4371 {
4372 struct drm_dp_mst_topology_state *topology_state;
4373 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4374 int prev_slots, prev_bw, req_slots;
4375
4376 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4377 if (IS_ERR(topology_state))
4378 return PTR_ERR(topology_state);
4379
4380 	/* Find the current allocation for this port, if any */
4381 list_for_each_entry(pos, &topology_state->vcpis, next) {
4382 if (pos->port == port) {
4383 vcpi = pos;
4384 prev_slots = vcpi->vcpi;
4385 prev_bw = vcpi->pbn;
4386
4387 			/*
4388 			 * This should never happen, unless the driver tries
4389 			 * releasing and allocating the same VCPI allocation,
4390 			 * which is an error
4391 			 */
4392 if (WARN_ON(!prev_slots)) {
4393 drm_err(mgr->dev,
4394 "cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4395 port);
4396 return -EINVAL;
4397 }
4398
4399 break;
4400 }
4401 }
4402 if (!vcpi) {
4403 prev_slots = 0;
4404 prev_bw = 0;
4405 }
4406
4407 if (pbn_div <= 0)
4408 pbn_div = mgr->pbn_div;
4409
4410 req_slots = DIV_ROUND_UP(pbn, pbn_div);
4411
4412 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4413 port->connector->base.id, port->connector->name,
4414 port, prev_slots, req_slots);
4415 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4416 port->connector->base.id, port->connector->name,
4417 port, prev_bw, pbn);
4418
4419 	/* Add the new allocation to the state */
4420 if (!vcpi) {
4421 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4422 if (!vcpi)
4423 return -ENOMEM;
4424
4425 drm_dp_mst_get_port_malloc(port);
4426 vcpi->port = port;
4427 list_add(&vcpi->next, &topology_state->vcpis);
4428 }
4429 vcpi->vcpi = req_slots;
4430 vcpi->pbn = pbn;
4431
4432 return req_slots;
4433 }
4434 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
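
/*
 * Usage sketch (hypothetical encoder atomic_check): compute the PBN for the
 * adjusted mode and reserve time slots in the topology state:
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp, false);
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn, 0);
 *	if (slots < 0)
 *		return slots;
 */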
4435
4436 /**
4437  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4438  * @state: global atomic state
4439  * @mgr: MST topology manager for the port
4440  * @port: The port to release the VCPI slots from
4441  *
4442  * Releases any VCPI slots that have been allocated to a port in the atomic
4443  * state. Any atomic drivers which support MST must call this function in
4444  * their &drm_connector_helper_funcs.atomic_check() callback when the
4445  * connector will no longer have VCPI allocated (e.g. because its CRTC was
4446  * removed) when it had VCPI allocated in the previous atomic state.
4447  *
4448  * It is OK to call this even if @port has been removed from the system.
4449  * Additionally, it is OK to call this function multiple times on the same
4450  * @port as needed. It is not OK however, to call this function and
4451  * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4452  * phase.
4453  *
4454  * See also:
4455  * drm_dp_atomic_find_vcpi_slots()
4456  * drm_dp_mst_atomic_check()
4457  *
4458  * Returns:
4459  * 0 if all slots for this port were added back to
4460  * &drm_dp_mst_topology_state.avail_slots or negative error code
4461  */
4462 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4463 struct drm_dp_mst_topology_mgr *mgr,
4464 struct drm_dp_mst_port *port)
4465 {
4466 struct drm_dp_mst_topology_state *topology_state;
4467 struct drm_dp_vcpi_allocation *pos;
4468 bool found = false;
4469
4470 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4471 if (IS_ERR(topology_state))
4472 return PTR_ERR(topology_state);
4473
4474 list_for_each_entry(pos, &topology_state->vcpis, next) {
4475 if (pos->port == port) {
4476 found = true;
4477 break;
4478 }
4479 }
4480 if (WARN_ON(!found)) {
4481 drm_err(mgr->dev, "no VCPI for [MST PORT:%p] found in mst state %p\n",
4482 port, &topology_state->base);
4483 return -EINVAL;
4484 }
4485
4486 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4487 if (pos->vcpi) {
4488 drm_dp_mst_put_port_malloc(port);
4489 pos->vcpi = 0;
4490 pos->pbn = 0;
4491 }
4492
4493 return 0;
4494 }
4495 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
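/*
 * Example (editorial sketch): the matching release from a connector's
 * atomic_check when the connector loses its CRTC; all names around the
 * helper call are illustrative.
 *
 *	if (old_conn_state->crtc && !new_conn_state->crtc) {
 *		ret = drm_dp_atomic_release_vcpi_slots(state, mgr, port);
 *		if (ret)
 *			return ret;
 *	}
 */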
4496
4497 /**
4498  * drm_dp_mst_update_slots() - update the slot info for the DP link encoding format
4499  * @mst_state: mst_state to update
4500  * @link_encoding_cap: the channel encoding format used on the link
4501  */
4502 void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
4503 {
4504 if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
4505 mst_state->total_avail_slots = 64;
4506 mst_state->start_slot = 0;
4507 } else {
4508 mst_state->total_avail_slots = 63;
4509 mst_state->start_slot = 1;
4510 }
4511
4512 DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
4513 (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
4514 mst_state);
4515 }
4516 EXPORT_SYMBOL(drm_dp_mst_update_slots);
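/*
 * Example (editorial sketch): choosing the slot layout based on the channel
 * encoding the driver is about to use; is_uhbr is an assumed driver-side
 * flag, the DP_CAP_* values are the real DPCD encoding caps.
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 *
 *	drm_dp_mst_update_slots(mst_state, is_uhbr ? DP_CAP_ANSI_128B132B :
 *						     DP_CAP_ANSI_8B10B);
 */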
4517
4518 /**
4519  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4520  * @mgr: manager for this port
4521  * @port: port to allocate a virtual channel for.
4522  * @pbn: payload bandwidth number to request
4523  * @slots: number of slots to allocate for this PBN value
4524  */
4525 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4526 struct drm_dp_mst_port *port, int pbn, int slots)
4527 {
4528 int ret;
4529
4530 if (slots < 0)
4531 return false;
4532
4533 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4534 if (!port)
4535 return false;
4536
4537 if (port->vcpi.vcpi > 0) {
4538 drm_dbg_kms(mgr->dev,
4539 "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4540 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4541 if (pbn == port->vcpi.pbn) {
4542 drm_dp_mst_topology_put_port(port);
4543 return true;
4544 }
4545 }
4546
4547 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4548 if (ret) {
4549 drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n",
4550 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4551 drm_dp_mst_topology_put_port(port);
4552 goto out;
4553 }
4554 drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots);
4555
4556 /* Keep port allocated until its payload has been removed */
4557 drm_dp_mst_get_port_malloc(port);
4558 drm_dp_mst_topology_put_port(port);
4559 return true;
4560 out:
4561 return false;
4562 }
4563 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
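/*
 * Example (editorial sketch): the legacy (non-atomic) allocation flow that
 * pairs drm_dp_find_vcpi_slots() with this function; adjusted_mode and bpp
 * are illustrative driver state.
 *
 *	int pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
 *	int slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *
 *	if (slots < 0 || !drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
 *		return -EINVAL;
 */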
4564
4565 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4566 {
4567 int slots = 0;
4568
4569 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4570 if (!port)
4571 return slots;
4572
4573 slots = port->vcpi.num_slots;
4574 drm_dp_mst_topology_put_port(port);
4575 return slots;
4576 }
4577 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4578
4579 /**
4580  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4581  * @mgr: manager for this port
4582  * @port: unverified pointer to a port.
4583  *
4584  * This just resets the number of slots for the port's VCPI for later programming.
4585  */
4586 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4587 {
4588
4589 /*
4590  * A port with VCPI will remain allocated until its VCPI is
4591  * released, no verified ref needed
4592  */
4593 port->vcpi.num_slots = 0;
4594 }
4595 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4596
4597 /**
4598  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4599  * @mgr: manager for this port
4600  * @port: port to deallocate vcpi for
4601  *
4602  * This can be called unconditionally, regardless of whether
4603  * drm_dp_mst_allocate_vcpi() succeeded or not.
4604  */
4605 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4606 struct drm_dp_mst_port *port)
4607 {
4608 bool skip;
4609
4610 if (!port->vcpi.vcpi)
4611 return;
4612
4613 mutex_lock(&mgr->lock);
4614 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
4615 mutex_unlock(&mgr->lock);
4616
4617 if (skip)
4618 return;
4619
4620 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4621 port->vcpi.num_slots = 0;
4622 port->vcpi.pbn = 0;
4623 port->vcpi.aligned_pbn = 0;
4624 port->vcpi.vcpi = 0;
4625 drm_dp_mst_put_port_malloc(port);
4626 }
4627 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4628
4629 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4630 int id, struct drm_dp_payload *payload)
4631 {
4632 u8 payload_alloc[3], status;
4633 int ret;
4634 int retries = 0;
4635
4636 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4637 DP_PAYLOAD_TABLE_UPDATED);
4638
4639 payload_alloc[0] = id;
4640 payload_alloc[1] = payload->start_slot;
4641 payload_alloc[2] = payload->num_slots;
4642
4643 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4644 if (ret != 3) {
4645 drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
4646 goto fail;
4647 }
4648
4649 retry:
4650 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4651 if (ret < 0) {
4652 drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
4653 goto fail;
4654 }
4655
4656 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4657 retries++;
4658 if (retries < 20) {
4659 usleep_range(10000, 20000);
4660 goto retry;
4661 }
4662 drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
4663 status);
4664 ret = -EINVAL;
4665 goto fail;
4666 }
4667 ret = 0;
4668 fail:
4669 return ret;
4670 }
4671
4672 static int do_get_act_status(struct drm_dp_aux *aux)
4673 {
4674 int ret;
4675 u8 status;
4676
4677 ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4678 if (ret < 0)
4679 return ret;
4680
4681 return status;
4682 }
4683
4684 /**
4685  * drm_dp_check_act_status() - Polls for ACT handled status.
4686  * @mgr: manager to use
4687  *
4688  * Tries waiting for the MST hub to finish updating its payload table by
4689  * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
4690  * take that long).
4691  *
4692  * Returns:
4693  * 0 if the ACT was handled in time, negative error code on failure.
4694  */
4695 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4696 {
4697
4698 /*
4699  * There doesn't seem to be any recommended retry count or timeout in
4700  * the DP spec, but some hubs have been observed to take up to 3
4701  * seconds to set ACT, so wait that long before giving up.
4702  */
4703 const int timeout_ms = 3000;
4704 int ret, status;
4705
4706 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
4707 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
4708 200, timeout_ms * USEC_PER_MSEC);
4709 if (ret < 0 && status >= 0) {
4710 drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
4711 timeout_ms, status);
4712 return -EINVAL;
4713 } else if (status < 0) {
4714 /*
4715  * Failure here isn't unexpected - the hub may have
4716  * just been unplugged
4717  */
4718 drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
4719 return status;
4720 }
4721
4722 return 0;
4723 }
4724 EXPORT_SYMBOL(drm_dp_check_act_status);
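/*
 * Example (editorial sketch): where this fits in the legacy commit flow,
 * assuming the two-argument form of drm_dp_update_payload_part1() used by
 * this version of the helpers; the hardware-enable step is driver specific.
 *
 *	drm_dp_update_payload_part1(mgr, 1);	// start_slot 1 for 8b/10b
 *	... enable the stream in hardware ...
 *	if (!drm_dp_check_act_status(mgr))	// polls ACT for up to 3s
 *		drm_dp_update_payload_part2(mgr);
 */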
4725
4726 /**
4727  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4728  * @clock: dot clock for the mode
4729  * @bpp: bpp for the mode.
4730  * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
4731  *
4732  * This uses the formula in the spec to calculate the PBN value for a mode.
4733  */
4734 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4735 {
4736 /*
4737  * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
4738  * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
4739  * common multiplier to render an integer PBN for all link rate/lane
4740  * counts combinations
4741  * calculate
4742  * peak_kbps *= (1006/1000)
4743  * peak_kbps *= (64/54)
4744  * peak_kbps *= 8    convert to bytes
4745  *
4746  * If the bpp is in units of 1/16, further divide by 16. Put this
4747  * factor in the numerator rather than the denominator to avoid
4748  * integer overflow
4749  */
4750
4751 if (dsc)
4752 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4753 8 * 54 * 1000 * 1000);
4754
4755 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4756 8 * 54 * 1000 * 1000);
4757 }
4758 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
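/*
 * Worked example (editorial note): a 1920x1080@60 mode has a dot clock of
 * 148500 kHz. At 24 bpp without DSC the helper computes:
 *
 *	PBN = DIV_ROUND_UP_ULL(148500 * 24 * (64 * 1006),
 *			       8 * 54 * 1000 * 1000)
 *	    = DIV_ROUND_UP_ULL(229464576000, 432000000)
 *	    = 532
 *
 * i.e. the stream's bytes/sec, scaled by 64/54 and the 0.6% margin,
 * expressed in units of 54/64 MB/s.
 */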
4759
4760 /* we want to kick the TX after we've ack the up/down IRQs. */
4761 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4762 {
4763 queue_work(system_long_wq, &mgr->tx_work);
4764 }
4765
4766
4770 static const char *pdt_to_string(u8 pdt)
4771 {
4772 switch (pdt) {
4773 case DP_PEER_DEVICE_NONE:
4774 return "NONE";
4775 case DP_PEER_DEVICE_SOURCE_OR_SST:
4776 return "SOURCE OR SST";
4777 case DP_PEER_DEVICE_MST_BRANCHING:
4778 return "MST BRANCHING";
4779 case DP_PEER_DEVICE_SST_SINK:
4780 return "SST SINK";
4781 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4782 return "DP LEGACY CONV";
4783 default:
4784 return "ERR";
4785 }
4786 }
4787
4788 static void drm_dp_mst_dump_mstb(struct seq_file *m,
4789 struct drm_dp_mst_branch *mstb)
4790 {
4791 struct drm_dp_mst_port *port;
4792 int tabs = mstb->lct;
4793 char prefix[10];
4794 int i;
4795
4796 for (i = 0; i < tabs; i++)
4797 prefix[i] = '\t';
4798 prefix[i] = '\0';
4799
4800 seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
4801 list_for_each_entry(port, &mstb->ports, next) {
4802 seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
4803 prefix,
4804 port->port_num,
4805 port,
4806 port->input ? "input" : "output",
4807 pdt_to_string(port->pdt),
4808 port->ddps,
4809 port->ldps,
4810 port->num_sdp_streams,
4811 port->num_sdp_stream_sinks,
4812 port->fec_capable ? "true" : "false",
4813 port->connector);
4814 if (port->mstb)
4815 drm_dp_mst_dump_mstb(m, port->mstb);
4816 }
4817 }
4818
4819 #define DP_PAYLOAD_TABLE_SIZE 64
4820
4821 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4822 char *buf)
4823 {
4824 int i;
4825
4826 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4827 if (drm_dp_dpcd_read(mgr->aux,
4828 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4829 &buf[i], 16) != 16)
4830 return false;
4831 }
4832 return true;
4833 }
4834
4835 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4836 struct drm_dp_mst_port *port, char *name,
4837 int namelen)
4838 {
4839 struct edid *mst_edid;
4840
4841 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4842 drm_edid_get_monitor_name(mst_edid, name, namelen);
4843 kfree(mst_edid);
4844 }
4845
4846 /**
4847  * drm_dp_mst_dump_topology(): dump topology to seq file.
4848  * @m: seq_file to dump output to
4849  * @mgr: manager to dump current topology for.
4850  *
4851  * helper to dump MST topology to a seq file for debugfs.
4852  */
4853 void drm_dp_mst_dump_topology(struct seq_file *m,
4854 struct drm_dp_mst_topology_mgr *mgr)
4855 {
4856 int i;
4857 struct drm_dp_mst_port *port;
4858
4859 mutex_lock(&mgr->lock);
4860 if (mgr->mst_primary)
4861 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4862
4863 /* dump VCPIs */
4864 mutex_unlock(&mgr->lock);
4865
4866 mutex_lock(&mgr->payload_lock);
4867 seq_printf(m, "\n*** VCPI Info ***\n");
4868 seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads);
4869
4870 seq_printf(m, "\n| idx | port # | vcp_id | # slots | sink name |\n");
4871 for (i = 0; i < mgr->max_payloads; i++) {
4872 if (mgr->proposed_vcpis[i]) {
4873 char name[14];
4874
4875 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4876 fetch_monitor_name(mgr, port, name, sizeof(name));
4877 seq_printf(m, "%10d%10d%10d%10d%20s\n",
4878 i,
4879 port->port_num,
4880 port->vcpi.vcpi,
4881 port->vcpi.num_slots,
4882 (*name != 0) ? name : "Unknown");
4883 } else
4884 seq_printf(m, "%6d - Unused\n", i);
4885 }
4886 seq_printf(m, "\n*** Payload Info ***\n");
4887 seq_printf(m, "| idx | state | start slot | # slots |\n");
4888 for (i = 0; i < mgr->max_payloads; i++) {
4889 seq_printf(m, "%10d%10d%15d%10d\n",
4890 i,
4891 mgr->payloads[i].payload_state,
4892 mgr->payloads[i].start_slot,
4893 mgr->payloads[i].num_slots);
4894 }
4895 mutex_unlock(&mgr->payload_lock);
4896
4897 seq_printf(m, "\n*** DPCD Info ***\n");
4898 mutex_lock(&mgr->lock);
4899 if (mgr->mst_primary) {
4900 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4901 int ret;
4902
4903 if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {
4904 seq_printf(m, "dpcd read failed\n");
4905 goto out;
4906 }
4907 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4908
4909 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4910 if (ret != 2) {
4911 seq_printf(m, "faux/mst read failed\n");
4912 goto out;
4913 }
4914 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4915
4916 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4917 if (ret != 1) {
4918 seq_printf(m, "mst ctrl read failed\n");
4919 goto out;
4920 }
4921 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4922
4923 /* dump the standard OUI branch header */
4924 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4925 if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4926 seq_printf(m, "branch oui read failed\n");
4927 goto out;
4928 }
4929 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4930
4931 for (i = 0x3; i < 0x8 && buf[i]; i++)
4932 seq_printf(m, "%c", buf[i]);
4933 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4934 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4935 if (dump_dp_payload_table(mgr, buf))
4936 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4937 }
4938
4939 out:
4940 mutex_unlock(&mgr->lock);
4941
4942 }
4943 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
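/*
 * Example (editorial sketch): exposing the dump through debugfs with a
 * seq_file show callback; example_mst_topology_show() is a hypothetical
 * driver function, DEFINE_SHOW_ATTRIBUTE() is the standard kernel macro.
 *
 *	static int example_mst_topology_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr = m->private;
 *
 *		drm_dp_mst_dump_topology(m, mgr);
 *		return 0;
 *	}
 *	DEFINE_SHOW_ATTRIBUTE(example_mst_topology);
 */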
4944
4945 static void drm_dp_tx_work(struct work_struct *work)
4946 {
4947 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4948
4949 mutex_lock(&mgr->qlock);
4950 if (!list_empty(&mgr->tx_msg_downq))
4951 process_single_down_tx_qlock(mgr);
4952 mutex_unlock(&mgr->qlock);
4953 }
4954
4955 static inline void
4956 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4957 {
4958 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4959
4960 if (port->connector) {
4961 drm_connector_unregister(port->connector);
4962 drm_connector_put(port->connector);
4963 }
4964
4965 drm_dp_mst_put_port_malloc(port);
4966 }
4967
4968 static inline void
4969 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4970 {
4971 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4972 struct drm_dp_mst_port *port, *port_tmp;
4973 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
4974 bool wake_tx = false;
4975
4976 mutex_lock(&mgr->lock);
4977 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
4978 list_del(&port->next);
4979 drm_dp_mst_topology_put_port(port);
4980 }
4981 mutex_unlock(&mgr->lock);
4982
4983 /* drop any tx slots msg */
4984 mutex_lock(&mstb->mgr->qlock);
4985 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
4986 if (txmsg->dst != mstb)
4987 continue;
4988
4989 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4990 list_del(&txmsg->next);
4991 wake_tx = true;
4992 }
4993 mutex_unlock(&mstb->mgr->qlock);
4994
4995 if (wake_tx)
4996 wake_up_all(&mstb->mgr->tx_waitq);
4997
4998 drm_dp_mst_put_mstb_malloc(mstb);
4999 }
5000
5001 static void drm_dp_delayed_destroy_work(struct work_struct *work)
5002 {
5003 struct drm_dp_mst_topology_mgr *mgr =
5004 container_of(work, struct drm_dp_mst_topology_mgr,
5005 delayed_destroy_work);
5006 bool send_hotplug = false, go_again;
5007
5008 /*
5009  * Not a regular list traverse as we have to drop the destroy
5010  * connector lock before destroying the mstb/port, to avoid AB->BA
5011  * ordering between this lock and the config mutex.
5012  */
5013 do {
5014 go_again = false;
5015
5016 for (;;) {
5017 struct drm_dp_mst_branch *mstb;
5018
5019 mutex_lock(&mgr->delayed_destroy_lock);
5020 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
5021 struct drm_dp_mst_branch,
5022 destroy_next);
5023 if (mstb)
5024 list_del(&mstb->destroy_next);
5025 mutex_unlock(&mgr->delayed_destroy_lock);
5026
5027 if (!mstb)
5028 break;
5029
5030 drm_dp_delayed_destroy_mstb(mstb);
5031 go_again = true;
5032 }
5033
5034 for (;;) {
5035 struct drm_dp_mst_port *port;
5036
5037 mutex_lock(&mgr->delayed_destroy_lock);
5038 port = list_first_entry_or_null(&mgr->destroy_port_list,
5039 struct drm_dp_mst_port,
5040 next);
5041 if (port)
5042 list_del(&port->next);
5043 mutex_unlock(&mgr->delayed_destroy_lock);
5044
5045 if (!port)
5046 break;
5047
5048 drm_dp_delayed_destroy_port(port);
5049 send_hotplug = true;
5050 go_again = true;
5051 }
5052 } while (go_again);
5053
5054 if (send_hotplug)
5055 drm_kms_helper_hotplug_event(mgr->dev);
5056 }
5057
5058 static struct drm_private_state *
5059 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
5060 {
5061 struct drm_dp_mst_topology_state *state, *old_state =
5062 to_dp_mst_topology_state(obj->state);
5063 struct drm_dp_vcpi_allocation *pos, *vcpi;
5064
5065 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
5066 if (!state)
5067 return NULL;
5068
5069 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
5070
5071 INIT_LIST_HEAD(&state->vcpis);
5072
5073 list_for_each_entry(pos, &old_state->vcpis, next) {
5074 /* Prune leftover freed VCPI allocations */
5075 if (!pos->vcpi)
5076 continue;
5077
5078 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
5079 if (!vcpi)
5080 goto fail;
5081
5082 drm_dp_mst_get_port_malloc(vcpi->port);
5083 list_add(&vcpi->next, &state->vcpis);
5084 }
5085
5086 return &state->base;
5087
5088 fail:
5089 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
5090 drm_dp_mst_put_port_malloc(pos->port);
5091 kfree(pos);
5092 }
5093 kfree(state);
5094
5095 return NULL;
5096 }
5097
5098 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
5099 struct drm_private_state *state)
5100 {
5101 struct drm_dp_mst_topology_state *mst_state =
5102 to_dp_mst_topology_state(state);
5103 struct drm_dp_vcpi_allocation *pos, *tmp;
5104
5105 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
5106 /* We only keep references to ports with non-zero VCPIs */
5107 if (pos->vcpi)
5108 drm_dp_mst_put_port_malloc(pos->port);
5109 kfree(pos);
5110 }
5111
5112 kfree(mst_state);
5113 }
5114
5115 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
5116 struct drm_dp_mst_branch *branch)
5117 {
5118 while (port->parent) {
5119 if (port->parent == branch)
5120 return true;
5121
5122 if (port->parent->port_parent)
5123 port = port->parent->port_parent;
5124 else
5125 break;
5126 }
5127 return false;
5128 }
5129
5130 static int
5131 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5132 struct drm_dp_mst_topology_state *state);
5133
5134 static int
5135 drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
5136 struct drm_dp_mst_topology_state *state)
5137 {
5138 struct drm_dp_vcpi_allocation *vcpi;
5139 struct drm_dp_mst_port *port;
5140 int pbn_used = 0, ret;
5141 bool found = false;
5142
5143 /* Check that we have at least one port in our state that's downstream
5144  * of this branch, otherwise we can skip this branch
5145  */
5146 list_for_each_entry(vcpi, &state->vcpis, next) {
5147 if (!vcpi->pbn ||
5148 !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
5149 continue;
5150
5151 found = true;
5152 break;
5153 }
5154 if (!found)
5155 return 0;
5156
5157 if (mstb->port_parent)
5158 drm_dbg_atomic(mstb->mgr->dev,
5159 "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
5160 mstb->port_parent->parent, mstb->port_parent, mstb);
5161 else
5162 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
5163
5164 list_for_each_entry(port, &mstb->ports, next) {
5165 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
5166 if (ret < 0)
5167 return ret;
5168
5169 pbn_used += ret;
5170 }
5171
5172 return pbn_used;
5173 }
5174
5175 static int
5176 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5177 struct drm_dp_mst_topology_state *state)
5178 {
5179 struct drm_dp_vcpi_allocation *vcpi;
5180 int pbn_used = 0;
5181
5182 if (port->pdt == DP_PEER_DEVICE_NONE)
5183 return 0;
5184
5185 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
5186 bool found = false;
5187
5188 list_for_each_entry(vcpi, &state->vcpis, next) {
5189 if (vcpi->port != port)
5190 continue;
5191 if (!vcpi->pbn)
5192 return 0;
5193
5194 found = true;
5195 break;
5196 }
5197 if (!found)
5198 return 0;
5199
5200 /*
5201  * This could happen if the sink deasserted its HPD line, but
5202  * the branch device is still reporting it as attached
5203  */
5204 if (!port->full_pbn) {
5205 drm_dbg_atomic(port->mgr->dev,
5206 "[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
5207 port->parent, port);
5208 return -EINVAL;
5209 }
5210
5211 pbn_used = vcpi->pbn;
5212 } else {
5213 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
5214 state);
5215 if (pbn_used <= 0)
5216 return pbn_used;
5217 }
5218
5219 if (pbn_used > port->full_pbn) {
5220 drm_dbg_atomic(port->mgr->dev,
5221 "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
5222 port->parent, port, pbn_used, port->full_pbn);
5223 return -ENOSPC;
5224 }
5225
5226 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
5227 port->parent, port, pbn_used, port->full_pbn);
5228
5229 return pbn_used;
5230 }
5231
5232 static inline int
5233 drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
5234 struct drm_dp_mst_topology_state *mst_state)
5235 {
5236 struct drm_dp_vcpi_allocation *vcpi;
5237 int avail_slots = mst_state->total_avail_slots, payload_count = 0;
5238
5239 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
5240 /* Releasing VCPI is always OK - even if the port is gone */
5241 if (!vcpi->vcpi) {
5242 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n",
5243 vcpi->port);
5244 continue;
5245 }
5246
5247 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n",
5248 vcpi->port, vcpi->vcpi);
5249
5250 avail_slots -= vcpi->vcpi;
5251 if (avail_slots < 0) {
5252 drm_dbg_atomic(mgr->dev,
5253 "[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
5254 vcpi->port, mst_state, avail_slots + vcpi->vcpi);
5255 return -ENOSPC;
5256 }
5257
5258 if (++payload_count > mgr->max_payloads) {
5259 drm_dbg_atomic(mgr->dev,
5260 "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
5261 mgr, mst_state, mgr->max_payloads);
5262 return -EINVAL;
5263 }
5264 }
5265 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
5266 mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots);
5267
5268 return 0;
5269 }
5270
5271 /**
5272  * drm_dp_mst_add_affected_dsc_crtcs() - Add affected DSC CRTCs to the state
5273  * @state: Pointer to the new struct drm_dp_mst_topology_state
5274  * @mgr: MST topology manager
5275  *
5276  * Whenever there is a change in the MST topology, the DSC configuration
5277  * may have to be recalculated, so a modeset needs to be triggered on all
5278  * CRTCs in that topology that have DSC enabled.
5279  *
5280  * See also:
5281  * drm_dp_mst_atomic_enable_dsc()
5282  *
5283  */
5284 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5285 {
5286 struct drm_dp_mst_topology_state *mst_state;
5287 struct drm_dp_vcpi_allocation *pos;
5288 struct drm_connector *connector;
5289 struct drm_connector_state *conn_state;
5290 struct drm_crtc *crtc;
5291 struct drm_crtc_state *crtc_state;
5292
5293 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5294
5295 if (IS_ERR(mst_state))
5296 return -EINVAL;
5297
5298 list_for_each_entry(pos, &mst_state->vcpis, next) {
5299
5300 connector = pos->port->connector;
5301
5302 if (!connector)
5303 return -EINVAL;
5304
5305 conn_state = drm_atomic_get_connector_state(state, connector);
5306
5307 if (IS_ERR(conn_state))
5308 return PTR_ERR(conn_state);
5309
5310 crtc = conn_state->crtc;
5311
5312 if (!crtc)
5313 continue;
5314
5315 if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5316 continue;
5317
5318 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5319
5320 if (IS_ERR(crtc_state))
5321 return PTR_ERR(crtc_state);
5322
5323 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5324 mgr, crtc);
5325
5326 crtc_state->mode_changed = true;
5327 }
5328 return 0;
5329 }
5330 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
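/*
 * Example (editorial sketch): called from a driver's global atomic_check
 * once it sees the MST topology (and therefore the DSC configuration) may
 * change; state and mgr are the caller's atomic state and manager.
 *
 *	ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
 *	if (ret)
 *		return ret;
 */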
5331
5332 /**
5333  * drm_dp_mst_atomic_enable_dsc() - Set DSC Enable Flag to On/Off
5334  * @state: Pointer to the new drm_atomic_state
5335  * @port: Pointer to the affected MST Port
5336  * @pbn: Newly recalculated bw required for link with DSC enabled
5337  * @pbn_div: Divider to calculate BW per slot amount
5338  * @enable: Boolean flag to enable or disable DSC on the port
5339  *
5340  * This function enables DSC on the given port by recalculating its VCPI
5341  * allocation from the provided @pbn, and sets the dsc_enabled flag to keep
5342  * track of which ports have DSC enabled.
5343  *
5344  * Returns: the updated number of VCPI slots, or a negative error code.
5345  */
5346 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5347 struct drm_dp_mst_port *port,
5348 int pbn, int pbn_div,
5349 bool enable)
5350 {
5351 struct drm_dp_mst_topology_state *mst_state;
5352 struct drm_dp_vcpi_allocation *pos;
5353 bool found = false;
5354 int vcpi = 0;
5355
5356 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5357
5358 if (IS_ERR(mst_state))
5359 return PTR_ERR(mst_state);
5360
5361 list_for_each_entry(pos, &mst_state->vcpis, next) {
5362 if (pos->port == port) {
5363 found = true;
5364 break;
5365 }
5366 }
5367
5368 if (!found) {
5369 drm_dbg_atomic(state->dev,
5370 "[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5371 port, mst_state);
5372 return -EINVAL;
5373 }
5374
5375 if (pos->dsc_enabled == enable) {
5376 drm_dbg_atomic(state->dev,
5377 "[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5378 port, enable, pos->vcpi);
5379 vcpi = pos->vcpi;
5380 }
5381
5382 if (enable) {
5383 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5384 drm_dbg_atomic(state->dev,
5385 "[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5386 port, vcpi);
5387 if (vcpi < 0)
5388 return -EINVAL;
5389 }
5390
5391 pos->dsc_enabled = enable;
5392
5393 return vcpi;
5394 }
5395 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
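/*
 * Example (editorial sketch): enabling DSC for a port after the driver has
 * recomputed the bandwidth of the compressed stream; dsc_pbn is an assumed
 * driver-side value, pbn_div 0 selects the link's default divider.
 *
 *	slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn, 0, true);
 *	if (slots < 0)
 *		return slots;
 */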
5396
5397 /**
5398  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5399  * atomic update is valid
5400  * @state: Pointer to the new &struct drm_dp_mst_topology_state
5401  *
5402  * Checks the given topology state for an atomic update to ensure that it's
5403  * valid. This includes checking whether there's enough bandwidth to support
5404  * the new VCPI allocations in the given topology.
5405  *
5406  * Any atomic drivers supporting DP MST must make sure to call this after
5407  * checking the rest of their state in their
5408  * &drm_mode_config_funcs.atomic_check() callback.
5409  *
5410  * See also:
5411  * drm_dp_atomic_find_vcpi_slots()
5412  * drm_dp_atomic_release_vcpi_slots()
5413  *
5414  * Returns:
5415  * 0 if the new state is valid, negative error code otherwise.
5416  */
5417 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5418 {
5419 struct drm_dp_mst_topology_mgr *mgr;
5420 struct drm_dp_mst_topology_state *mst_state;
5421 int i, ret = 0;
5422
5423 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5424 if (!mgr->mst_state)
5425 continue;
5426
5427 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5428 if (ret)
5429 break;
5430
5431 mutex_lock(&mgr->lock);
5432 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5433 mst_state);
5434 mutex_unlock(&mgr->lock);
5435 if (ret < 0)
5436 break;
5437 else
5438 ret = 0;
5439 }
5440
5441 return ret;
5442 }
5443 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
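/*
 * Example (editorial sketch): the expected call site in a driver's
 * &drm_mode_config_funcs.atomic_check implementation, after the rest of the
 * state has been validated:
 *
 *	ret = drm_atomic_helper_check(dev, state);
 *	if (ret)
 *		return ret;
 *
 *	return drm_dp_mst_atomic_check(state);
 */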
5444
5445 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5446 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
5447 .atomic_destroy_state = drm_dp_mst_destroy_state,
5448 };
5449 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5450
5451
5452 /**
5453  * drm_atomic_get_mst_topology_state: get MST topology state
5454  * @state: global atomic state
5455  * @mgr: MST topology manager, also the private object in this case
5456  *
5457  * This function wraps drm_atomic_get_private_obj_state() passing in the MST
5458  * atomic state vtable so that the private object state returned is that of a
5459  * MST topology object.
5460  *
5461  * RETURNS:
5462  * The MST topology state, or an error pointer if the private object state
5463  * could not be obtained.
5464  */
5465 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5466 struct drm_dp_mst_topology_mgr *mgr)
5467 {
5468 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5469 }
5470 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
5471
5472 /**
5473  * drm_dp_mst_topology_mgr_init - initialise a topology manager
5474  * @mgr: manager struct to initialise
5475  * @dev: device providing this structure - for i2c addition.
5476  * @aux: DP helper aux channel to talk to this device
5477  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
5478  * @max_payloads: maximum number of payloads this GPU can source
5479  * @max_lane_count: maximum number of lanes this GPU supports
5480  * @max_link_rate: maximum link rate per lane this GPU supports in kHz
5481  * @conn_base_id: the connector object ID the MST device is connected to.
5482  *
5483  * Return 0 for success, or negative error code on failure
5484  */
5485 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5486 struct drm_device *dev, struct drm_dp_aux *aux,
5487 int max_dpcd_transaction_bytes, int max_payloads,
5488 int max_lane_count, int max_link_rate,
5489 int conn_base_id)
5490 {
5491 struct drm_dp_mst_topology_state *mst_state;
5492
5493 mutex_init(&mgr->lock);
5494 mutex_init(&mgr->qlock);
5495 mutex_init(&mgr->payload_lock);
5496 mutex_init(&mgr->delayed_destroy_lock);
5497 mutex_init(&mgr->up_req_lock);
5498 mutex_init(&mgr->probe_lock);
5499 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5500 mutex_init(&mgr->topology_ref_history_lock);
5501 stack_depot_init();
5502 #endif
5503 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5504 INIT_LIST_HEAD(&mgr->destroy_port_list);
5505 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5506 INIT_LIST_HEAD(&mgr->up_req_list);
5507
5508 /*
5509  * delayed_destroy_work will be queued on a dedicated WQ, so that any
5510  * requeuing will be also flushed when deiniting the topology manager.
5511  */
5512 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5513 if (mgr->delayed_destroy_wq == NULL)
5514 return -ENOMEM;
5515
5516 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5517 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5518 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5519 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5520 init_waitqueue_head(&mgr->tx_waitq);
5521 mgr->dev = dev;
5522 mgr->aux = aux;
5523 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5524 mgr->max_payloads = max_payloads;
5525 mgr->max_lane_count = max_lane_count;
5526 mgr->max_link_rate = max_link_rate;
5527 mgr->conn_base_id = conn_base_id;
5528 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
5529 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
5530 return -EINVAL;
5531 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
5532 if (!mgr->payloads)
5533 return -ENOMEM;
5534 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
5535 if (!mgr->proposed_vcpis)
5536 return -ENOMEM;
5537 set_bit(0, &mgr->payload_mask);
5538
5539 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5540 if (mst_state == NULL)
5541 return -ENOMEM;
5542
5543 mst_state->total_avail_slots = 63;
5544 mst_state->start_slot = 1;
5545
5546 mst_state->mgr = mgr;
5547 INIT_LIST_HEAD(&mst_state->vcpis);
5548
5549 drm_atomic_private_obj_init(dev, &mgr->base,
5550 &mst_state->base,
5551 &drm_dp_mst_topology_state_funcs);
5552
5553 return 0;
5554 }
5555 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
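/*
 * Example (editorial sketch): initialising the manager at connector setup
 * time; "ex" is a hypothetical driver structure and the numeric limits are
 * illustrative, not recommendations.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&ex->mst_mgr, dev, &ex->aux,
 *					   16,		// max DPCD transaction bytes
 *					   4,		// max payloads
 *					   4,		// max lane count
 *					   810000,	// max link rate in kHz (HBR3)
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 */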
5556
5557 /**
5558  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5559  * @mgr: manager to destroy
5560  */
5561 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5562 {
5563 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5564 flush_work(&mgr->work);
5565
5566 if (mgr->delayed_destroy_wq) {
5567 destroy_workqueue(mgr->delayed_destroy_wq);
5568 mgr->delayed_destroy_wq = NULL;
5569 }
5570 mutex_lock(&mgr->payload_lock);
5571 kfree(mgr->payloads);
5572 mgr->payloads = NULL;
5573 kfree(mgr->proposed_vcpis);
5574 mgr->proposed_vcpis = NULL;
5575 mutex_unlock(&mgr->payload_lock);
5576 mgr->dev = NULL;
5577 mgr->aux = NULL;
5578 drm_atomic_private_obj_fini(&mgr->base);
5579 mgr->funcs = NULL;
5580
5581 mutex_destroy(&mgr->delayed_destroy_lock);
5582 mutex_destroy(&mgr->payload_lock);
5583 mutex_destroy(&mgr->qlock);
5584 mutex_destroy(&mgr->lock);
5585 mutex_destroy(&mgr->up_req_lock);
5586 mutex_destroy(&mgr->probe_lock);
5587 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5588 mutex_destroy(&mgr->topology_ref_history_lock);
5589 #endif
5590 }
5591 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5592
5593 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5594 {
5595 int i;
5596
5597 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5598 return false;
5599
5600 for (i = 0; i < num - 1; i++) {
5601 if (msgs[i].flags & I2C_M_RD ||
5602 msgs[i].len > 0xff)
5603 return false;
5604 }
5605
5606 return msgs[num - 1].flags & I2C_M_RD &&
5607 msgs[num - 1].len <= 0xff;
5608 }
5609
5610 static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
5611 {
5612 int i;
5613
5614 for (i = 0; i < num - 1; i++) {
5615 if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
5616 msgs[i].len > 0xff)
5617 return false;
5618 }
5619
5620 return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
5621 }
5622
5623 static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
5624 struct drm_dp_mst_port *port,
5625 struct i2c_msg *msgs, int num)
5626 {
5627 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5628 unsigned int i;
5629 struct drm_dp_sideband_msg_req_body msg;
5630 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5631 int ret;
5632
5633 memset(&msg, 0, sizeof(msg));
5634 msg.req_type = DP_REMOTE_I2C_READ;
5635 msg.u.i2c_read.num_transactions = num - 1;
5636 msg.u.i2c_read.port_number = port->port_num;
5637 for (i = 0; i < num - 1; i++) {
5638 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5639 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5640 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5641 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5642 }
5643 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5644 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5645
5646 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5647 if (!txmsg) {
5648 ret = -ENOMEM;
5649 goto out;
5650 }
5651
5652 txmsg->dst = mstb;
5653 drm_dp_encode_sideband_req(&msg, txmsg);
5654
5655 drm_dp_queue_down_tx(mgr, txmsg);
5656
5657 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5658 if (ret > 0) {
5659
5660 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5661 ret = -EREMOTEIO;
5662 goto out;
5663 }
5664 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5665 ret = -EIO;
5666 goto out;
5667 }
5668 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5669 ret = num;
5670 }
5671 out:
5672 kfree(txmsg);
5673 return ret;
5674 }
5675
5676 static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
5677 struct drm_dp_mst_port *port,
5678 struct i2c_msg *msgs, int num)
5679 {
5680 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5681 unsigned int i;
5682 struct drm_dp_sideband_msg_req_body msg;
5683 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5684 int ret;
5685
5686 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5687 if (!txmsg) {
5688 ret = -ENOMEM;
5689 goto out;
5690 }
5691 for (i = 0; i < num; i++) {
5692 memset(&msg, 0, sizeof(msg));
5693 msg.req_type = DP_REMOTE_I2C_WRITE;
5694 msg.u.i2c_write.port_number = port->port_num;
5695 msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
5696 msg.u.i2c_write.num_bytes = msgs[i].len;
5697 msg.u.i2c_write.bytes = msgs[i].buf;
5698
5699 memset(txmsg, 0, sizeof(*txmsg));
5700 txmsg->dst = mstb;
5701
5702 drm_dp_encode_sideband_req(&msg, txmsg);
5703 drm_dp_queue_down_tx(mgr, txmsg);
5704
5705 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5706 if (ret > 0) {
5707 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5708 ret = -EREMOTEIO;
5709 goto out;
5710 }
5711 } else {
5712 goto out;
5713 }
5714 }
5715 ret = num;
5716 out:
5717 kfree(txmsg);
5718 return ret;
5719 }
5720
5721 /* I2C device */
5722 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
5723 struct i2c_msg *msgs, int num)
5724 {
5725 struct drm_dp_aux *aux = adapter->algo_data;
5726 struct drm_dp_mst_port *port =
5727 container_of(aux, struct drm_dp_mst_port, aux);
5728 struct drm_dp_mst_branch *mstb;
5729 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5730 int ret;
5731
5732 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5733 if (!mstb)
5734 return -EREMOTEIO;
5735
5736 if (remote_i2c_read_ok(msgs, num)) {
5737 ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
5738 } else if (remote_i2c_write_ok(msgs, num)) {
5739 ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
5740 } else {
5741 drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
5742 ret = -EIO;
5743 }
5744
5745 drm_dp_mst_topology_put_mstb(mstb);
5746 return ret;
5747 }
5748
5749 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5750 {
5751 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5752 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5753 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5754 I2C_FUNC_10BIT_ADDR;
5755 }
5756
5757 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5758 .functionality = drm_dp_mst_i2c_functionality,
5759 .master_xfer = drm_dp_mst_i2c_xfer,
5760 };
5761
5762 /**
5763  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5764  * @port: The port to add the I2C bus on
5765  *
5766  * Returns 0 on success or a negative error code on failure.
5767  */
5768 static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
5769 {
5770 struct drm_dp_aux *aux = &port->aux;
5771 struct device *parent_dev = port->mgr->dev->dev;
5772
5773 aux->ddc.algo = &drm_dp_mst_i2c_algo;
5774 aux->ddc.algo_data = aux;
5775 aux->ddc.retries = 3;
5776
5777 aux->ddc.class = I2C_CLASS_DDC;
5778 aux->ddc.owner = THIS_MODULE;
5779
5780 aux->ddc.dev.parent = parent_dev;
5781 aux->ddc.dev.of_node = parent_dev->of_node;
5782
5783 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
5784 sizeof(aux->ddc.name));
5785
5786 return i2c_add_adapter(&aux->ddc);
5787 }
5788
5789 /**
5790  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5791  * @port: The port to remove the I2C bus from
5792  */
5793 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
5794 {
5795 i2c_del_adapter(&port->aux.ddc);
5796 }
5797
5798 /**
5799  * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5800  * @port: The port to check
5801  *
5802  * A single physical MST hub object can be represented in the topology
5803  * by multiple branches, with virtual ports between those branches.
5804  *
5805  * As of DP1.4, An MST hub with internal (virtual) ports must expose
5806  * certain DPCD registers over those ports. See sections 2.6.1.1.1
5807  * and 2.6.1.1.2 of the DP 1.4 specification.
5808  *
5809  * May acquire mgr->lock
5810  *
5811  * Returns:
5812  * true if the port is a virtual DP peer device, false otherwise
5813  */
5814 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5815 {
5816 struct drm_dp_mst_port *downstream_port;
5817
5818 if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5819 return false;
5820
5821 /* Virtual DP Sink (Internal Display Panel) */
5822 if (port->port_num >= 8)
5823 return true;
5824
5825 /* DP-to-HDMI Protocol Converter */
5826 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5827 !port->mcs &&
5828 port->ldps)
5829 return true;
5830
5831 /* DP-to-DP */
5832 mutex_lock(&port->mgr->lock);
5833 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5834 port->mstb &&
5835 port->mstb->num_ports == 2) {
5836 list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5837 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5838 !downstream_port->input) {
5839 mutex_unlock(&port->mgr->lock);
5840 return true;
5841 }
5842 }
5843 }
5844 mutex_unlock(&port->mgr->lock);
5845
5846 return false;
5847 }
5848
5849 /**
5850  * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
5851  * @port: The port to check. A leaf of the MST tree with an attached
5852  * display.
5853  *
5854  * Depending on the situation, DSC may be enabled via the endpoint aux,
5855  * the immediately upstream aux, or the connector's physical aux.
5856  *
5857  * This is both the correct aux to read DSC capabilities of, and the
5858  * correct aux to enable DSC on.
5859  *
5860  * May acquire mgr->lock (through drm_dp_mst_is_virtual_dpcd()).
5861  *
5862  * Returns:
5863  * NULL if DSC cannot be enabled on this port, otherwise the aux device
5864  */
5865 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5866 {
5867 struct drm_dp_mst_port *immediate_upstream_port;
5868 struct drm_dp_mst_port *fec_port;
5869 struct drm_dp_desc desc = {};
5870 u8 endpoint_fec;
5871 u8 endpoint_dsc;
5872
5873 if (!port)
5874 return NULL;
5875
5876 if (port->parent->port_parent)
5877 immediate_upstream_port = port->parent->port_parent;
5878 else
5879 immediate_upstream_port = NULL;
5880
5881 fec_port = immediate_upstream_port;
5882 while (fec_port) {
5883 /*
5884  * Each physical link (i.e. not a virtual port) between the
5885  * output and the primary device must support FEC
5886  */
5887 if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5888 !fec_port->fec_capable)
5889 return NULL;
5890
5891 fec_port = fec_port->parent->port_parent;
5892 }
5893
5894 /* DP-to-DP peer device */
5895 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5896 u8 upstream_dsc;
5897
5898 if (drm_dp_dpcd_read(&port->aux,
5899 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5900 return NULL;
5901 if (drm_dp_dpcd_read(&port->aux,
5902 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5903 return NULL;
5904 if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5905 DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
5906 return NULL;
5907
5908 /* Endpoint decompression with DP-to-DP peer device */
5909 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5910 (endpoint_fec & DP_FEC_CAPABLE) &&
5911 (upstream_dsc & 0x2) /* DSC passthrough */)
5912 return &port->aux;
5913
5914 /* Virtual DPCD decompression with DP-to-DP peer device */
5915 return &immediate_upstream_port->aux;
5916 }
5917
5918 /* Virtual DPCD decompression with ext-DPCD TCON */
5919 if (drm_dp_mst_is_virtual_dpcd(port))
5920 return &port->aux;
5921
5922 /*
5923  * Synaptics quirk
5924  * Applies to ports for which:
5925  * - Physical aux has Synaptics OUI
5926  * - DPv1.4 or higher
5927  * - Port is on primary branch device
5928  * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
5929  */
5930 if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5931 return NULL;
5932
5933 if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5934 port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5935 port->parent == port->mgr->mst_primary) {
5936 u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
5937
5938 if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
5939 return NULL;
5940
5941 if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
5942 ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
5943 != DP_DWN_STRM_PORT_TYPE_ANALOG))
5944 return port->mgr->aux;
5945 }
5946
5947 /*
5948  * The check below verifies if the MST sink
5949  * connected to the GPU is capable of DSC -
5950  * therefore the endpoint needs to be
5951  * both DSC and FEC capable.
5952  */
5953 if (drm_dp_dpcd_read(&port->aux,
5954 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5955 return NULL;
5956 if (drm_dp_dpcd_read(&port->aux,
5957 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5958 return NULL;
5959 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5960 (endpoint_fec & DP_FEC_CAPABLE))
5961 return &port->aux;
5962
5963 return NULL;
5964 }
5965 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
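/*
 * Example (editorial sketch): using the returned aux to read the 16-byte
 * DSC receiver capability block (DPCD 0x060-0x06f) of whichever device will
 * do the decompression; dsc_caps parsing is left to the driver.
 *
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *	u8 dsc_caps[16];
 *
 *	if (dsc_aux &&
 *	    drm_dp_dpcd_read(dsc_aux, DP_DSC_SUPPORT, dsc_caps,
 *			     sizeof(dsc_caps)) == sizeof(dsc_caps))
 *		... parse dsc_caps ...
 */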