0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022 #include <linux/clk.h>
0023 #include <linux/delay.h>
0024 #include <linux/err.h>
0025 #include <linux/firmware.h>
0026 #include <linux/io.h>
0027 #include <linux/iopoll.h>
0028 #include <linux/irq.h>
0029 #include <linux/media-bus-format.h>
0030 #include <linux/module.h>
0031 #include <linux/of.h>
0032 #include <linux/of_device.h>
0033 #include <linux/phy/phy.h>
0034 #include <linux/phy/phy-dp.h>
0035 #include <linux/platform_device.h>
0036 #include <linux/slab.h>
0037 #include <linux/wait.h>
0038
0039 #include <drm/display/drm_dp_helper.h>
0040 #include <drm/display/drm_hdcp_helper.h>
0041 #include <drm/drm_atomic.h>
0042 #include <drm/drm_atomic_helper.h>
0043 #include <drm/drm_atomic_state_helper.h>
0044 #include <drm/drm_bridge.h>
0045 #include <drm/drm_connector.h>
0046 #include <drm/drm_crtc_helper.h>
0047 #include <drm/drm_edid.h>
0048 #include <drm/drm_modeset_helper_vtables.h>
0049 #include <drm/drm_print.h>
0050 #include <drm/drm_probe_helper.h>
0051
0052 #include <asm/unaligned.h>
0053
0054 #include "cdns-mhdp8546-core.h"
0055 #include "cdns-mhdp8546-hdcp.h"
0056 #include "cdns-mhdp8546-j721e.h"
0057
0058 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
0059 {
0060 int ret, empty;
0061
0062 WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
0063
0064 ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
0065 empty, !empty, MAILBOX_RETRY_US,
0066 MAILBOX_TIMEOUT_US);
0067 if (ret < 0)
0068 return ret;
0069
0070 return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
0071 }
0072
0073 static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
0074 {
0075 int ret, full;
0076
0077 WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
0078
0079 ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
0080 full, !full, MAILBOX_RETRY_US,
0081 MAILBOX_TIMEOUT_US);
0082 if (ret < 0)
0083 return ret;
0084
0085 writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
0086
0087 return 0;
0088 }
0089
0090 static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
0091 u8 module_id, u8 opcode,
0092 u16 req_size)
0093 {
0094 u32 mbox_size, i;
0095 u8 header[4];
0096 int ret;
0097
0098
0099 for (i = 0; i < sizeof(header); i++) {
0100 ret = cdns_mhdp_mailbox_read(mhdp);
0101 if (ret < 0)
0102 return ret;
0103
0104 header[i] = ret;
0105 }
0106
0107 mbox_size = get_unaligned_be16(header + 2);
0108
0109 if (opcode != header[0] || module_id != header[1] ||
0110 req_size != mbox_size) {
0111
0112
0113
0114
0115 for (i = 0; i < mbox_size; i++)
0116 if (cdns_mhdp_mailbox_read(mhdp) < 0)
0117 break;
0118
0119 return -EINVAL;
0120 }
0121
0122 return 0;
0123 }
0124
0125 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
0126 u8 *buff, u16 buff_size)
0127 {
0128 u32 i;
0129 int ret;
0130
0131 for (i = 0; i < buff_size; i++) {
0132 ret = cdns_mhdp_mailbox_read(mhdp);
0133 if (ret < 0)
0134 return ret;
0135
0136 buff[i] = ret;
0137 }
0138
0139 return 0;
0140 }
0141
0142 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
0143 u8 opcode, u16 size, u8 *message)
0144 {
0145 u8 header[4];
0146 int ret, i;
0147
0148 header[0] = opcode;
0149 header[1] = module_id;
0150 put_unaligned_be16(size, header + 2);
0151
0152 for (i = 0; i < sizeof(header); i++) {
0153 ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
0154 if (ret)
0155 return ret;
0156 }
0157
0158 for (i = 0; i < size; i++) {
0159 ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
0160 if (ret)
0161 return ret;
0162 }
0163
0164 return 0;
0165 }
0166
/*
 * Read a 32-bit controller register through the firmware mailbox.
 * On any failure *value is cleared to 0 and a negative error is returned.
 */
static
int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
{
	u8 msg[4], resp[8];
	int ret;

	put_unaligned_be32(addr, msg);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
				     GENERAL_REGISTER_READ,
				     sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
					    GENERAL_REGISTER_READ,
					    sizeof(resp));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
	if (ret)
		goto out;

	/* The first four response bytes must echo the requested address. */
	if (memcmp(msg, resp, sizeof(msg))) {
		ret = -EINVAL;
		goto out;
	}

	/* Register value follows the echoed address, big endian. */
	*value = get_unaligned_be32(resp + 4);

out:
	mutex_unlock(&mhdp->mbox_mutex);
	if (ret) {
		dev_err(mhdp->dev, "Failed to read register\n");
		*value = 0;
	}

	return ret;
}
0210
0211 static
0212 int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
0213 {
0214 u8 msg[6];
0215 int ret;
0216
0217 put_unaligned_be16(addr, msg);
0218 put_unaligned_be32(val, msg + 2);
0219
0220 mutex_lock(&mhdp->mbox_mutex);
0221
0222 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
0223 DPTX_WRITE_REGISTER, sizeof(msg), msg);
0224
0225 mutex_unlock(&mhdp->mbox_mutex);
0226
0227 return ret;
0228 }
0229
0230 static
0231 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
0232 u8 start_bit, u8 bits_no, u32 val)
0233 {
0234 u8 field[8];
0235 int ret;
0236
0237 put_unaligned_be16(addr, field);
0238 field[2] = start_bit;
0239 field[3] = bits_no;
0240 put_unaligned_be32(val, field + 4);
0241
0242 mutex_lock(&mhdp->mbox_mutex);
0243
0244 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
0245 DPTX_WRITE_FIELD, sizeof(field), field);
0246
0247 mutex_unlock(&mhdp->mbox_mutex);
0248
0249 return ret;
0250 }
0251
/*
 * Read len bytes of sink DPCD starting at addr, using the firmware's
 * AUX proxy. The response carries a 5-byte echo header (length + address)
 * before the DPCD data itself.
 */
static
int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
			u32 addr, u8 *data, u16 len)
{
	u8 msg[5], reg[5];
	int ret;

	/* Request: BE16 length followed by BE24 DPCD address. */
	put_unaligned_be16(len, msg);
	put_unaligned_be24(addr, msg + 2);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_DPCD, sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_DPCD,
					    sizeof(reg) + len);
	if (ret)
		goto out;

	/* Discard the echoed length/address header... */
	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
	if (ret)
		goto out;

	/* ...then read the actual DPCD payload. */
	ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);

out:
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}
0286
/*
 * Write one byte of sink DPCD at addr via the firmware's AUX proxy.
 * The response header's echoed address is checked to confirm the write
 * landed where intended.
 */
static
int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
{
	u8 msg[6], reg[5];
	int ret;

	/* Request: BE16 length (always 1), BE24 address, data byte. */
	put_unaligned_be16(1, msg);
	put_unaligned_be24(addr, msg + 2);
	msg[5] = value;

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_WRITE_DPCD, sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_WRITE_DPCD, sizeof(reg));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
	if (ret)
		goto out;

	/* The firmware echoes the address at offset 2 of the response. */
	if (addr != get_unaligned_be24(reg + 2))
		ret = -EINVAL;

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret)
		dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
	return ret;
}
0323
/*
 * Switch the on-chip firmware between active and standby states.
 * The request frame is written byte-by-byte (header + 1-byte payload)
 * rather than via cdns_mhdp_mailbox_send(); the firmware answers by
 * echoing a frame of the same size, which is read back into msg.
 */
static
int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
{
	u8 msg[5];
	int ret, i;

	/* Hand-built mailbox frame: opcode, module, BE16 size (=1), state. */
	msg[0] = GENERAL_MAIN_CONTROL;
	msg[1] = MB_MODULE_ID_GENERAL;
	msg[2] = 0;
	msg[3] = 1;
	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;

	mutex_lock(&mhdp->mbox_mutex);

	for (i = 0; i < sizeof(msg); i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
		if (ret)
			goto out;
	}

	/* Read the firmware's response frame (same layout, reused buffer). */
	ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
	if (ret)
		goto out;

	ret = 0;

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		dev_err(mhdp->dev, "set firmware active failed\n");
	return ret;
}
0358
0359 static
0360 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
0361 {
0362 u8 status;
0363 int ret;
0364
0365 mutex_lock(&mhdp->mbox_mutex);
0366
0367 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
0368 DPTX_HPD_STATE, 0, NULL);
0369 if (ret)
0370 goto err_get_hpd;
0371
0372 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
0373 DPTX_HPD_STATE,
0374 sizeof(status));
0375 if (ret)
0376 goto err_get_hpd;
0377
0378 ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
0379 if (ret)
0380 goto err_get_hpd;
0381
0382 mutex_unlock(&mhdp->mbox_mutex);
0383
0384 dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
0385 status ? "" : "un");
0386
0387 return status;
0388
0389 err_get_hpd:
0390 mutex_unlock(&mhdp->mbox_mutex);
0391
0392 return ret;
0393 }
0394
/*
 * get_edid_block() callback for drm_do_get_edid(): fetch one 128-byte
 * EDID block through the firmware. Retries the whole transaction up to
 * four times; a successful attempt must echo the expected block length
 * and segment number in the 2-byte response prefix.
 */
static
int cdns_mhdp_get_edid_block(void *data, u8 *edid,
			     unsigned int block, size_t length)
{
	struct cdns_mhdp_device *mhdp = data;
	u8 msg[2], reg[2], i;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	for (i = 0; i < 4; i++) {
		/* E-DDC addressing: msg[0] = segment, msg[1] = block within segment. */
		msg[0] = block / 2;
		msg[1] = block % 2;

		ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
					     DPTX_GET_EDID, sizeof(msg), msg);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
						    DPTX_GET_EDID,
						    sizeof(reg) + length);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
		if (ret)
			continue;

		/* Accept only if firmware confirms length and segment. */
		if (reg[0] == length && reg[1] == block / 2)
			break;
	}

	mutex_unlock(&mhdp->mbox_mutex);

	if (ret)
		dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
			block, ret);

	return ret;
}
0440
/*
 * Read and clear the pending HPD event bits from the firmware.
 * Returns a DPTX_READ_EVENT_* bitmask on success, negative error on
 * mailbox failure.
 */
static
int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
{
	u8 event = 0;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_EVENT, 0, NULL);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_EVENT, sizeof(event));
	if (ret < 0)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		return ret;

	dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
		(event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
		(event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
		(event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
		(event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");

	return event;
}
0474
0475 static
0476 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
0477 unsigned int udelay, const u8 *lanes_data,
0478 u8 link_status[DP_LINK_STATUS_SIZE])
0479 {
0480 u8 payload[7];
0481 u8 hdr[5];
0482 u32 addr;
0483 int ret;
0484
0485 if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
0486 dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
0487 ret = -EINVAL;
0488 goto out;
0489 }
0490
0491 payload[0] = nlanes;
0492 put_unaligned_be16(udelay, payload + 1);
0493 memcpy(payload + 3, lanes_data, nlanes);
0494
0495 mutex_lock(&mhdp->mbox_mutex);
0496
0497 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
0498 DPTX_ADJUST_LT,
0499 sizeof(payload), payload);
0500 if (ret)
0501 goto out;
0502
0503
0504 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
0505 DPTX_READ_DPCD,
0506 sizeof(hdr) + DP_LINK_STATUS_SIZE);
0507 if (ret)
0508 goto out;
0509
0510 ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
0511 if (ret)
0512 goto out;
0513
0514 addr = get_unaligned_be24(hdr + 2);
0515 if (addr != DP_LANE0_1_STATUS)
0516 goto out;
0517
0518 ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
0519 DP_LINK_STATUS_SIZE);
0520
0521 out:
0522 mutex_unlock(&mhdp->mbox_mutex);
0523
0524 if (ret)
0525 dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
0526
0527 return ret;
0528 }
0529
0530
0531
0532
0533
0534
0535
0536
0537 static
0538 int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
0539 {
0540 u8 value;
0541 int err;
0542
0543
0544 if (link->revision < 0x11)
0545 return 0;
0546
0547 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
0548 if (err < 0)
0549 return err;
0550
0551 value &= ~DP_SET_POWER_MASK;
0552 value |= DP_SET_POWER_D0;
0553
0554 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
0555 if (err < 0)
0556 return err;
0557
0558
0559
0560
0561
0562
0563 usleep_range(1000, 2000);
0564
0565 return 0;
0566 }
0567
0568
0569
0570
0571
0572
0573
0574
0575 static
0576 int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
0577 struct cdns_mhdp_link *link)
0578 {
0579 u8 value;
0580 int err;
0581
0582
0583 if (link->revision < 0x11)
0584 return 0;
0585
0586 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
0587 if (err < 0)
0588 return err;
0589
0590 value &= ~DP_SET_POWER_MASK;
0591 value |= DP_SET_POWER_D3;
0592
0593 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
0594 if (err < 0)
0595 return err;
0596
0597 return 0;
0598 }
0599
0600
0601
0602
0603
0604
0605
0606
0607 static
0608 int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
0609 struct cdns_mhdp_link *link)
0610 {
0611 u8 values[2];
0612 int err;
0613
0614 values[0] = drm_dp_link_rate_to_bw_code(link->rate);
0615 values[1] = link->num_lanes;
0616
0617 if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
0618 values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
0619
0620 err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
0621 if (err < 0)
0622 return err;
0623
0624 return 0;
0625 }
0626
0627 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
0628 {
0629 return min(mhdp->host.link_rate, mhdp->sink.link_rate);
0630 }
0631
0632 static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
0633 {
0634 return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
0635 }
0636
0637 static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
0638 {
0639 return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
0640 }
0641
0642 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
0643 {
0644
0645 return mhdp->host.ssc && mhdp->sink.ssc;
0646 }
0647
0648 static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
0649 {
0650 dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
0651
0652 if (mhdp->plugged)
0653 return connector_status_connected;
0654 else
0655 return connector_status_disconnected;
0656 }
0657
/*
 * Read and validate the loaded firmware version from the controller's
 * version registers. Returns 0 if the firmware is one we know how to
 * talk to, -ENODEV otherwise.
 */
static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
{
	u32 major_num, minor_num, revision;
	u32 fw_ver, lib_ver;

	/* Version words are split across high/low byte registers. */
	fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
	       | readl(mhdp->regs + CDNS_VER_L);

	lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
		| readl(mhdp->regs + CDNS_LIB_L_ADDR);

	if (lib_ver < 33984) {
		/*
		 * Older firmware releases did not encode the version in a
		 * decimal MMmmRR layout; recognize the two known builds by
		 * their raw register values.
		 */
		major_num = 1;
		minor_num = 2;
		if (fw_ver == 26098) {
			revision = 15;
		} else if (lib_ver == 0 && fw_ver == 0) {
			revision = 17;
		} else {
			dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
				fw_ver, lib_ver);
			return -ENODEV;
		}
	} else {
		/* Newer firmware encodes the version as decimal MMmmRR. */
		major_num = fw_ver / 10000;
		minor_num = (fw_ver / 100) % 100;
		revision = (fw_ver % 10000) % 100;
	}

	dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
		revision);
	return 0;
}
0697
/*
 * Load the firmware image into the controller's instruction memory and
 * bring it up: release the CPU, wait for the keep-alive heartbeat,
 * verify the version, clear stale events and activate the firmware.
 * Marks the hardware ready and unmasks the SW event interrupt if a
 * bridge is already attached.
 */
static int cdns_mhdp_fw_activate(const struct firmware *fw,
				 struct cdns_mhdp_device *mhdp)
{
	unsigned int reg;
	int ret;

	/* Stall the embedded CPU while its instruction memory is written. */
	writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);

	memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);

	/* Release the stall and let the firmware boot. */
	writel(0, mhdp->regs + CDNS_APB_CTRL);

	/*
	 * Wait for the keep-alive counter to start ticking, which proves
	 * the firmware CPU is alive and executing.
	 */
	ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
				 reg & CDNS_KEEP_ALIVE_MASK, 500,
				 CDNS_KEEP_ALIVE_TIMEOUT);
	if (ret) {
		dev_err(mhdp->dev,
			"device didn't give any life sign: reg %d\n", reg);
		return ret;
	}

	ret = cdns_mhdp_check_fw_version(mhdp);
	if (ret)
		return ret;

	/* Drain any stale software events left from a previous run. */
	readl(mhdp->regs + CDNS_SW_EVENT0);
	readl(mhdp->regs + CDNS_SW_EVENT1);
	readl(mhdp->regs + CDNS_SW_EVENT2);
	readl(mhdp->regs + CDNS_SW_EVENT3);

	/* Leave standby and start normal firmware operation. */
	ret = cdns_mhdp_set_firmware_active(mhdp, true);
	if (ret)
		return ret;

	spin_lock(&mhdp->start_lock);

	mhdp->hw_state = MHDP_HW_READY;

	/*
	 * Only unmask the SW event interrupt once a bridge is attached;
	 * otherwise interrupts stay off until attach time.
	 */
	if (mhdp->bridge_attached)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);

	spin_unlock(&mhdp->start_lock);

	wake_up(&mhdp->fw_load_wq);
	dev_dbg(mhdp->dev, "DP FW activated\n");

	return 0;
}
0764
/*
 * request_firmware_nowait() completion callback: activate the firmware
 * and, if a bridge is already attached, report the (possibly changed)
 * connector state to DRM.
 */
static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
{
	struct cdns_mhdp_device *mhdp = context;
	bool bridge_attached;
	int ret;

	dev_dbg(mhdp->dev, "firmware callback\n");

	if (!fw || !fw->data) {
		dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
		return;
	}

	ret = cdns_mhdp_fw_activate(fw, mhdp);

	release_firmware(fw);

	if (ret)
		return;

	/*
	 * Sample bridge_attached under the lock; the notification itself
	 * is done outside it.
	 */
	spin_lock(&mhdp->start_lock);
	bridge_attached = mhdp->bridge_attached;
	spin_unlock(&mhdp->start_lock);
	if (bridge_attached) {
		/* Prefer a full hotplug event when a connector exists. */
		if (mhdp->connector.dev)
			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
		else
			drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
	}
}
0803
0804 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
0805 {
0806 int ret;
0807
0808 ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
0809 GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
0810 if (ret) {
0811 dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
0812 FW_NAME, ret);
0813 return ret;
0814 }
0815
0816 return 0;
0817 }
0818
/*
 * drm_dp_aux transfer hook. Only native AUX reads/writes are supported
 * (no I2C-over-AUX). Writes are issued one byte at a time because the
 * firmware's DPCD-write opcode takes a single byte.
 */
static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
	int ret;

	if (msg->request != DP_AUX_NATIVE_WRITE &&
	    msg->request != DP_AUX_NATIVE_READ)
		return -EOPNOTSUPP;

	if (msg->request == DP_AUX_NATIVE_WRITE) {
		const u8 *buf = msg->buffer;
		unsigned int i;

		for (i = 0; i < msg->size; ++i) {
			ret = cdns_mhdp_dpcd_write(mhdp,
						   msg->address + i, buf[i]);
			if (!ret)
				continue;

			dev_err(mhdp->dev,
				"Failed to write DPCD addr %u\n",
				msg->address + i);

			return ret;
		}
	} else {
		ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
					  msg->buffer, msg->size);
		if (ret) {
			dev_err(mhdp->dev,
				"Failed to read DPCD addr %u\n",
				msg->address);

			return ret;
		}
	}

	/* AUX convention: return the number of bytes transferred. */
	return msg->size;
}
0859
/*
 * Prepare sink, controller and PHY for link training: disable any
 * active training pattern, program framing/lane registers, configure
 * the PHY for the negotiated rate/lane count, then start TPS1 with
 * scrambling disabled as required for the clock-recovery phase.
 */
static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
{
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* Reset PHY config: training off, optional scrambler bypass. */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	/* Enhanced framing only when both sink and host support it. */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
			    mhdp->sink.enhanced & mhdp->host.enhanced);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
			    CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));

	cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
	phy_cfg.dp.link_rate = mhdp->link.rate / 100;
	phy_cfg.dp.lanes = mhdp->link.num_lanes;

	/* Training starts from zero voltage swing and pre-emphasis. */
	memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
	memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));

	phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
	phy_cfg.dp.set_lanes = true;
	phy_cfg.dp.set_rate = true;
	phy_cfg.dp.set_voltages = true;
	ret = phy_configure(mhdp->phy, &phy_cfg);
	if (ret) {
		dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
			__func__, ret);
		return ret;
	}

	/* Enable hardware training with TPS1 on the PHY side. */
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
			    CDNS_PHY_COMMON_CONFIG |
			    CDNS_PHY_TRAINING_EN |
			    CDNS_PHY_TRAINING_TYPE(1) |
			    CDNS_PHY_SCRAMBLER_BYPASS);

	/* Tell the sink to expect TPS1 with scrambling disabled. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);

	return 0;
}
0911
/*
 * Translate the sink's per-lane voltage-swing / pre-emphasis adjustment
 * requests (from link_status) into the DPCD TRAINING_LANEx_SET bytes
 * (lanes_data) and the matching PHY settings (phy_cfg), clamped to the
 * host's capabilities.
 */
static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
				       u8 link_status[DP_LINK_STATUS_SIZE],
				       u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
				       union phy_configure_opts *phy_cfg)
{
	u8 adjust, max_pre_emph, max_volt_swing;
	u8 set_volt, set_pre;
	unsigned int i;

	max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
		       << DP_TRAIN_PRE_EMPHASIS_SHIFT;
	max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Clamp the requested settings to what the host can drive. */
		adjust = drm_dp_get_adjust_request_voltage(link_status, i);
		set_volt = min(adjust, max_volt_swing);

		adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		set_pre = min(adjust, max_pre_emph)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT;

		/*
		 * Swing level + pre-emphasis level may not exceed 3; trade
		 * off swing in favor of the requested pre-emphasis.
		 */
		if (set_volt + set_pre > 3)
			set_volt = 3 - set_pre;

		phy_cfg->dp.voltage[i] = set_volt;
		lanes_data[i] = set_volt;

		if (set_volt == max_volt_swing)
			lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;

		phy_cfg->dp.pre[i] = set_pre;
		lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);

		if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
			lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	}
}
0955
0956 static
0957 void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
0958 unsigned int lane, u8 volt)
0959 {
0960 unsigned int s = ((lane & 1) ?
0961 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
0962 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
0963 unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
0964
0965 link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
0966 link_status[idx] |= volt << s;
0967 }
0968
0969 static
0970 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
0971 unsigned int lane, u8 pre_emphasis)
0972 {
0973 unsigned int s = ((lane & 1) ?
0974 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
0975 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
0976 unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
0977
0978 link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
0979 link_status[idx] |= pre_emphasis << s;
0980 }
0981
/*
 * Rewrite the sink's EQ-phase adjustment requests in link_status so the
 * next iteration honors the swing+pre-emphasis <= 3 constraint and any
 * host-forced swing/pre-emphasis levels.
 */
static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
					  u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	unsigned int i;
	u8 volt, pre;

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		volt = drm_dp_get_adjust_request_voltage(link_status, i);
		pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		/* Combined swing + pre-emphasis level may not exceed 3. */
		if (volt + pre > 3)
			cdns_mhdp_set_adjust_request_voltage(link_status, i,
							     3 - pre);
		/* Host may force fixed swing/pre-emphasis levels. */
		if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
			cdns_mhdp_set_adjust_request_voltage(link_status, i,
							     max_volt);
		if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
			cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
								  i, max_pre);
	}
}
1004
/*
 * Emit a debug line summarizing link-training state: lane count, rate,
 * and per-lane voltage-swing / pre-emphasis as "a/b/c/d" strings
 * truncated to the active lane count.
 */
static void cdns_mhdp_print_lt_status(const char *prefix,
				      struct cdns_mhdp_device *mhdp,
				      union phy_configure_opts *phy_cfg)
{
	char vs[8] = "0/0/0/0";
	char pe[8] = "0/0/0/0";
	unsigned int i;

	/* Overwrite the digit slots; '/' separators are already in place. */
	for (i = 0; i < mhdp->link.num_lanes; i++) {
		vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
		pe[i * 2] = '0' + phy_cfg->dp.pre[i];
	}

	/* Terminate right after the last active lane's digit. */
	vs[i * 2 - 1] = '\0';
	pe[i * 2 - 1] = '\0';

	dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
		prefix,
		mhdp->link.num_lanes, mhdp->link.rate / 100,
		vs, pe);
}
1026
/*
 * Channel-equalization phase of link training. Runs the selected TPS
 * (scrambling stays enabled only for TPS4), iterating adjust/check up
 * to five times. Returns true when channel EQ succeeds; false if clock
 * recovery is lost or the retries are exhausted.
 */
static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
					       u8 eq_tps,
					       unsigned int training_interval)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;
	bool r;

	dev_dbg(mhdp->dev, "Starting EQ phase\n");

	/* Enable hardware training for the chosen pattern on the PHY. */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
		CDNS_PHY_TRAINING_TYPE(eq_tps);
	if (eq_tps != 4)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	/* TPS4 keeps scrambling on; TPS1-3 require it disabled. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
			   CDNS_DP_TRAINING_PATTERN_4);

	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy, &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		/* Apply settings and re-read status after the AUX interval. */
		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
				    training_interval, lanes_data, link_status);

		/* Losing clock recovery during EQ is a hard failure. */
		r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
		if (!r)
			goto err;

		if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
			cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		fail_counter_short++;

		cdns_mhdp_adjust_requested_eq(mhdp, link_status);
	} while (fail_counter_short < 5);

err:
	cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);

	return false;
}
1091
/*
 * Rewrite the CR-phase adjustment requests in link_status with the
 * already-clamped values (req_volt/req_pre), applying any host-forced
 * swing or pre-emphasis levels.
 */
static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
					  u8 link_status[DP_LINK_STATUS_SIZE],
					  u8 *req_volt, u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	unsigned int i;

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		u8 val;

		val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
		      max_volt : req_volt[i];
		cdns_mhdp_set_adjust_request_voltage(link_status, i, val);

		val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
		      max_pre : req_pre[i];
		cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
	}
}
1112
/*
 * Evaluate one clock-recovery iteration:
 *  - *cr_done: all lanes achieved clock recovery;
 *  - req_volt/req_pre: sink's requested settings, clamped to host limits;
 *  - *same_before_adjust: some lane's request equals what was already
 *    driven (used to detect a stuck sink);
 *  - *max_swing_reached: a lane hit the swing+pre-emphasis ceiling
 *    without CR succeeding (training must abort).
 */
static
void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
			   bool *same_before_adjust, bool *max_swing_reached,
			   u8 before_cr[CDNS_DP_MAX_NUM_LANES],
			   u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
			   u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	bool same_pre, same_volt;
	unsigned int i;
	u8 adjust;

	*same_before_adjust = false;
	*max_swing_reached = false;
	*cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
		req_volt[i] = min(adjust, max_volt);

		adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
			 DP_TRAIN_PRE_EMPHASIS_SHIFT;
		req_pre[i] = min(adjust, max_pre);

		/* Compare against the settings that were just driven. */
		same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
			   req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
		same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
			    req_volt[i];
		if (same_pre && same_volt)
			*same_before_adjust = true;

		/* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
		if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
			*max_swing_reached = true;
			return;
		}
	}
}
1152
/*
 * Clock-recovery phase of link training (TPS1). Iterates adjust/check
 * until CR succeeds, the swing ceiling is hit, the sink repeats the
 * same request five times in a row, or ten total iterations elapse.
 * Returns true on success.
 */
static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
	fail_counter_short = 0, fail_counter_cr_long = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool cr_done;
	union phy_configure_opts phy_cfg;
	int ret;

	dev_dbg(mhdp->dev, "Starting CR phase\n");

	ret = cdns_mhdp_link_training_init(mhdp);
	if (ret)
		goto err;

	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
		u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
		bool same_before_adjust, max_swing_reached;

		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy, &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		/* CR phase uses the fixed 100 us AUX read interval. */
		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
				    lanes_data, link_status);

		cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
				      &max_swing_reached, lanes_data,
				      link_status,
				      requested_adjust_volt_swing,
				      requested_adjust_pre_emphasis);

		if (max_swing_reached) {
			dev_err(mhdp->dev, "CR: max swing reached\n");
			goto err;
		}

		if (cr_done) {
			cdns_mhdp_print_lt_status("CR phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		/* Not all CR_DONE bits set: count a long-loop failure. */
		fail_counter_cr_long++;

		/* Unchanged request: count toward the stuck-sink limit. */
		if (same_before_adjust) {
			fail_counter_short++;
			continue;
		}

		fail_counter_short = 0;

		/*
		 * Voltage swing/pre-emphasis adjusted: feed the clamped
		 * requests back for the next iteration.
		 */
		cdns_mhdp_adjust_requested_cr(mhdp, link_status,
					      requested_adjust_volt_swing,
					      requested_adjust_pre_emphasis);
	} while (fail_counter_short < 5 && fail_counter_cr_long < 10);

err:
	cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);

	return false;
}
1232
1233 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
1234 {
1235 switch (drm_dp_link_rate_to_bw_code(link->rate)) {
1236 case DP_LINK_BW_2_7:
1237 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
1238 break;
1239 case DP_LINK_BW_5_4:
1240 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
1241 break;
1242 case DP_LINK_BW_8_1:
1243 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
1244 break;
1245 }
1246 }
1247
1248 static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
1249 unsigned int training_interval)
1250 {
1251 u32 reg32;
1252 const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
1253 int ret;
1254
1255 while (1) {
1256 if (!cdns_mhdp_link_training_cr(mhdp)) {
1257 if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1258 DP_LINK_BW_1_62) {
1259 dev_dbg(mhdp->dev,
1260 "Reducing link rate during CR phase\n");
1261 cdns_mhdp_lower_link_rate(&mhdp->link);
1262
1263 continue;
1264 } else if (mhdp->link.num_lanes > 1) {
1265 dev_dbg(mhdp->dev,
1266 "Reducing lanes number during CR phase\n");
1267 mhdp->link.num_lanes >>= 1;
1268 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1269
1270 continue;
1271 }
1272
1273 dev_err(mhdp->dev,
1274 "Link training failed during CR phase\n");
1275 goto err;
1276 }
1277
1278 if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
1279 training_interval))
1280 break;
1281
1282 if (mhdp->link.num_lanes > 1) {
1283 dev_dbg(mhdp->dev,
1284 "Reducing lanes number during EQ phase\n");
1285 mhdp->link.num_lanes >>= 1;
1286
1287 continue;
1288 } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1289 DP_LINK_BW_1_62) {
1290 dev_dbg(mhdp->dev,
1291 "Reducing link rate during EQ phase\n");
1292 cdns_mhdp_lower_link_rate(&mhdp->link);
1293 mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1294
1295 continue;
1296 }
1297
1298 dev_err(mhdp->dev, "Link training failed during EQ phase\n");
1299 goto err;
1300 }
1301
1302 dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
1303 mhdp->link.num_lanes, mhdp->link.rate / 100);
1304
1305 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1306 mhdp->host.scrambler ? 0 :
1307 DP_LINK_SCRAMBLING_DISABLE);
1308
1309 ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, ®32);
1310 if (ret < 0) {
1311 dev_err(mhdp->dev,
1312 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1313 ret);
1314 return ret;
1315 }
1316 reg32 &= ~GENMASK(1, 0);
1317 reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
1318 reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
1319 reg32 |= CDNS_DP_FRAMER_EN;
1320 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
1321
1322
1323 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1324 if (!mhdp->host.scrambler)
1325 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1326 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1327
1328 return 0;
1329 err:
1330
1331 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1332 if (!mhdp->host.scrambler)
1333 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1334 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1335
1336 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1337 DP_TRAINING_PATTERN_DISABLE);
1338
1339 return -EIO;
1340 }
1341
1342 static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
1343 u32 interval)
1344 {
1345 if (interval == 0)
1346 return 400;
1347 if (interval < 5)
1348 return 4000 << (interval - 1);
1349 dev_err(mhdp->dev,
1350 "wrong training interval returned by DPCD: %d\n", interval);
1351 return 0;
1352 }
1353
1354 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
1355 {
1356 unsigned int link_rate;
1357
1358
1359
1360 mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
1361 if (!mhdp->host.lanes_cnt)
1362 mhdp->host.lanes_cnt = 4;
1363
1364 link_rate = mhdp->phy->attrs.max_link_rate;
1365 if (!link_rate)
1366 link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
1367 else
1368
1369 link_rate *= 100;
1370
1371 mhdp->host.link_rate = link_rate;
1372 mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
1373 mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
1374 mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
1375 CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1376 CDNS_SUPPORT_TPS(4);
1377 mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
1378 mhdp->host.fast_link = false;
1379 mhdp->host.enhanced = true;
1380 mhdp->host.scrambler = true;
1381 mhdp->host.ssc = false;
1382 }
1383
1384 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
1385 u8 dpcd[DP_RECEIVER_CAP_SIZE])
1386 {
1387 mhdp->sink.link_rate = mhdp->link.rate;
1388 mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
1389 mhdp->sink.enhanced = !!(mhdp->link.capabilities &
1390 DP_LINK_CAP_ENHANCED_FRAMING);
1391
1392
1393 mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
1394 DP_MAX_DOWNSPREAD_0_5);
1395
1396
1397 mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
1398 if (drm_dp_tps3_supported(dpcd))
1399 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
1400 if (drm_dp_tps4_supported(dpcd))
1401 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
1402
1403
1404 mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
1405 DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
1406 }
1407
1408 static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
1409 {
1410 u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
1411 u32 resp, interval, interval_us;
1412 u8 ext_cap_chk = 0;
1413 unsigned int addr;
1414 int err;
1415
1416 WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1417
1418 drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
1419 &ext_cap_chk);
1420
1421 if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
1422 addr = DP_DP13_DPCD_REV;
1423 else
1424 addr = DP_DPCD_REV;
1425
1426 err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
1427 if (err < 0) {
1428 dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
1429 return err;
1430 }
1431
1432 mhdp->link.revision = dpcd[0];
1433 mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
1434 mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
1435
1436 if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
1437 mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
1438
1439 dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
1440 cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
1441
1442 cdns_mhdp_fill_sink_caps(mhdp, dpcd);
1443
1444 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1445 mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1446
1447
1448 err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
1449 if (err < 0) {
1450 dev_err(mhdp->dev,
1451 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1452 err);
1453 return err;
1454 }
1455
1456 resp &= ~CDNS_DP_FRAMER_EN;
1457 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
1458
1459
1460 amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
1461 amp[1] = DP_SET_ANSI_8B10B;
1462 drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
1463
1464 if (mhdp->host.fast_link & mhdp->sink.fast_link) {
1465 dev_err(mhdp->dev, "fastlink not supported\n");
1466 return -EOPNOTSUPP;
1467 }
1468
1469 interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
1470 interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
1471 if (!interval_us ||
1472 cdns_mhdp_link_training(mhdp, interval_us)) {
1473 dev_err(mhdp->dev, "Link training failed. Exiting.\n");
1474 return -EIO;
1475 }
1476
1477 mhdp->link_up = true;
1478
1479 return 0;
1480 }
1481
/*
 * Take the link down: power down the sink (only while it is still
 * plugged — AUX would fail otherwise) and clear the link-up flag.
 * Caller must hold link_mutex.
 */
static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
{
	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));

	if (mhdp->plugged)
		cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);

	mhdp->link_up = false;
}
1491
1492 static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
1493 struct drm_connector *connector)
1494 {
1495 if (!mhdp->plugged)
1496 return NULL;
1497
1498 return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
1499 }
1500
1501 static int cdns_mhdp_get_modes(struct drm_connector *connector)
1502 {
1503 struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
1504 struct edid *edid;
1505 int num_modes;
1506
1507 if (!mhdp->plugged)
1508 return 0;
1509
1510 edid = cdns_mhdp_get_edid(mhdp, connector);
1511 if (!edid) {
1512 dev_err(mhdp->dev, "Failed to read EDID\n");
1513 return 0;
1514 }
1515
1516 drm_connector_update_edid_property(connector, edid);
1517 num_modes = drm_add_edid_modes(connector, edid);
1518 kfree(edid);
1519
1520
1521
1522
1523
1524 if (connector->display_info.color_formats &&
1525 !(connector->display_info.color_formats &
1526 mhdp->display_fmt.color_format))
1527 dev_warn(mhdp->dev,
1528 "%s: No supported color_format found (0x%08x)\n",
1529 __func__, connector->display_info.color_formats);
1530
1531 if (connector->display_info.bpc &&
1532 connector->display_info.bpc < mhdp->display_fmt.bpc)
1533 dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
1534 __func__, connector->display_info.bpc,
1535 mhdp->display_fmt.bpc);
1536
1537 return num_modes;
1538 }
1539
1540 static int cdns_mhdp_connector_detect(struct drm_connector *conn,
1541 struct drm_modeset_acquire_ctx *ctx,
1542 bool force)
1543 {
1544 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1545
1546 return cdns_mhdp_detect(mhdp);
1547 }
1548
1549 static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
1550 {
1551 u32 bpp;
1552
1553 if (fmt->y_only)
1554 return fmt->bpc;
1555
1556 switch (fmt->color_format) {
1557 case DRM_COLOR_FORMAT_RGB444:
1558 case DRM_COLOR_FORMAT_YCBCR444:
1559 bpp = fmt->bpc * 3;
1560 break;
1561 case DRM_COLOR_FORMAT_YCBCR422:
1562 bpp = fmt->bpc * 2;
1563 break;
1564 case DRM_COLOR_FORMAT_YCBCR420:
1565 bpp = fmt->bpc * 3 / 2;
1566 break;
1567 default:
1568 bpp = fmt->bpc * 3;
1569 WARN_ON(1);
1570 }
1571 return bpp;
1572 }
1573
1574 static
1575 bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
1576 const struct drm_display_mode *mode,
1577 unsigned int lanes, unsigned int rate)
1578 {
1579 u32 max_bw, req_bw, bpp;
1580
1581
1582
1583
1584
1585
1586
1587
1588 bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1589 req_bw = mode->clock * bpp / 8;
1590 max_bw = lanes * rate;
1591 if (req_bw > max_bw) {
1592 dev_dbg(mhdp->dev,
1593 "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
1594 mode->name, req_bw, max_bw);
1595
1596 return false;
1597 }
1598
1599 return true;
1600 }
1601
1602 static
1603 enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
1604 struct drm_display_mode *mode)
1605 {
1606 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1607
1608 mutex_lock(&mhdp->link_mutex);
1609
1610 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1611 mhdp->link.rate)) {
1612 mutex_unlock(&mhdp->link_mutex);
1613 return MODE_CLOCK_HIGH;
1614 }
1615
1616 mutex_unlock(&mhdp->link_mutex);
1617 return MODE_OK;
1618 }
1619
/*
 * Connector .atomic_check for HDCP content protection: force a full
 * modeset whenever the desired protection state or content type
 * changes, and downgrade ENABLED back to DESIRED when the connector
 * loses its CRTC (protection must be re-established on next enable).
 * No-op when the hardware does not support HDCP.
 */
static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
					    struct drm_atomic_state *state)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
	struct drm_connector_state *old_state, *new_state;
	struct drm_crtc_state *crtc_state;
	u64 old_cp, new_cp;

	if (!mhdp->hdcp_supported)
		return 0;

	old_state = drm_atomic_get_old_connector_state(state, conn);
	new_state = drm_atomic_get_new_connector_state(state, conn);
	old_cp = old_state->content_protection;
	new_cp = new_state->content_protection;

	/* Content-type change requires re-authentication -> modeset */
	if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		goto mode_changed;
	}

	/* Losing the CRTC: ENABLED cannot persist, fall back to DESIRED */
	if (!new_state->crtc) {
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return 0;
	}

	/* No change, or the kernel itself is flipping DESIRED->ENABLED */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
		return 0;

mode_changed:
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	crtc_state->mode_changed = true;

	return 0;
}
1659
/* Connector helper vtable (probe/detect/validation callbacks). */
static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
	.detect_ctx = cdns_mhdp_connector_detect,
	.get_modes = cdns_mhdp_get_modes,
	.mode_valid = cdns_mhdp_mode_valid,
	.atomic_check = cdns_mhdp_connector_atomic_check,
};
1666
/* Connector vtable: stock atomic helpers, standard probe/cleanup. */
static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.reset = drm_atomic_helper_connector_reset,
	.destroy = drm_connector_cleanup,
};
1674
/*
 * Create and wire up the driver-managed DRM connector (used when the
 * bridge is attached without DRM_BRIDGE_ATTACH_NO_CONNECTOR): init the
 * DisplayPort connector, register helpers, fix the bus format, attach
 * it to the parent encoder, and expose the HDCP content-protection
 * property when supported. Returns 0 on success, negative errno.
 */
static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
{
	u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
	struct drm_connector *conn = &mhdp->connector;
	struct drm_bridge *bridge = &mhdp->bridge;
	int ret;

	if (!bridge->encoder) {
		dev_err(mhdp->dev, "Parent encoder object not found");
		return -ENODEV;
	}

	/* HPD is interrupt driven, no polling needed */
	conn->polled = DRM_CONNECTOR_POLL_HPD;

	ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
		return ret;
	}

	drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);

	ret = drm_display_info_set_bus_formats(&conn->display_info,
					       &bus_format, 1);
	if (ret)
		return ret;

	ret = drm_connector_attach_encoder(conn, bridge->encoder);
	if (ret) {
		dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
		return ret;
	}

	/* ret is 0 here; only overwritten when the HDCP property is added */
	if (mhdp->hdcp_supported)
		ret = drm_connector_attach_content_protection_property(conn, true);

	return ret;
}
1714
/*
 * Bridge .attach: register the AUX channel, optionally create the
 * driver-managed connector, mark the bridge attached (under start_lock,
 * shared with the firmware-start path), and unmask the SW event
 * interrupt once the hardware is ready so HPD events can flow.
 */
static int cdns_mhdp_attach(struct drm_bridge *bridge,
			    enum drm_bridge_attach_flags flags)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	bool hw_ready;
	int ret;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mhdp->aux.drm_dev = bridge->dev;
	ret = drm_dp_aux_register(&mhdp->aux);
	if (ret < 0)
		return ret;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		ret = cdns_mhdp_connector_init(mhdp);
		if (ret)
			goto aux_unregister;
	}

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = true;
	hw_ready = mhdp->hw_state == MHDP_HW_READY;

	spin_unlock(&mhdp->start_lock);

	/* Enable SW event interrupts only once firmware is running */
	if (hw_ready)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);

	return 0;
aux_unregister:
	drm_dp_aux_unregister(&mhdp->aux);
	return ret;
}
1752
/*
 * Program the DP framer and MSA (Main Stream Attribute) registers for
 * the given mode on the device's current stream, then enable the
 * framer. Register write order follows the hardware programming
 * sequence and should not be rearranged.
 */
static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
				      const struct drm_display_mode *mode)
{
	unsigned int dp_framer_sp = 0, msa_horizontal_1,
		msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
		misc0 = 0, misc1 = 0, pxl_repr,
		front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
		dp_vertical_1;
	u8 stream_id = mhdp->stream_id;
	u32 bpp, bpc, pxlfmt, framer;
	int ret;

	pxlfmt = mhdp->display_fmt.color_format;
	bpc = mhdp->display_fmt.bpc;

	/*
	 * ITU-R BT.709 coefficients for YCbCr at >= 720 lines; smaller
	 * modes use the BT.601 default (misc0 bit clear).
	 */
	if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 ||
	     pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720)
		misc0 = DP_YCBCR_COEFFICIENTS_ITU709;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	/* Pixel representation in the framer + MSA MISC0 color encoding */
	switch (pxlfmt) {
	case DRM_COLOR_FORMAT_RGB444:
		pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_RGB;
		break;
	case DRM_COLOR_FORMAT_YCBCR444:
		pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCBCR422:
		pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCBCR420:
		/* 4:2:0 is signalled via VSC SDP (misc1 below), not MISC0 */
		pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
		break;
	default:
		pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
	}

	/* Bit depth in both the MSA and the framer pixel representation */
	switch (bpc) {
	case 6:
		misc0 |= DP_TEST_BIT_DEPTH_6;
		pxl_repr |= CDNS_DP_FRAMER_6_BPC;
		break;
	case 8:
		misc0 |= DP_TEST_BIT_DEPTH_8;
		pxl_repr |= CDNS_DP_FRAMER_8_BPC;
		break;
	case 10:
		misc0 |= DP_TEST_BIT_DEPTH_10;
		pxl_repr |= CDNS_DP_FRAMER_10_BPC;
		break;
	case 12:
		misc0 |= DP_TEST_BIT_DEPTH_12;
		pxl_repr |= CDNS_DP_FRAMER_12_BPC;
		break;
	case 16:
		misc0 |= DP_TEST_BIT_DEPTH_16;
		pxl_repr |= CDNS_DP_FRAMER_16_BPC;
		break;
	}

	bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;

	cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
			    bnd_hsync2vsync);

	/* Sync polarity handling in the hsync2vsync block */
	hsync2vsync_pol_ctrl = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
			    hsync2vsync_pol_ctrl);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);

	/* Framer scan/polarity settings mirror the mode flags */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);

	front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
	back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
			    CDNS_DP_FRONT_PORCH(front_porch) |
			    CDNS_DP_BACK_PORCH(back_porch));

	/* Active line payload in bytes */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
			    mode->crtc_hdisplay * bpp / 8);

	/* MSA horizontal: total width and hsync start offset */
	msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
			    CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
			    CDNS_DP_MSAH0_HSYNC_START(msa_h0));

	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
	msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
			   CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
			    msa_horizontal_1);

	/* MSA vertical: total height and vsync start offset */
	msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
			    CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
			    CDNS_DP_MSAV0_VSYNC_START(msa_v0));

	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
	msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
			 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
			    msa_vertical_1);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		misc1 = DP_TEST_INTERLACED;
	if (mhdp->display_fmt.y_only)
		misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
	/*
	 * NOTE(review): for 4:2:0 misc1 is overwritten (not OR-ed) with
	 * the VSC SDP flag, dropping any interlace/Y-only bits set just
	 * above — confirm this is the intended hardware behavior.
	 */
	if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420)
		misc1 = CDNS_DP_TEST_VSC_SDP;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
			    misc0 | (misc1 << 8));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
			    CDNS_DP_H_HSYNC_WIDTH(hsync) |
			    CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
			    CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
			    CDNS_DP_V0_VSTART(msa_v0));

	dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);

	cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
				(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
				CDNS_DP_VB_ID_INTERLACED : 0);

	/* Finally enable the framer and leave no-video mode */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return;
	}
	framer |= CDNS_DP_FRAMER_EN;
	framer &= ~CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
}
1923
/*
 * Enable single-stream transport: compute the transfer-unit (TU) valid
 * symbols and line thresholds from the pixel clock vs. link bandwidth
 * ratio, program them, then hand off to cdns_mhdp_configure_video().
 * The integer-division order below is deliberate (fixed-point math);
 * do not "simplify" the expressions.
 */
static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
				 const struct drm_display_mode *mode)
{
	u32 rate, vs, required_bandwidth, available_bandwidth;
	s32 line_thresh1, line_thresh2, line_thresh = 0;
	int pxlclock = mode->crtc_clock;
	u32 tu_size = 64;
	u32 bpp;

	/* Link rate in Mb/s per lane */
	rate = mhdp->link.rate / 1000;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	required_bandwidth = pxlclock * bpp / 8;
	available_bandwidth = mhdp->link.num_lanes * rate;

	/* Valid symbols per TU; cap below the TU size */
	vs = tu_size * required_bandwidth / available_bandwidth;
	vs /= 1000;

	if (vs == tu_size)
		vs = tu_size - 1;

	line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
	line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
	line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
	line_thresh = (line_thresh >> 5) + 2;

	/* SST always uses stream 0 */
	mhdp->stream_id = 0;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
			    CDNS_DP_FRAMER_TU_VS(vs) |
			    CDNS_DP_FRAMER_TU_SIZE(tu_size) |
			    CDNS_DP_FRAMER_TU_CNT_RST_EN);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
			    line_thresh & GENMASK(5, 0));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
			    CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
						   0 : tu_size - vs));

	cdns_mhdp_configure_video(mhdp, mode);
}
1968
/*
 * Bridge .atomic_enable: train the link if needed, run the platform
 * enable hook, ungate the VIF clock, optionally kick off HDCP, verify
 * the mode still fits the trained link, and start the video stream.
 * On failure (ret < 0) the modeset-retry worker is scheduled, which
 * marks the connector link status bad.
 */
static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
				    struct drm_bridge_state *bridge_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	struct drm_atomic_state *state = bridge_state->base.state;
	struct cdns_mhdp_bridge_state *mhdp_state;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_bridge_state *new_state;
	const struct drm_display_mode *mode;
	u32 resp;
	int ret;

	dev_dbg(mhdp->dev, "bridge enable\n");

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->plugged && !mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
		mhdp->info->ops->enable(mhdp);

	/* Enable and un-reset the video interface clock */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	if (ret < 0) {
		dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
		goto out;
	}

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);

	/*
	 * NOTE(review): the WARN_ON bail-outs below reach "out" with ret
	 * holding the (non-negative) status of the CAR read above, so no
	 * modeset retry is scheduled for these paths — confirm intended.
	 */
	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	if (WARN_ON(!connector))
		goto out;

	conn_state = drm_atomic_get_new_connector_state(state, connector);
	if (WARN_ON(!conn_state))
		goto out;

	/* HDCP enable takes its own locks; drop link_mutex around it */
	if (mhdp->hdcp_supported &&
	    mhdp->hw_state == MHDP_HW_READY &&
	    conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_unlock(&mhdp->link_mutex);
		cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
		mutex_lock(&mhdp->link_mutex);
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
	if (WARN_ON(!crtc_state))
		goto out;

	mode = &crtc_state->adjusted_mode;

	new_state = drm_atomic_get_new_bridge_state(state, bridge);
	if (WARN_ON(!new_state))
		goto out;

	/* Link may have trained below what .mode_valid saw: re-check BW */
	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		ret = -EINVAL;
		goto out;
	}

	cdns_mhdp_sst_enable(mhdp, mode);

	mhdp_state = to_cdns_mhdp_bridge_state(new_state);

	/* Keep a copy of the mode for HPD-triggered re-enable */
	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
	drm_mode_set_name(mhdp_state->current_mode);

	dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);

	mhdp->bridge_enabled = true;

out:
	mutex_unlock(&mhdp->link_mutex);
	if (ret < 0)
		schedule_work(&mhdp->modeset_retry_work);
}
2056
/*
 * Bridge .atomic_disable: tear everything down in reverse order of
 * enable — HDCP off, framer stopped (no-video mode), link powered
 * down, VIF clock gated, then the platform disable hook.
 */
static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
				     struct drm_bridge_state *bridge_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	u32 resp;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->hdcp_supported)
		cdns_mhdp_hdcp_disable(mhdp);

	mhdp->bridge_enabled = false;
	cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
	resp &= ~CDNS_DP_FRAMER_EN;
	resp |= CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);

	cdns_mhdp_link_down(mhdp);

	/* Disable and reset the video interface clock */
	cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
		mhdp->info->ops->disable(mhdp);

	mutex_unlock(&mhdp->link_mutex);
}
2088
/*
 * Bridge .detach: unregister AUX, mark the bridge detached (under
 * start_lock) and mask all APB interrupts so no HPD work is scheduled
 * after detach.
 */
static void cdns_mhdp_detach(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	dev_dbg(mhdp->dev, "%s\n", __func__);

	drm_dp_aux_unregister(&mhdp->aux);

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = false;

	spin_unlock(&mhdp->start_lock);

	/* Mask every interrupt source */
	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
}
2105
2106 static struct drm_bridge_state *
2107 cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
2108 {
2109 struct cdns_mhdp_bridge_state *state;
2110
2111 state = kzalloc(sizeof(*state), GFP_KERNEL);
2112 if (!state)
2113 return NULL;
2114
2115 __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
2116
2117 return &state->base;
2118 }
2119
2120 static void
2121 cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
2122 struct drm_bridge_state *state)
2123 {
2124 struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2125
2126 cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
2127
2128 if (cdns_mhdp_state->current_mode) {
2129 drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
2130 cdns_mhdp_state->current_mode = NULL;
2131 }
2132
2133 kfree(cdns_mhdp_state);
2134 }
2135
2136 static struct drm_bridge_state *
2137 cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
2138 {
2139 struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2140
2141 cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
2142 if (!cdns_mhdp_state)
2143 return NULL;
2144
2145 __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
2146
2147 return &cdns_mhdp_state->base;
2148 }
2149
2150 static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
2151 struct drm_bridge_state *bridge_state,
2152 struct drm_crtc_state *crtc_state,
2153 struct drm_connector_state *conn_state)
2154 {
2155 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2156 const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
2157
2158 mutex_lock(&mhdp->link_mutex);
2159
2160 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2161 mhdp->link.rate)) {
2162 dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2163 __func__, mode->name, mhdp->link.num_lanes,
2164 mhdp->link.rate / 100);
2165 mutex_unlock(&mhdp->link_mutex);
2166 return -EINVAL;
2167 }
2168
2169 mutex_unlock(&mhdp->link_mutex);
2170 return 0;
2171 }
2172
2173 static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
2174 {
2175 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2176
2177 return cdns_mhdp_detect(mhdp);
2178 }
2179
/* Bridge .get_edid: delegate to the device-level EDID reader. */
static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
					      struct drm_connector *connector)
{
	return cdns_mhdp_get_edid(bridge_to_mhdp(bridge), connector);
}
2187
/*
 * Bridge .hpd_enable: unmask the SW event interrupt (HPD source), but
 * only once the bridge is attached — mirrors the unmask in attach().
 */
static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	/* Enable SW event interrupts */
	if (mhdp->bridge_attached)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);
}
2197
/* Bridge .hpd_disable: mask the SW event (HPD) interrupt. */
static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
}
2204
/* Bridge vtable: atomic ops, state management, detect/EDID and HPD. */
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
	.atomic_enable = cdns_mhdp_atomic_enable,
	.atomic_disable = cdns_mhdp_atomic_disable,
	.atomic_check = cdns_mhdp_atomic_check,
	.attach = cdns_mhdp_attach,
	.detach = cdns_mhdp_detach,
	.atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
	.atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
	.atomic_reset = cdns_mhdp_bridge_atomic_reset,
	.detect = cdns_mhdp_bridge_detect,
	.get_edid = cdns_mhdp_bridge_get_edid,
	.hpd_enable = cdns_mhdp_bridge_hpd_enable,
	.hpd_disable = cdns_mhdp_bridge_hpd_disable,
};
2219
2220 static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
2221 {
2222 int hpd_event, hpd_status;
2223
2224 *hpd_pulse = false;
2225
2226 hpd_event = cdns_mhdp_read_hpd_event(mhdp);
2227
2228
2229 if (hpd_event < 0) {
2230 dev_warn(mhdp->dev, "%s: read event failed: %d\n",
2231 __func__, hpd_event);
2232 return false;
2233 }
2234
2235 hpd_status = cdns_mhdp_get_hpd_status(mhdp);
2236 if (hpd_status < 0) {
2237 dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
2238 __func__, hpd_status);
2239 return false;
2240 }
2241
2242 if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
2243 *hpd_pulse = true;
2244
2245 return !!hpd_status;
2246 }
2247
/*
 * React to an HPD event: re-read the plug state, tear the link down on
 * unplug, retrain on an HPD pulse that indicates a degraded link, and
 * re-enable the previously configured video stream if the bridge was
 * enabled. Returns 0 on success or negative errno (caller schedules the
 * modeset-retry worker on failure).
 */
static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
{
	struct cdns_mhdp_bridge_state *cdns_bridge_state;
	struct drm_display_mode *current_mode;
	bool old_plugged = mhdp->plugged;
	struct drm_bridge_state *state;
	u8 status[DP_LINK_STATUS_SIZE];
	bool hpd_pulse;
	int ret = 0;

	mutex_lock(&mhdp->link_mutex);

	mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);

	if (!mhdp->plugged) {
		/* Unplugged: drop the link and restore host defaults */
		cdns_mhdp_link_down(mhdp);
		mhdp->link.rate = mhdp->host.link_rate;
		mhdp->link.num_lanes = mhdp->host.lanes_cnt;
		goto out;
	}

	/*
	 * An HPD pulse with no plug-state change means the sink wants
	 * attention: check the link status and retrain if it degraded.
	 */
	if (hpd_pulse && old_plugged == mhdp->plugged) {
		ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);

		/* Link is still good: nothing to do */
		if (ret > 0 &&
		    drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
		    drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
			goto out;

		/* Force retraining below */
		mhdp->link_up = false;
	}

	if (!mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	/* Re-enable the stream with the mode saved at atomic_enable time */
	if (mhdp->bridge_enabled) {
		state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
		if (!state) {
			ret = -EINVAL;
			goto out;
		}

		cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
		if (!cdns_bridge_state) {
			ret = -EINVAL;
			goto out;
		}

		current_mode = cdns_bridge_state->current_mode;
		if (!current_mode) {
			ret = -EINVAL;
			goto out;
		}

		if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
					    mhdp->link.rate)) {
			ret = -EINVAL;
			goto out;
		}

		dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
			current_mode->name);

		cdns_mhdp_sst_enable(mhdp, current_mode);
	}
out:
	mutex_unlock(&mhdp->link_mutex);
	return ret;
}
2331
/*
 * Deferred worker run after a failed enable/retrain: mark the
 * connector's link status BAD (under the mode_config mutex, as the
 * property helper requires) and send a hotplug event so userspace
 * re-probes and retries the modeset.
 */
static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
{
	struct cdns_mhdp_device *mhdp;
	struct drm_connector *conn;

	mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);

	conn = &mhdp->connector;

	/* Grab the locks before changing connector property */
	mutex_lock(&conn->dev->mode_config.mutex);

	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&conn->dev->mode_config.mutex);

	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(mhdp->bridge.dev);
}
2354
2355 static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
2356 {
2357 struct cdns_mhdp_device *mhdp = data;
2358 u32 apb_stat, sw_ev0;
2359 bool bridge_attached;
2360
2361 apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
2362 if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
2363 return IRQ_NONE;
2364
2365 sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
2366
2367
2368
2369
2370
2371
2372
2373 spin_lock(&mhdp->start_lock);
2374 bridge_attached = mhdp->bridge_attached;
2375 spin_unlock(&mhdp->start_lock);
2376
2377 if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
2378 schedule_work(&mhdp->hpd_work);
2379 }
2380
2381 if (sw_ev0 & ~CDNS_DPTX_HPD) {
2382 mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
2383 wake_up(&mhdp->sw_events_wq);
2384 }
2385
2386 return IRQ_HANDLED;
2387 }
2388
2389 u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
2390 {
2391 u32 ret;
2392
2393 ret = wait_event_timeout(mhdp->sw_events_wq,
2394 mhdp->sw_events & event,
2395 msecs_to_jiffies(500));
2396 if (!ret) {
2397 dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
2398 goto sw_event_out;
2399 }
2400
2401 ret = mhdp->sw_events;
2402 mhdp->sw_events &= ~event;
2403
2404 sw_event_out:
2405 return ret;
2406 }
2407
2408 static void cdns_mhdp_hpd_work(struct work_struct *work)
2409 {
2410 struct cdns_mhdp_device *mhdp = container_of(work,
2411 struct cdns_mhdp_device,
2412 hpd_work);
2413 int ret;
2414
2415 ret = cdns_mhdp_update_link_status(mhdp);
2416 if (mhdp->connector.dev) {
2417 if (ret < 0)
2418 schedule_work(&mhdp->modeset_retry_work);
2419 else
2420 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2421 } else {
2422 drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
2423 }
2424 }
2425
2426 static int cdns_mhdp_probe(struct platform_device *pdev)
2427 {
2428 struct device *dev = &pdev->dev;
2429 struct cdns_mhdp_device *mhdp;
2430 unsigned long rate;
2431 struct clk *clk;
2432 int ret;
2433 int irq;
2434
2435 mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
2436 if (!mhdp)
2437 return -ENOMEM;
2438
2439 clk = devm_clk_get(dev, NULL);
2440 if (IS_ERR(clk)) {
2441 dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
2442 return PTR_ERR(clk);
2443 }
2444
2445 mhdp->clk = clk;
2446 mhdp->dev = dev;
2447 mutex_init(&mhdp->mbox_mutex);
2448 mutex_init(&mhdp->link_mutex);
2449 spin_lock_init(&mhdp->start_lock);
2450
2451 drm_dp_aux_init(&mhdp->aux);
2452 mhdp->aux.dev = dev;
2453 mhdp->aux.transfer = cdns_mhdp_transfer;
2454
2455 mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
2456 if (IS_ERR(mhdp->regs)) {
2457 dev_err(dev, "Failed to get memory resource\n");
2458 return PTR_ERR(mhdp->regs);
2459 }
2460
2461 mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
2462 if (IS_ERR(mhdp->sapb_regs)) {
2463 mhdp->hdcp_supported = false;
2464 dev_warn(dev,
2465 "Failed to get SAPB memory resource, HDCP not supported\n");
2466 } else {
2467 mhdp->hdcp_supported = true;
2468 }
2469
2470 mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
2471 if (IS_ERR(mhdp->phy)) {
2472 dev_err(dev, "no PHY configured\n");
2473 return PTR_ERR(mhdp->phy);
2474 }
2475
2476 platform_set_drvdata(pdev, mhdp);
2477
2478 mhdp->info = of_device_get_match_data(dev);
2479
2480 clk_prepare_enable(clk);
2481
2482 pm_runtime_enable(dev);
2483 ret = pm_runtime_resume_and_get(dev);
2484 if (ret < 0) {
2485 dev_err(dev, "pm_runtime_resume_and_get failed\n");
2486 pm_runtime_disable(dev);
2487 goto clk_disable;
2488 }
2489
2490 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
2491 ret = mhdp->info->ops->init(mhdp);
2492 if (ret != 0) {
2493 dev_err(dev, "MHDP platform initialization failed: %d\n",
2494 ret);
2495 goto runtime_put;
2496 }
2497 }
2498
2499 rate = clk_get_rate(clk);
2500 writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
2501 writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
2502
2503 dev_dbg(dev, "func clk rate %lu Hz\n", rate);
2504
2505 writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2506
2507 irq = platform_get_irq(pdev, 0);
2508 ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
2509 cdns_mhdp_irq_handler, IRQF_ONESHOT,
2510 "mhdp8546", mhdp);
2511 if (ret) {
2512 dev_err(dev, "cannot install IRQ %d\n", irq);
2513 ret = -EIO;
2514 goto plat_fini;
2515 }
2516
2517 cdns_mhdp_fill_host_caps(mhdp);
2518
2519
2520 mhdp->link.rate = mhdp->host.link_rate;
2521 mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2522
2523
2524 mhdp->display_fmt.y_only = false;
2525 mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
2526 mhdp->display_fmt.bpc = 8;
2527
2528 mhdp->bridge.of_node = pdev->dev.of_node;
2529 mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
2530 mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
2531 DRM_BRIDGE_OP_HPD;
2532 mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
2533 if (mhdp->info)
2534 mhdp->bridge.timings = mhdp->info->timings;
2535
2536 ret = phy_init(mhdp->phy);
2537 if (ret) {
2538 dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
2539 goto plat_fini;
2540 }
2541
2542
2543 INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
2544 INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);
2545
2546 init_waitqueue_head(&mhdp->fw_load_wq);
2547 init_waitqueue_head(&mhdp->sw_events_wq);
2548
2549 ret = cdns_mhdp_load_firmware(mhdp);
2550 if (ret)
2551 goto phy_exit;
2552
2553 if (mhdp->hdcp_supported)
2554 cdns_mhdp_hdcp_init(mhdp);
2555
2556 drm_bridge_add(&mhdp->bridge);
2557
2558 return 0;
2559
2560 phy_exit:
2561 phy_exit(mhdp->phy);
2562 plat_fini:
2563 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2564 mhdp->info->ops->exit(mhdp);
2565 runtime_put:
2566 pm_runtime_put_sync(dev);
2567 pm_runtime_disable(dev);
2568 clk_disable:
2569 clk_disable_unprepare(mhdp->clk);
2570
2571 return ret;
2572 }
2573
2574 static int cdns_mhdp_remove(struct platform_device *pdev)
2575 {
2576 struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
2577 unsigned long timeout = msecs_to_jiffies(100);
2578 bool stop_fw = false;
2579 int ret;
2580
2581 drm_bridge_remove(&mhdp->bridge);
2582
2583 ret = wait_event_timeout(mhdp->fw_load_wq,
2584 mhdp->hw_state == MHDP_HW_READY,
2585 timeout);
2586 if (ret == 0)
2587 dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
2588 __func__);
2589 else
2590 stop_fw = true;
2591
2592 spin_lock(&mhdp->start_lock);
2593 mhdp->hw_state = MHDP_HW_STOPPED;
2594 spin_unlock(&mhdp->start_lock);
2595
2596 if (stop_fw)
2597 ret = cdns_mhdp_set_firmware_active(mhdp, false);
2598
2599 phy_exit(mhdp->phy);
2600
2601 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2602 mhdp->info->ops->exit(mhdp);
2603
2604 pm_runtime_put_sync(&pdev->dev);
2605 pm_runtime_disable(&pdev->dev);
2606
2607 cancel_work_sync(&mhdp->modeset_retry_work);
2608 flush_scheduled_work();
2609
2610 clk_disable_unprepare(mhdp->clk);
2611
2612 return ret;
2613 }
2614
/*
 * Device-tree match table. The TI J721e wrapper needs platform-specific
 * bridge timings and init/exit ops, supplied via the match data.
 */
static const struct of_device_id mhdp_ids[] = {
	{ .compatible = "cdns,mhdp8546", },
#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
	{ .compatible = "ti,j721e-mhdp8546",
	  .data = &(const struct cdns_mhdp_platform_info) {
		  .timings = &mhdp_ti_j721e_bridge_timings,
		  .ops = &mhdp_ti_j721e_ops,
	  },
	},
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mhdp_ids);
2628
/* Platform driver glue and module metadata. */
static struct platform_driver mhdp_driver = {
	.driver	= {
		.name		= "cdns-mhdp8546",
		.of_match_table	= of_match_ptr(mhdp_ids),
	},
	.probe	= cdns_mhdp_probe,
	.remove	= cdns_mhdp_remove,
};
module_platform_driver(mhdp_driver);

MODULE_FIRMWARE(FW_NAME);

MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-mhdp8546");