0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/kernel.h>
0009 #include <linux/module.h>
0010 #include <linux/slab.h>
0011
0012 #include <media/cec.h>
0013
0014 #include <drm/display/drm_dp_helper.h>
0015 #include <drm/drm_connector.h>
0016 #include <drm/drm_device.h>
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
/*
 * Delay (in seconds) before the CEC adapter is unregistered once the CEC
 * capability disappears (see drm_dp_cec_unset_edid()).  Values >=
 * NEVER_UNREG_DELAY mean "never unregister automatically".
 */
#define NEVER_UNREG_DELAY 1000
static unsigned int drm_dp_cec_unregister_delay = 1;
module_param(drm_dp_cec_unregister_delay, uint, 0600);
MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
"CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");
0091
0092 static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
0093 {
0094 struct drm_dp_aux *aux = cec_get_drvdata(adap);
0095 u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
0096 ssize_t err = 0;
0097
0098 err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
0099 return (enable && err < 0) ? err : 0;
0100 }
0101
0102 static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
0103 {
0104 struct drm_dp_aux *aux = cec_get_drvdata(adap);
0105
0106 u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
0107 u8 mask[2];
0108 ssize_t err;
0109
0110 if (addr != CEC_LOG_ADDR_INVALID)
0111 la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
0112 mask[0] = la_mask & 0xff;
0113 mask[1] = la_mask >> 8;
0114 err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
0115 return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
0116 }
0117
0118 static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
0119 u32 signal_free_time, struct cec_msg *msg)
0120 {
0121 struct drm_dp_aux *aux = cec_get_drvdata(adap);
0122 unsigned int retries = min(5, attempts - 1);
0123 ssize_t err;
0124
0125 err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
0126 msg->msg, msg->len);
0127 if (err < 0)
0128 return err;
0129
0130 err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
0131 (msg->len - 1) | (retries << 4) |
0132 DP_CEC_TX_MESSAGE_SEND);
0133 return err < 0 ? err : 0;
0134 }
0135
0136 static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
0137 bool enable)
0138 {
0139 struct drm_dp_aux *aux = cec_get_drvdata(adap);
0140 ssize_t err;
0141 u8 val;
0142
0143 if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
0144 return 0;
0145
0146 err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
0147 if (err >= 0) {
0148 if (enable)
0149 val |= DP_CEC_SNOOPING_ENABLE;
0150 else
0151 val &= ~DP_CEC_SNOOPING_ENABLE;
0152 err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
0153 }
0154 return (enable && err < 0) ? err : 0;
0155 }
0156
0157 static void drm_dp_cec_adap_status(struct cec_adapter *adap,
0158 struct seq_file *file)
0159 {
0160 struct drm_dp_aux *aux = cec_get_drvdata(adap);
0161 struct drm_dp_desc desc;
0162 struct drm_dp_dpcd_ident *id = &desc.ident;
0163
0164 if (drm_dp_read_desc(aux, &desc, true))
0165 return;
0166 seq_printf(file, "OUI: %*phD\n",
0167 (int)sizeof(id->oui), id->oui);
0168 seq_printf(file, "ID: %*pE\n",
0169 (int)strnlen(id->device_id, sizeof(id->device_id)),
0170 id->device_id);
0171 seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
0172
0173
0174
0175
0176 seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
0177 id->sw_major_rev, id->sw_minor_rev,
0178 id->sw_major_rev, id->sw_minor_rev);
0179 }
0180
/* CEC framework callbacks for the DP CEC-Tunneling-over-AUX adapter. */
static const struct cec_adap_ops drm_dp_cec_adap_ops = {
	.adap_enable = drm_dp_cec_adap_enable,
	.adap_log_addr = drm_dp_cec_adap_log_addr,
	.adap_transmit = drm_dp_cec_adap_transmit,
	.adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
	.adap_status = drm_dp_cec_adap_status,
};
0188
0189 static int drm_dp_cec_received(struct drm_dp_aux *aux)
0190 {
0191 struct cec_adapter *adap = aux->cec.adap;
0192 struct cec_msg msg;
0193 u8 rx_msg_info;
0194 ssize_t err;
0195
0196 err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
0197 if (err < 0)
0198 return err;
0199
0200 if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
0201 return 0;
0202
0203 msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
0204 err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
0205 if (err < 0)
0206 return err;
0207
0208 cec_received_msg(adap, &msg);
0209 return 0;
0210 }
0211
/*
 * Service the CEC tunneling IRQ: pick up any received message, report
 * transmit completion status, then acknowledge the flags we saw.
 * Caller must hold aux->cec.lock and guarantee aux->cec.adap is non-NULL.
 */
static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
{
	struct cec_adapter *adap = aux->cec.adap;
	u8 flags;

	if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
		return;

	if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
		drm_dp_cec_received(aux);

	/*
	 * Report at most one TX outcome, in priority order: a successful
	 * send wins over a line error, which wins over a NACK.
	 */
	if (flags & DP_CEC_TX_MESSAGE_SENT)
		cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
	else if (flags & DP_CEC_TX_LINE_ERROR)
		cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
					  CEC_TX_STATUS_MAX_RETRIES);
	else if (flags &
		 (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
		cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
					  CEC_TX_STATUS_MAX_RETRIES);
	/* Write-1-to-clear: ack exactly the flags handled above. */
	drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}
0234
0235
0236
0237
0238
0239
0240
0241
0242 void drm_dp_cec_irq(struct drm_dp_aux *aux)
0243 {
0244 u8 cec_irq;
0245 int ret;
0246
0247
0248 if (!aux->transfer)
0249 return;
0250
0251 mutex_lock(&aux->cec.lock);
0252 if (!aux->cec.adap)
0253 goto unlock;
0254
0255 ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
0256 &cec_irq);
0257 if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
0258 goto unlock;
0259
0260 drm_dp_cec_handle_irq(aux);
0261 drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
0262 unlock:
0263 mutex_unlock(&aux->cec.lock);
0264 }
0265 EXPORT_SYMBOL(drm_dp_cec_irq);
0266
0267 static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
0268 {
0269 u8 cap = 0;
0270
0271 if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
0272 !(cap & DP_CEC_TUNNELING_CAPABLE))
0273 return false;
0274 if (cec_cap)
0275 *cec_cap = cap;
0276 return true;
0277 }
0278
0279
0280
0281
0282
0283 static void drm_dp_cec_unregister_work(struct work_struct *work)
0284 {
0285 struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
0286 cec.unregister_work.work);
0287
0288 mutex_lock(&aux->cec.lock);
0289 cec_unregister_adapter(aux->cec.adap);
0290 aux->cec.adap = NULL;
0291 mutex_unlock(&aux->cec.lock);
0292 }
0293
0294
0295
0296
0297
0298
0299
/*
 * Called when a new EDID is available: (re)register the CEC adapter if the
 * sink supports CEC tunneling, and update its physical address from the EDID.
 * An existing adapter is reused when its capabilities and logical-address
 * count are unchanged; otherwise it is torn down and re-created.
 */
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
{
	struct drm_connector *connector = aux->cec.connector;
	u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD |
		       CEC_CAP_CONNECTOR_INFO;
	struct cec_connector_info conn_info;
	unsigned int num_las = 1;
	u8 cap;

	/* No transfer function means CEC was never set up for this aux. */
	if (!aux->transfer)
		return;

#ifndef CONFIG_MEDIA_CEC_RC
	/*
	 * The remote-control capability is only usable when the CEC RC
	 * framework is built in, so drop it otherwise.
	 */
	cec_caps &= ~CEC_CAP_RC;
#endif
	/* The work takes cec.lock itself, so cancel it before locking. */
	cancel_delayed_work_sync(&aux->cec.unregister_work);

	mutex_lock(&aux->cec.lock);
	if (!drm_dp_cec_cap(aux, &cap)) {
		/* CEC tunneling is not (or no longer) supported: tear down. */
		cec_unregister_adapter(aux->cec.adap);
		aux->cec.adap = NULL;
		goto unlock;
	}

	if (cap & DP_CEC_SNOOPING_CAPABLE)
		cec_caps |= CEC_CAP_MONITOR_ALL;
	if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
		num_las = CEC_MAX_LOG_ADDRS;

	if (aux->cec.adap) {
		if (aux->cec.adap->capabilities == cec_caps &&
		    aux->cec.adap->available_log_addrs == num_las) {
			/* Unchanged capabilities: just refresh the phys addr. */
			cec_s_phys_addr_from_edid(aux->cec.adap, edid);
			goto unlock;
		}
		/*
		 * The capabilities changed, so unregister the old adapter
		 * and create a new one below.
		 */
		cec_unregister_adapter(aux->cec.adap);
	}

	/* Create a new adapter matching the advertised capabilities. */
	aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
					     aux, connector->name, cec_caps,
					     num_las);
	if (IS_ERR(aux->cec.adap)) {
		aux->cec.adap = NULL;
		goto unlock;
	}

	cec_fill_conn_info_from_drm(&conn_info, connector);
	cec_s_conn_info(aux->cec.adap, &conn_info);

	if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) {
		cec_delete_adapter(aux->cec.adap);
		aux->cec.adap = NULL;
	} else {
		/*
		 * Update the phys addr only after registration succeeded,
		 * so userspace never sees a valid address on an adapter
		 * that failed to register.
		 */
		cec_s_phys_addr_from_edid(aux->cec.adap, edid);
	}
unlock:
	mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_set_edid);
0379
0380
0381
0382
0383 void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
0384 {
0385
0386 if (!aux->transfer)
0387 return;
0388
0389 cancel_delayed_work_sync(&aux->cec.unregister_work);
0390
0391 mutex_lock(&aux->cec.lock);
0392 if (!aux->cec.adap)
0393 goto unlock;
0394
0395 cec_phys_addr_invalidate(aux->cec.adap);
0396
0397
0398
0399
0400
0401
0402 if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
0403 !drm_dp_cec_cap(aux, NULL)) {
0404
0405
0406
0407
0408
0409 schedule_delayed_work(&aux->cec.unregister_work,
0410 drm_dp_cec_unregister_delay * HZ);
0411 }
0412 unlock:
0413 mutex_unlock(&aux->cec.lock);
0414 }
0415 EXPORT_SYMBOL(drm_dp_cec_unset_edid);
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427 void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
0428 struct drm_connector *connector)
0429 {
0430 WARN_ON(aux->cec.adap);
0431 if (WARN_ON(!aux->transfer))
0432 return;
0433 aux->cec.connector = connector;
0434 INIT_DELAYED_WORK(&aux->cec.unregister_work,
0435 drm_dp_cec_unregister_work);
0436 }
0437 EXPORT_SYMBOL(drm_dp_cec_register_connector);
0438
0439
0440
0441
0442
/*
 * Final teardown counterpart to drm_dp_cec_register_connector(): cancel any
 * pending delayed unregister and destroy the CEC adapter immediately.
 */
void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
{
	if (!aux->cec.adap)
		return;
	/* The work takes cec.lock itself; flush it before unregistering. */
	cancel_delayed_work_sync(&aux->cec.unregister_work);
	cec_unregister_adapter(aux->cec.adap);
	aux->cec.adap = NULL;
}
EXPORT_SYMBOL(drm_dp_cec_unregister_connector);