#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
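/**
 * struct amdgpu_dm_irq_handler_data - Data used by a registered IRQ handler.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: The argument passed to @handler when it is called
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 * @work: work struct used to defer low context handlers to a workqueue
 */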
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;

	enum dc_irq_source irq_source;
	struct work_struct work;
};

#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
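/* Initialize the handler data fields common to both IRQ contexts. */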
static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}
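/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */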
static void dm_irq_work_func(struct work_struct *work)
{
	struct amdgpu_dm_irq_handler_data *handler_data =
		container_of(work, struct amdgpu_dm_irq_handler_data, work);

	handler_data->handler(handler_data->handler_arg);
}
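/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed, or NULL if the handler was not found.
 */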
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {
		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (handler == NULL)
			continue;

		if (ih == handler->handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (!handler_removed) {
		/* Not necessarily an error: the caller may not know in which
		 * context the handler was registered, so both contexts are
		 * searched in turn.
		 */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
		"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}
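/*
 * Unregister all handlers, for all IRQ sources and in both IRQ contexts.
 * Called on driver teardown, once no more interrupts will be dispatched.
 */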
static void unregister_all_irq_handlers(struct amdgpu_device *adev)
{
	struct list_head *hnd_list_low;
	struct list_head *hnd_list_high;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	int i;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
		hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
		hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];

		list_for_each_safe(entry, tmp, hnd_list_low) {
			handler = list_entry(entry,
					     struct amdgpu_dm_irq_handler_data,
					     list);

			if (handler == NULL || handler->handler == NULL)
				continue;

			list_del(&handler->list);
			kfree(handler);
		}

		list_for_each_safe(entry, tmp, hnd_list_high) {
			handler = list_entry(entry,
					     struct amdgpu_dm_irq_handler_data,
					     list);

			if (handler == NULL || handler->handler == NULL)
				continue;

			list_del(&handler->list);
			kfree(handler);
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (NULL == int_params || NULL == ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
		DRM_ERROR("DM_IRQ: invalid handler_idx!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
		return false;
	}

	return true;
}
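/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * deferred to a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers will not receive interrupts until dc_interrupt_set() is
 * called to enable the given interrupt source.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 *         source, handler function and args, or DAL_INVALID_IRQ_HANDLER_IDX
 *         on failure.
 */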
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (!validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;
	handler_data->irq_source = irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
		/* Low context handlers run from a workqueue. */
		INIT_WORK(&handler_data->work, dm_irq_work_func);
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}
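/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler that was previously
 * registered with amdgpu_dm_irq_register_interrupt().
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source the handler was registered for
 * @ih: Function pointer to the interrupt handler to unregister
 */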
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (!validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	/* The context of the handler is unknown here, so try both. */
	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* The handler was not found in any of the contexts. */
		DRM_ERROR(
			"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}
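/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize the low and high context IRQ tables. Each table holds one
 * handler list per IRQ source; when source n triggers, every handler
 * registered for n is called in registration (FIFO) order.
 */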
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(lh);
		/* high context handler list init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}
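/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all outstanding deferred work, then free every registered handler.
 */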
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* Interrupts are disabled by this point, so no new work can
		 * be scheduled; it is safe to flush what is pending. The lock
		 * is dropped first because flush_work() may sleep.
		 */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(lh)) {
			list_for_each_safe(entry, tmp, lh) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
	}

	unregister_all_irq_handlers(adev);
}

int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/*
	 * Disable the HW interrupt for HPD and HPD RX only, since FLIP and
	 * VBLANK are disabled when the CRTC itself is disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		/* Drop the lock around flush_work(), which may sleep. */
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(hnd_list_l)) {
			list_for_each_safe(entry, tmp, hnd_list_l) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the short-pulse (HPD RX) HW interrupts. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}

int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/*
	 * Re-enable the HW interrupt for HPD only, since FLIP and VBLANK
	 * are re-enabled when the CRTC itself is enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}
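/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */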
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
	struct amdgpu_dm_irq_handler_data *handler_data;
	bool work_queued = false;

	if (list_empty(handler_list))
		return;

	list_for_each_entry(handler_data, handler_list, list) {
		if (queue_work(system_highpri_wq, &handler_data->work)) {
			work_queued = true;
			break;
		}
	}

	/*
	 * queue_work() returns false when the work item is already pending.
	 * If every registered item is still pending, clone one so this
	 * interrupt is not dropped.
	 */
	if (!work_queued) {
		struct amdgpu_dm_irq_handler_data *handler_data_add;

		/* Get the handler data of the first item in the list. */
		handler_data = container_of(handler_list->next,
					    struct amdgpu_dm_irq_handler_data,
					    list);

		/* Allocate a new amdgpu_dm_irq_handler_data. */
		handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
		if (!handler_data_add) {
			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
			return;
		}

		/* Copy the members from the first handler. */
		handler_data_add->handler = handler_data->handler;
		handler_data_add->handler_arg = handler_data->handler_arg;
		handler_data_add->dm = handler_data->dm;
		handler_data_add->irq_source = irq_source;

		/* Keep it in the list for reuse on later interrupts. */
		list_add_tail(&handler_data_add->list, handler_list);

		INIT_WORK(&handler_data_add->work, dm_irq_work_func);

		if (queue_work(system_highpri_wq, &handler_data_add->work))
			DRM_DEBUG("Queued work for handling interrupt from "
				  "display for IRQ source %d\n",
				  irq_source);
		else
			DRM_ERROR("Failed to queue work for handling interrupt "
				  "from display for IRQ source %d\n",
				  irq_source);
	}
}
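/*
 * amdgpu_dm_irq_immediate_work - call all "high IRQ context" handlers
 * registered for the given source, directly from interrupt context.
 */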
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each_entry(handler_data,
			    &adev->dm.irq_handler_list_high_tab[irq_source],
			    list) {
		/* Call a subcomponent which registered for immediate
		 * notification at interrupt context.
		 */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}
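/**
 * amdgpu_dm_irq_handler - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq.
 */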
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);
	/* Schedule low irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned int type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned int crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR("%s: crtc is NULL at id :%d\n", func, crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_PFLIP, __func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VBLANK, __func__);
}

static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int crtc_id,
					  enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VLINE0, __func__);
}

static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int crtc_id,
					       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VUPDATE, __func__);
}

static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
	.set = amdgpu_dm_set_vline0_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = {
	.set = amdgpu_dm_set_dmub_outbox_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
	.set = amdgpu_dm_set_dmub_trace_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vline0_irq.num_types = adev->mode_info.num_crtc;
	adev->vline0_irq.funcs = &dm_vline0_irq_funcs;

	adev->dmub_outbox_irq.num_types = 1;
	adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->dmub_trace_irq.num_types = 1;
	adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

void amdgpu_dm_outbox_init(struct amdgpu_device *adev)
{
	dc_interrupt_set(adev->dm.dc, DC_IRQ_SOURCE_DMCUB_OUTBOX, true);
}
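/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 * @adev: amdgpu_device pointer
 *
 * Walk the connector list and enable the HPD and HPD RX interrupt sources
 * for every link that provides a valid one.
 */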
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 true);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
	drm_connector_list_iter_end(&iter);
}
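/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 * @adev: amdgpu_device pointer
 *
 * Walk the connector list and disable the HPD and HPD RX interrupt sources
 * that were enabled in amdgpu_dm_hpd_init().
 */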
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 false);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
	drm_connector_list_iter_end(&iter);
}