/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM calls when an interrupt fires. This generic
 * handler looks up the IRQ table and calls the respective
 * &amdgpu_irq_src_funcs.process hookups.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen
 * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
 * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks
 * up DM's IRQ tables. However, in order for the base driver to recognize this
 * hook, DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks, which the base driver calls through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */
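
/*
 * An illustrative sketch of the layering described above (the real call
 * sites live in dce110_register_irq_handlers() and friends; client_id and
 * src_id here are placeholders, not values taken from this file):
 *
 *    // .process of dm_hpd_irq_funcs points at the DM generic handler
 *    // amdgpu_dm_irq_handler(), and .set at DC's interrupt toggle; the
 *    // source still has to be registered with the base driver:
 *    adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
 *    amdgpu_irq_add_id(adev, client_id, src_id, &adev->hpd_irq);
 */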

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 * @work: Work struct used to defer low-context handling to a workqueue
 */
struct amdgpu_dm_irq_handler_data {
    struct list_head list;
    interrupt_handler handler;
    void *handler_arg;

    struct amdgpu_display_manager *dm;
    /* DAL irq source which registered for this interrupt. */
    enum dc_irq_source irq_source;
    struct work_struct work;
};

#define DM_IRQ_TABLE_LOCK(adev, flags) \
    spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
    spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
                     void (*ih)(void *),
                     void *args,
                     struct amdgpu_display_manager *dm)
{
    hcd->handler = ih;
    hcd->handler_arg = args;
    hcd->dm = dm;
}

/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct of the handler whose turn it is to run
 */
static void dm_irq_work_func(struct work_struct *work)
{
    struct amdgpu_dm_irq_handler_data *handler_data =
        container_of(work, struct amdgpu_dm_irq_handler_data, work);

    /* Call a DAL subcomponent which registered for interrupt notification
     * at INTERRUPT_LOW_IRQ_CONTEXT (the most common use is the HPD
     * interrupt).
     */
    handler_data->handler(handler_data->handler_arg);
}

/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
                        void *ih,
                        const struct dc_interrupt_params *int_params)
{
    struct list_head *hnd_list;
    struct list_head *entry, *tmp;
    struct amdgpu_dm_irq_handler_data *handler;
    unsigned long irq_table_flags;
    bool handler_removed = false;
    enum dc_irq_source irq_source;

    DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

    irq_source = int_params->irq_source;

    switch (int_params->int_context) {
    case INTERRUPT_HIGH_IRQ_CONTEXT:
        hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
        break;
    case INTERRUPT_LOW_IRQ_CONTEXT:
    default:
        hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
        break;
    }

    list_for_each_safe(entry, tmp, hnd_list) {

        handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
                     list);

        if (handler == NULL)
            continue;

        if (ih == handler->handler) {
            /* Found our handler. Remove it from the list. */
            list_del(&handler->list);
            handler_removed = true;
            break;
        }
    }

    DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

    if (!handler_removed) {
        /* Not necessarily an error - the caller may not
         * know the context.
         */
        return NULL;
    }

    kfree(handler);

    DRM_DEBUG_KMS(
    "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
        ih, int_params->irq_source, int_params->int_context);

    return hnd_list;
}

/**
 * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table
 * @adev: The base driver device containing the DM device
 *
 * Go through low and high context IRQ tables and deallocate handlers.
 */
static void unregister_all_irq_handlers(struct amdgpu_device *adev)
{
    struct list_head *hnd_list_low;
    struct list_head *hnd_list_high;
    struct list_head *entry, *tmp;
    struct amdgpu_dm_irq_handler_data *handler;
    unsigned long irq_table_flags;
    int i;

    DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

    for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
        hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
        hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];

        list_for_each_safe(entry, tmp, hnd_list_low) {

            handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
                         list);

            if (handler == NULL || handler->handler == NULL)
                continue;

            list_del(&handler->list);
            kfree(handler);
        }

        list_for_each_safe(entry, tmp, hnd_list_high) {

            handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
                         list);

            if (handler == NULL || handler->handler == NULL)
                continue;

            list_del(&handler->list);
            kfree(handler);
        }
    }

    DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
                 void (*ih)(void *))
{
    if (!int_params || !ih) {
        DRM_ERROR("DM_IRQ: invalid input!\n");
        return false;
    }

    if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
        DRM_ERROR("DM_IRQ: invalid context: %d!\n",
                int_params->int_context);
        return false;
    }

    if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
        DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
                int_params->irq_source);
        return false;
    }

    return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
                           irq_handler_idx handler_idx)
{
    if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
        DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
        return false;
    }

    if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
        DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
        return false;
    }

    return true;
}

/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/

/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in a FIFO manner, i.e. the earliest
 * registered handler is called first.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 *         source, handler function, and args, or DAL_INVALID_IRQ_HANDLER_IDX
 *         on failure
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
                       struct dc_interrupt_params *int_params,
                       void (*ih)(void *),
                       void *handler_args)
{
    struct list_head *hnd_list;
    struct amdgpu_dm_irq_handler_data *handler_data;
    unsigned long irq_table_flags;
    enum dc_irq_source irq_source;

    if (!validate_irq_registration_params(int_params, ih))
        return DAL_INVALID_IRQ_HANDLER_IDX;

    handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
    if (!handler_data) {
        DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
        return DAL_INVALID_IRQ_HANDLER_IDX;
    }

    init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

    irq_source = int_params->irq_source;

    handler_data->irq_source = irq_source;

    /* Lock the list, add the handler. */
    DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

    switch (int_params->int_context) {
    case INTERRUPT_HIGH_IRQ_CONTEXT:
        hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
        break;
    case INTERRUPT_LOW_IRQ_CONTEXT:
    default:
        hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
        INIT_WORK(&handler_data->work, dm_irq_work_func);
        break;
    }

    list_add_tail(&handler_data->list, hnd_list);

    DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

    /* This pointer will be stored by the code which requested the
     * interrupt registration. The same pointer will be needed in order
     * to unregister the interrupt.
     */

    DRM_DEBUG_KMS(
        "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
        handler_data,
        irq_source,
        int_params->int_context);

    return handler_data;
}
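
/*
 * Illustrative usage sketch (not a call site in this file; handle_hpd_irq
 * and aconnector stand in for a real handler and its argument):
 *
 *    struct dc_interrupt_params int_params;
 *    void *hdl;
 *
 *    memset(&int_params, 0, sizeof(int_params));
 *    int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *    int_params.irq_source = DC_IRQ_SOURCE_HPD1;
 *    hdl = amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *                                           handle_hpd_irq, aconnector);
 *
 *    // Unregistration matches on the handler function pointer:
 *    amdgpu_dm_irq_unregister_interrupt(adev, DC_IRQ_SOURCE_HPD1,
 *                                       handle_hpd_irq);
 */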

/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Function pointer to the interrupt handler to unregister
 *
 * Go through both low and high context IRQ tables, and find the given handler
 * for the given irq source. If found, remove it. Otherwise, do nothing.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
                    enum dc_irq_source irq_source,
                    void *ih)
{
    struct list_head *handler_list;
    struct dc_interrupt_params int_params;
    int i;

    if (!validate_irq_unregistration_params(irq_source, ih))
        return;

    memset(&int_params, 0, sizeof(int_params));

    int_params.irq_source = irq_source;

    for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {

        int_params.int_context = i;

        handler_list = remove_irq_handler(adev, ih, &int_params);

        if (handler_list != NULL)
            break;
    }

    if (handler_list == NULL) {
        /* If we got here, it means we searched all irq contexts
         * for this irq source, but the handler was not found.
         */
        DRM_ERROR(
        "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
            ih, irq_source);
    }
}

/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev:  The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, with M
 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * Handlers in the low context table have their work deferred to a workqueue;
 * their &struct work_struct is initialized at registration time, in
 * amdgpu_dm_irq_register_interrupt().
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
    int src;
    struct list_head *lh;

    DRM_DEBUG_KMS("DM_IRQ\n");

    spin_lock_init(&adev->dm.irq_handler_list_table_lock);

    for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
        /* low context handler list init */
        lh = &adev->dm.irq_handler_list_low_tab[src];
        INIT_LIST_HEAD(lh);
        /* high context handler list init */
        INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
    }

    return 0;
}

/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
    int src;
    struct list_head *lh;
    struct list_head *entry, *tmp;
    struct amdgpu_dm_irq_handler_data *handler;
    unsigned long irq_table_flags;

    DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
    for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
        /* No new work may be scheduled for these handlers at this
         * point, so it is safe to flush any 'work' still pending.
         */
        lh = &adev->dm.irq_handler_list_low_tab[src];
        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

        if (!list_empty(lh)) {
            list_for_each_safe(entry, tmp, lh) {
                handler = list_entry(
                    entry,
                    struct amdgpu_dm_irq_handler_data,
                    list);
                flush_work(&handler->work);
            }
        }
    }
    /* Deallocate handlers from the table. */
    unregister_all_irq_handlers(adev);
}

int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
    int src;
    struct list_head *hnd_list_h;
    struct list_head *hnd_list_l;
    unsigned long irq_table_flags;
    struct list_head *entry, *tmp;
    struct amdgpu_dm_irq_handler_data *handler;

    DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

    DRM_DEBUG_KMS("DM_IRQ: suspend\n");

    /* Disable the HW interrupts for HPD and HPD RX only, since FLIP and
     * VBLANK interrupts are disabled in manage_dm_interrupts() when a
     * CRTC is disabled.
     */
    for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
        hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
        hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
        if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
            dc_interrupt_set(adev->dm.dc, src, false);

        DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

        if (!list_empty(hnd_list_l)) {
            list_for_each_safe(entry, tmp, hnd_list_l) {
                handler = list_entry(
                    entry,
                    struct amdgpu_dm_irq_handler_data,
                    list);
                flush_work(&handler->work);
            }
        }
        DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
    }

    DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
    return 0;
}

int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
    int src;
    struct list_head *hnd_list_h, *hnd_list_l;
    unsigned long irq_table_flags;

    DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

    DRM_DEBUG_KMS("DM_IRQ: early resume\n");

    /* Re-enable the HPD RX (short pulse) HW interrupts. */
    for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
        hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
        hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
        if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
            dc_interrupt_set(adev->dm.dc, src, true);
    }

    DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

    return 0;
}

int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
    int src;
    struct list_head *hnd_list_h, *hnd_list_l;
    unsigned long irq_table_flags;

    DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

    DRM_DEBUG_KMS("DM_IRQ: resume\n");

    /* Re-enable the HW interrupts for HPD only, since FLIP and VBLANK
     * interrupts will be enabled in manage_dm_interrupts() when a CRTC
     * is enabled.
     */
    for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
        hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
        hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
        if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
            dc_interrupt_set(adev->dm.dc, src, true);
    }

    DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
    return 0;
}

/*
 * amdgpu_dm_irq_schedule_work - schedule deferred work for the given
 * "irq_source". A work item is queued for the first registered handler whose
 * work is not already pending; if every item is still pending, a temporary
 * copy of the first handler's data is allocated and queued instead, so the
 * interrupt is not lost.
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
                    enum dc_irq_source irq_source)
{
    struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
    struct amdgpu_dm_irq_handler_data *handler_data;
    bool work_queued = false;

    if (list_empty(handler_list))
        return;

    list_for_each_entry(handler_data, handler_list, list) {
        if (queue_work(system_highpri_wq, &handler_data->work)) {
            work_queued = true;
            break;
        }
    }

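    /* queue_work() returns false when a work item is already pending from
     * an earlier interrupt. If that happened for every registered item,
     * clone the first handler's data and queue the copy, so this interrupt
     * is not dropped while the pending work is still in flight.
     */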
    if (!work_queued) {
        struct amdgpu_dm_irq_handler_data *handler_data_add;

        /* Get the amdgpu_dm_irq_handler_data of the first item in
         * handler_list.
         */
        handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);

        /* Allocate a new amdgpu_dm_irq_handler_data. */
        handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
        if (!handler_data_add) {
            DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
            return;
        }

        /* Copy the members of handler_data into the new item. */
        handler_data_add->handler       = handler_data->handler;
        handler_data_add->handler_arg   = handler_data->handler_arg;
        handler_data_add->dm            = handler_data->dm;
        handler_data_add->irq_source    = irq_source;

        list_add_tail(&handler_data_add->list, handler_list);

        INIT_WORK(&handler_data_add->work, dm_irq_work_func);

        if (queue_work(system_highpri_wq, &handler_data_add->work))
            DRM_DEBUG("Queued work for handling interrupt from display for IRQ source %d\n",
                  irq_source);
        else
            DRM_ERROR("Failed to queue work for handling interrupt from display for IRQ source %d\n",
                  irq_source);
    }
}

/*
 * amdgpu_dm_irq_immediate_work
 * Call the registered high-context handlers immediately, rather than
 * sending them to a work queue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
                     enum dc_irq_source irq_source)
{
    struct amdgpu_dm_irq_handler_data *handler_data;
    unsigned long irq_table_flags;

    DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

    list_for_each_entry(handler_data,
                &adev->dm.irq_handler_list_high_tab[irq_source],
                list) {
        /* Call a subcomponent which registered for immediate
         * interrupt notification.
         */
        handler_data->handler(handler_data->handler_arg);
    }

    DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/**
 * amdgpu_dm_irq_handler() - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
                 struct amdgpu_irq_src *source,
                 struct amdgpu_iv_entry *entry)
{
    enum dc_irq_source src =
        dc_interrupt_to_irq_source(
            adev->dm.dc,
            entry->src_id,
            entry->src_data[0]);

    dc_interrupt_ack(adev->dm.dc, src);

    /* Call high irq work immediately */
    amdgpu_dm_irq_immediate_work(adev, src);
    /* Schedule low irq work */
    amdgpu_dm_irq_schedule_work(adev, src);

    return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
{
    switch (type) {
    case AMDGPU_HPD_1:
        return DC_IRQ_SOURCE_HPD1;
    case AMDGPU_HPD_2:
        return DC_IRQ_SOURCE_HPD2;
    case AMDGPU_HPD_3:
        return DC_IRQ_SOURCE_HPD3;
    case AMDGPU_HPD_4:
        return DC_IRQ_SOURCE_HPD4;
    case AMDGPU_HPD_5:
        return DC_IRQ_SOURCE_HPD5;
    case AMDGPU_HPD_6:
        return DC_IRQ_SOURCE_HPD6;
    default:
        return DC_IRQ_SOURCE_INVALID;
    }
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
                       struct amdgpu_irq_src *source,
                       unsigned type,
                       enum amdgpu_interrupt_state state)
{
    enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
    bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

    dc_interrupt_set(adev->dm.dc, src, st);
    return 0;
}

static inline int dm_irq_state(struct amdgpu_device *adev,
                   struct amdgpu_irq_src *source,
                   unsigned crtc_id,
                   enum amdgpu_interrupt_state state,
                   const enum irq_type dal_irq_type,
                   const char *func)
{
    bool st;
    enum dc_irq_source irq_source;

    struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

    if (!acrtc) {
        DRM_ERROR(
            "%s: crtc is NULL at id :%d\n",
            func,
            crtc_id);
        return 0;
    }

    if (acrtc->otg_inst == -1)
        return 0;

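    /* Each irq_type in the IRQ_TYPE_* family aliases the DC source for
     * instance 0 (e.g. IRQ_TYPE_VBLANK is DC_IRQ_SOURCE_VBLANK1, per the
     * enum irq_type definition in irq_types.h), so adding the OTG instance
     * selects the per-CRTC interrupt source.
     */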
    irq_source = dal_irq_type + acrtc->otg_inst;

    st = (state == AMDGPU_IRQ_STATE_ENABLE);

    dc_interrupt_set(adev->dm.dc, irq_source, st);
    return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
                     struct amdgpu_irq_src *source,
                     unsigned crtc_id,
                     enum amdgpu_interrupt_state state)
{
    return dm_irq_state(
        adev,
        source,
        crtc_id,
        state,
        IRQ_TYPE_PFLIP,
        __func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
                    struct amdgpu_irq_src *source,
                    unsigned crtc_id,
                    enum amdgpu_interrupt_state state)
{
    return dm_irq_state(
        adev,
        source,
        crtc_id,
        state,
        IRQ_TYPE_VBLANK,
        __func__);
}

static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
                    struct amdgpu_irq_src *source,
                    unsigned int crtc_id,
                    enum amdgpu_interrupt_state state)
{
    return dm_irq_state(
        adev,
        source,
        crtc_id,
        state,
        IRQ_TYPE_VLINE0,
        __func__);
}

static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev,
                    struct amdgpu_irq_src *source,
                    unsigned int crtc_id,
                    enum amdgpu_interrupt_state state)
{
    enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
    bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

    dc_interrupt_set(adev->dm.dc, irq_source, st);
    return 0;
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
                       struct amdgpu_irq_src *source,
                       unsigned int crtc_id,
                       enum amdgpu_interrupt_state state)
{
    return dm_irq_state(
        adev,
        source,
        crtc_id,
        state,
        IRQ_TYPE_VUPDATE,
        __func__);
}

static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
                       struct amdgpu_irq_src *source,
                       unsigned int type,
                       enum amdgpu_interrupt_state state)
{
    enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
    bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

    dc_interrupt_set(adev->dm.dc, irq_source, st);
    return 0;
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
    .set = amdgpu_dm_set_crtc_irq_state,
    .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
    .set = amdgpu_dm_set_vline0_irq_state,
    .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = {
    .set = amdgpu_dm_set_dmub_outbox_irq_state,
    .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
    .set = amdgpu_dm_set_vupdate_irq_state,
    .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
    .set = amdgpu_dm_set_dmub_trace_irq_state,
    .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
    .set = amdgpu_dm_set_pflip_irq_state,
    .process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
    .set = amdgpu_dm_set_hpd_irq_state,
    .process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
    adev->crtc_irq.num_types = adev->mode_info.num_crtc;
    adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

    adev->vline0_irq.num_types = adev->mode_info.num_crtc;
    adev->vline0_irq.funcs = &dm_vline0_irq_funcs;

    adev->dmub_outbox_irq.num_types = 1;
    adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs;

    adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
    adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

    adev->dmub_trace_irq.num_types = 1;
    adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;

    adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
    adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

    adev->hpd_irq.num_types = adev->mode_info.num_hpd;
    adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
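
/*
 * Once registered, these sources are driven by the base driver: a sketch of
 * the flow (amdgpu_irq_get()/amdgpu_irq_put() are base driver APIs, shown
 * here only to illustrate how the .set hooks above end up being called):
 *
 *    // Taking a vblank reference eventually reaches
 *    // amdgpu_dm_set_crtc_irq_state() via amdgpu_irq_update():
 *    amdgpu_irq_get(adev, &adev->crtc_irq, crtc_id);
 *    ...
 *    amdgpu_irq_put(adev, &adev->crtc_irq, crtc_id);
 */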

void amdgpu_dm_outbox_init(struct amdgpu_device *adev)
{
    dc_interrupt_set(adev->dm.dc,
        DC_IRQ_SOURCE_DMCUB_OUTBOX,
        true);
}

/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd interrupts used by the card. Walk all connectors and enable
 * the HPD and HPD RX interrupt sources of each valid link.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
    struct drm_device *dev = adev_to_drm(adev);
    struct drm_connector *connector;
    struct drm_connector_list_iter iter;

    drm_connector_list_iter_begin(dev, &iter);
    drm_for_each_connector_iter(connector, &iter) {
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);

        const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

        if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
            dc_interrupt_set(adev->dm.dc,
                    dc_link->irq_source_hpd,
                    true);
        }

        if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
            dc_interrupt_set(adev->dm.dc,
                    dc_link->irq_source_hpd_rx,
                    true);
        }
    }
    drm_connector_list_iter_end(&iter);
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd interrupts used by the card. Walk all connectors and
 * disable the HPD and HPD RX interrupt sources of each valid link.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
    struct drm_device *dev = adev_to_drm(adev);
    struct drm_connector *connector;
    struct drm_connector_list_iter iter;

    drm_connector_list_iter_begin(dev, &iter);
    drm_for_each_connector_iter(connector, &iter) {
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
        const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

        if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
            dc_interrupt_set(adev->dm.dc,
                    dc_link->irq_source_hpd,
                    false);
        }

        if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
            dc_interrupt_set(adev->dm.dc,
                    dc_link->irq_source_hpd_rx,
                    false);
        }
    }
    drm_connector_list_iter_end(&iter);
}