/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM calls on interrupts. This generic handler
 * looks up the IRQ table, and calls the respective
 * &amdgpu_irq_src_funcs.process hookups.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen in
 * the hooks that DM provides for &amdgpu_irq_src_funcs.process: they are all
 * set to the DM generic handler amdgpu_dm_irq_handler(), which looks up DM's
 * IRQ tables. However, in order for the base driver to recognize this hook,
 * DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */
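
/*
 * Illustrative sketch only: a DM subcomponent would typically register a
 * deferred (low context) handler roughly like this, where hpd_handler and
 * handler_arg are hypothetical names:
 *
 *	struct dc_interrupt_params int_params = {0};
 *
 *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *	int_params.irq_source = DC_IRQ_SOURCE_HPD1;
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 hpd_handler, handler_arg);
 */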
/******************************************************************************
 * Private declarations.
 *****************************************************************************/
/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
};
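
/*
 * The DM IRQ handler tables are also read from interrupt context (see
 * amdgpu_dm_irq_immediate_work()), so every table access goes through the
 * irqsave/irqrestore spinlock macros below.
 */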
#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
/******************************************************************************
 * Private functions.
 *****************************************************************************/
static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}
/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct irq_list_head *irq_list_head =
		container_of(work, struct irq_list_head, work);
	struct list_head *handler_list = &irq_list_head->head;
	struct amdgpu_dm_irq_handler_data *handler_data;

	list_for_each_entry(handler_data, handler_list, list) {
		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
			      handler_data->irq_source);

		/* Call a DAL subcomponent which registered for interrupt
		 * notification at INTERRUPT_LOW_IRQ_CONTEXT.
		 * (The most common use is HPD interrupt.) */
		handler_data->handler(handler_data->handler_arg);
	}
}
/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {
		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (ih == handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (handler_removed == false) {
		/* Not necessarily an error - the caller may not
		 * know the context. */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
		"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}
static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (NULL == int_params || NULL == ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}
static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
		DRM_ERROR("DM_IRQ: invalid handler_idx!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
		return false;
	}

	return true;
}
/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/
/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in a FIFO manner, i.e. the earliest
 * registered handler is called first.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 * source, handler function, and args, or DAL_INVALID_IRQ_HANDLER_IDX on
 * failure.
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (false == validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/* This pointer will be stored by code which requested interrupt
	 * registration.
	 * The same pointer will be needed in order to unregister the
	 * interrupt. */

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}
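
/*
 * Note on handle lifetime: the pointer returned above is an opaque handle;
 * remove_irq_handler() kfree()s the underlying handler data during
 * unregistration, so callers must not dereference it afterwards.
 */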
/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Function pointer to the interrupt handler to unregister
 *
 * Go through both low and high context IRQ tables, and find the given handler
 * for the given irq source. If found, remove it. Otherwise, do nothing.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	int i;
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;

	if (false == validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found. */
		DRM_ERROR(
		"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}
/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, each with M
 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * The low context table requires special steps to initialize, since handlers
 * will be deferred to a workqueue. See &struct irq_list_head.
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(&lh->head);
		INIT_WORK(&lh->work, dm_irq_work_func);

		/* high context handler init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}
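
/*
 * Rough shape of the tables initialized above, for an IRQ source n with m
 * registered handlers (each hd_i is a &struct amdgpu_dm_irq_handler_data):
 *
 *	irq_handler_list_high_tab[n]:     head -> hd_0 -> hd_1 -> ... -> hd_m
 *	irq_handler_list_low_tab[n].head: head -> hd_0 -> hd_1 -> ... -> hd_m
 */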
/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* The handler was removed from the table,
		 * it means it is safe to flush all the 'work'
		 * (because no code can schedule a new one). */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		/* flush_work() may sleep, so it must run with the table
		 * lock dropped. */
		flush_work(&lh->work);
	}
}
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/*
	 * Disable HW interrupts for HPD and HPDRX only, since FLIP and VBLANK
	 * will be disabled from manage_dm_interrupts when the CRTC is
	 * disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		/* Drop the lock around flush_work(), which may sleep. */
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}
int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the short-pulse (HPD RX) HW interrupts. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}
int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/*
	 * Re-enable HW interrupts for HPD only, since FLIP and VBLANK
	 * will be enabled from manage_dm_interrupts when the CRTC is enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}
/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * given irq_source.
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	unsigned long irq_table_flags;
	struct work_struct *work = NULL;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (work) {
		/* schedule_work() returns false if the work item was
		 * already pending; the queued work will still run the
		 * handler list, so this is informational only. */
		if (!schedule_work(work))
			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
				 irq_source);
	}
}
/*
 * amdgpu_dm_irq_immediate_work
 * Call all high context handlers immediately instead of deferring to a
 * workqueue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each_entry(handler_data,
			    &adev->dm.irq_handler_list_high_tab[irq_source],
			    list) {
		/* Call a subcomponent which registered for immediate
		 * interrupt notification. */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}
/**
 * amdgpu_dm_irq_handler - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(adev->dm.dc,
					   entry->src_id,
					   entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately. */
	amdgpu_dm_irq_immediate_work(adev, src);

	/* Schedule low irq work. */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}
static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}
static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned int type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}
static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned int crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR(
			"%s: crtc is NULL at id :%d\n",
			func,
			crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}
static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_PFLIP, __func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VBLANK, __func__);
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VUPDATE, __func__);
}
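
/*
 * As described in the DOC comment at the top of this file, every .process
 * hook below points at the DM generic handler amdgpu_dm_irq_handler(), while
 * the per-type .set hooks expose DC's hardware interrupt toggle to the base
 * driver.
 */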
static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);

		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 true);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
	drm_connector_list_iter_end(&iter);
}
/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
	drm_connector_list_iter_end(&iter);
}