/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
/*
 * GPU GFX IP block helper functions.
 */
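/*
 * amdgpu_gfx_mec_queue_to_bit - map a (mec, pipe, queue) triple to a flat bit
 * index into the MEC queue bitmap. The layout is mec-major: all queues of
 * pipe 0 come first, then pipe 1, and so on, before the next MEC.
 *
 * Worked example (illustrative counts, not read from hardware here): with
 * num_pipe_per_mec = 4 and num_queue_per_pipe = 8, the triple
 * (mec = 1, pipe = 2, queue = 3) maps to bit 1 * 4 * 8 + 2 * 8 + 3 = 51.
 * amdgpu_gfx_bit_to_mec_queue() below is the exact inverse of this mapping.
 */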
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}
void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
				 int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
		/ adev->gfx.mec.num_pipe_per_mec;
}
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}
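/*
 * The next three helpers mirror the MEC variants above, but index the
 * graphics ME queue bitmap instead of the compute MEC one; the linearisation
 * formula is the same, just using the me.* pipe and queue counts.
 */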
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}
void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}
/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
	int i;

	i = ffs(adev->gfx.scratch.free_mask);
	if (i != 0 && i <= adev->gfx.scratch.num_reg) {
		/* ffs() is 1-based, convert to a 0-based register index */
		i--;
		adev->gfx.scratch.free_mask &= ~(1u << i);
		*reg = adev->gfx.scratch.reg_base + i;
		return 0;
	}
	return -EINVAL;
}
/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics).
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
	adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}
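/*
 * Usage sketch (illustrative; the local variable names are hypothetical):
 * callers such as ring tests pair the two scratch helpers around direct
 * register access:
 *
 *	uint32_t scratch;
 *	int r;
 *
 *	r = amdgpu_gfx_scratch_get(adev, &scratch);
 *	if (r)
 *		return r;
 *	WREG32(scratch, 0xCAFEDEAD);
 *	...
 *	amdgpu_gfx_scratch_free(adev, scratch);
 */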
/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}
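/*
 * Example (hypothetical values): booting with amdgpu.disable_cu=1.0.3,1.0.4
 * is parsed as two comma-separated se.sh.cu triples and sets bits 3 and 4 in
 * mask[1 * max_sh + 0], i.e. it disables CUs 3 and 4 of shader array 0 in SE 1.
 */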
static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe, mec;
	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);

	/* policy for amdgpu compute queue ownership */
	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		queue = i % adev->gfx.mec.num_queue_per_pipe;
		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
			% adev->gfx.mec.num_pipe_per_mec;
		mec = (i / adev->gfx.mec.num_queue_per_pipe)
			/ adev->gfx.mec.num_pipe_per_mec;

		/* we've run out of HW */
		if (mec >= adev->gfx.mec.num_mec)
			break;

		if (multipipe_policy) {
			/* policy: amdgpu owns the first two queues of the first MEC */
			if (mec == 0 && queue < 2)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		} else {
			/* policy: amdgpu owns all queues in the first pipe */
			if (mec == 0 && pipe == 0)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		}
	}

	/* update the number of active compute rings */
	adev->gfx.num_compute_rings =
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* If you hit this case and edited the policy, you probably just
	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
}
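/*
 * Worked example for the policy above (illustrative counts: one MEC with
 * 4 pipes and 8 queues per pipe). With the multipipe policy, "queue < 2 on
 * MEC 0" selects bits 0, 1, 8, 9, 16, 17, 24 and 25 (two queues on every
 * pipe), so queue_bitmap = 0x03030303 and num_compute_rings = 8. Without it,
 * "pipe == 0 on MEC 0" selects bits 0-7, i.e. all eight queues of the first
 * pipe.
 */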
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, me;

	for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
		queue = i % adev->gfx.me.num_queue_per_pipe;
		me = (i / adev->gfx.me.num_queue_per_pipe)
			/ adev->gfx.me.num_pipe_per_me;

		if (me >= adev->gfx.me.num_me)
			break;

		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later */
		if (me == 0 && queue < 1)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (queue_bit-- >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
	if (r)
		return r;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024,
			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
	amdgpu_ring_fini(ring);
}
void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}
/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for
		 * SRIOV the VRAM domain is a must; otherwise the hypervisor
		 * triggers a SAVE_VF failure after the driver is unloaded,
		 * because by then the MQD has been deallocated and unbound
		 * from the GART. To avoid that divergence, use the VRAM
		 * domain for the KIQ MQD on both SRIOV and bare-metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}
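/*
 * Note on the backup slots used above: mec.mqd_backup[0..num_compute_rings-1]
 * shadow the KCQ MQDs, the extra slot at index AMDGPU_MAX_COMPUTE_RINGS
 * shadows the KIQ MQD, and me.mqd_backup[] shadows the async gfx (KGQ) MQDs.
 * amdgpu_gfx_mqd_sw_fini() below frees the same slots alongside each ring's
 * MQD buffer object.
 */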
void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_compute_rings))
		return -ENOMEM;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);

	return amdgpu_ring_test_ring(kiq_ring);
}
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << i);
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}
/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx ip after gfx CG/PG is enabled.
 * 2. Other clients can send a request to disable the gfx off feature; the request should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not send a request to enable the gfx off feature before disabling it.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (!enable)
		adev->gfx.gfx_off_req_count++;
	else if (adev->gfx.gfx_off_req_count > 0)
		adev->gfx.gfx_off_req_count--;

	if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
		schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
	} else if (!enable && adev->gfx.gfx_off_state) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
			adev->gfx.gfx_off_state = false;
	}

	mutex_unlock(&adev->gfx.gfx_off_mutex);
}
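/*
 * Usage sketch (illustrative, not taken from this file): a caller that must
 * keep the GFX block powered while it touches GFX registers brackets the
 * access with a disable/enable pair; the request counter above turns this
 * into "GFXOFF stays off while at least one disable request is pending":
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	// raise gfx_off_req_count
 *	...					// safely access GFX registers
 *	amdgpu_gfx_off_ctrl(adev, true);	// drop the request; GFXOFF is
 *						// re-armed after GFX_OFF_DELAY_ENABLE
 */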
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_fs_if fs_info = {
		.sysfs_name = "gfx_err_count",
		.debugfs_name = "gfx_err_inject",
	};
	struct ras_ih_if ih_info = {
		.cb = amdgpu_gfx_process_ras_data_cb,
	};

	if (!adev->gfx.ras_if) {
		adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gfx.ras_if)
			return -ENOMEM;
		adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
		adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gfx.ras_if->sub_block_index = 0;
		strcpy(adev->gfx.ras_if->name, "gfx");
	}
	fs_info.head = ih_info.head = *adev->gfx.ras_if;

	r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
				 &fs_info, &ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
		if (r)
			goto late_fini;
	} else {
		/* free gfx ras_if if ras is not supported */
		r = 0;
		goto free;
	}

	return 0;
late_fini:
	amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
free:
	kfree(adev->gfx.ras_if);
	adev->gfx.ras_if = NULL;
	return r;
}
void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
	    adev->gfx.ras_if) {
		struct ras_common_if *ras_if = adev->gfx.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
			.cb = amdgpu_gfx_process_ras_data_cb,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
				   void *err_data,
				   struct amdgpu_iv_entry *entry)
{
	/* TODO ue will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.funcs->query_ras_error_count)
			adev->gfx.funcs->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}
int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}
);