/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>

#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"

#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
68 #include <linux/suspend.h>
69 #include <drm/task_barrier.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
const char *amdgpu_asic_name[] = {
/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
}
static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/**
 * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}
/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	return amdgpu_asic_supports_baco(adev);
}
/*
 * VRAM access helper functions.
 *
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, sizeof(@buf) must be > @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       uint32_t *buf, size_t size, bool write)
{
	unsigned long flags;
	uint64_t last;

	last = size - 4;
	for (last += pos; pos <= last; pos += 4) {
		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *buf++);
		else
			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}
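/*
 * Usage sketch (illustrative only, not part of the driver): copying a small
 * buffer out of VRAM through the MM_INDEX/MM_DATA window. The local names
 * fence_log and FENCE_LOG_OFFSET are made up for the example.
 */
static inline void amdgpu_device_vram_access_example(struct amdgpu_device *adev)
{
	uint32_t fence_log[4];
	const loff_t FENCE_LOG_OFFSET = 0x1000;	/* hypothetical VRAM offset */

	/* read 16 bytes starting at the given VRAM offset */
	amdgpu_device_vram_access(adev, FENCE_LOG_OFFSET, fence_log,
				  sizeof(fence_log), false);
}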
/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}
/*
 * MMIO register read with bytes helper functions
 * @offset: bytes offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}
/*
 * MMIO register write with bytes helper functions
 * @offset: bytes offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}
/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}
/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}
/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}
/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
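/*
 * Usage sketch (illustrative only): write a 64-bit write pointer to a
 * doorbell slot and read it back. The doorbell_index value here is
 * hypothetical; real indices come from the adev->doorbell_index assignments.
 */
static inline u64 amdgpu_doorbell_example(struct amdgpu_device *adev,
					  u32 doorbell_index, u64 wptr)
{
	/* 64-bit doorbells are used on VEGA10 and newer */
	amdgpu_mm_wdoorbell64(adev, doorbell_index, wptr);
	return amdgpu_mm_rdoorbell64(adev, doorbell_index);
}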
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}
/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}
/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}
/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}
/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}
/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
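/*
 * Usage sketch (illustrative only): the register array is consumed as
 * {offset, and_mask, or_mask} triplets. An and_mask of 0xffffffff replaces
 * the whole register, anything else is a read-modify-write of the masked
 * field. The offsets and values below are made up for the example; real
 * golden-register tables live in the per-ASIC source files.
 */
static inline void amdgpu_golden_settings_example(struct amdgpu_device *adev)
{
	static const u32 example_golden_settings[] = {
		/* offset,    and_mask,    or_mask */
		0x0000263e, 0xffffffff, 0x12011003,	/* full replace */
		0x000009df, 0x00ff0000, 0x00280000,	/* update one field */
	};

	amdgpu_device_program_register_sequence(adev, example_golden_settings,
						ARRAY_SIZE(example_golden_settings));
}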
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on doorbell BAR since the SDMA
	 * paging queue doorbells use the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with paging queue enabled,
	 * the max num_doorbells should + 1 page (0x400 in dword)
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}
/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}
/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}
/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}
/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
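/*
 * Usage sketch (illustrative only): a wb entry is a dword offset into the
 * CPU mapping adev->wb.wb; the matching GPU address is adev->wb.gpu_addr plus
 * that offset in bytes. This mirrors how the ring code allocates slots for
 * fences and write pointers.
 */
static inline int amdgpu_device_wb_example(struct amdgpu_device *adev)
{
	u32 wb;
	u64 gpu_addr;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;	/* no free slot */

	adev->wb.wb[wb] = 0;				/* CPU view of the slot */
	gpu_addr = adev->wb.gpu_addr + (wb * 4);	/* GPU view of the slot */
	(void)gpu_addr;

	amdgpu_device_wb_free(adev, wb);
	return 0;
}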
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the size we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}
/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old SMC firmware still needs the driver to do a vPost, otherwise the
		 * GPU hangs. SMC firmware above version 22.15 doesn't have this flaw, so we
		 * force a vPost for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}
/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}
/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	return 0;
}
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after they are powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		pci_save_state(dev->pdev);
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};
/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}
/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}
/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}
/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}
/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
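/*
 * Usage sketch (illustrative only): checking whether the asic provides at
 * least a given GFX IP version before using a feature. A return value of 0
 * means "equal or greater", 1 means "smaller or not present".
 */
static inline bool amdgpu_example_has_gfx_9_0(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
						  9, 0) == 0;
}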
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		chip_name = "renoir";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
			goto parse_soc_bounding_box;

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
		}

parse_soc_bounding_box:
		/*
		 * soc bounding box info is not integrated in discovery table,
		 * we always need to parse it from gpu info firmware.
		 */
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
		}
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}
/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		if (adev->asic_type == CHIP_RAVEN ||
		    adev->asic_type == CHIP_RENOIR)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->family = AMDGPU_FAMILY_NV;

		r = nv_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
		amdgpu_discovery_get_gfx_info(adev);

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			/* Read BIOS */
			if (!amdgpu_get_bios(adev))
				return -EINVAL;

			r = amdgpu_atombios_init(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
				return r;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}
static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}
static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	uint32_t smu_version;

	if (adev->asic_type >= CHIP_VEGA10) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
				continue;

			/* no need to do the fw loading again if already done*/
			if (adev->ip_blocks[i].status.hw == true)
				break;

			if (adev->in_gpu_reset || adev->in_suspend) {
				r = adev->ip_blocks[i].version->funcs->resume(adev);
				if (r) {
					DRM_ERROR("resume of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			} else {
				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
				if (r) {
					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			}

			adev->ip_blocks[i].status.hw = true;
			break;
		}
	}

	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	return r;
}
/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	r = amdgpu_ras_init(adev);
	if (r)
		return r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			goto init_failed;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
							       AMDGPU_GEM_DOMAIN_VRAM,
							       AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					goto init_failed;
				}
			}
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto init_failed;
	}

	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase1(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase2(adev);
	if (r)
		goto init_failed;

	/*
	 * retired pages will be loaded from eeprom and reserved here,
	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
	 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
	 * for I2C communication which is only true at this point.
	 * recovery_init may fail, but it can free all resources allocated by
	 * itself and its failure should not stop the amdgpu init process.
	 *
	 * Note: theoretically, this should be called before all vram allocations
	 * to protect retired pages from abuse.
	 */
	amdgpu_ras_recovery_init(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_add_device(adev);
	amdgpu_amdkfd_device_init(adev);

init_failed:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return r;
}
/**
 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
 *
 * @adev: amdgpu_device pointer
 *
 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
 * this function before a GPU reset. If the value is retained after a
 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_check_vram_lost - check if vram is valid
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM are lost or not.
 * returns true if vram is lost, false if not.
 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}
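/*
 * Usage sketch (illustrative only): the reset/recovery path records the magic
 * value before a GPU reset and compares it afterwards to decide whether
 * buffer contents in VRAM need to be re-created.
 */
static inline bool amdgpu_example_vram_lost_after_reset(struct amdgpu_device *adev)
{
	amdgpu_device_fill_reset_magic(adev);		/* before the reset */
	/* ... asic reset would happen here ... */
	return amdgpu_device_check_vram_lost(adev);	/* after the reset */
}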
/**
 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
 *
 * @adev: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run.
 * Late initialization pass enabling clockgating for hardware IPs.
 * Fini or suspend, pass disabling clockgating for hardware IPs.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}
static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip PG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
			/* enable powergating to save power */
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
										      state);
			if (r) {
				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}
static int amdgpu_device_enable_mgpu_fan_boost(void)
{
	struct amdgpu_gpu_instance *gpu_ins;
	struct amdgpu_device *adev;
	int i, ret = 0;

	mutex_lock(&mgpu_info.mutex);

	/*
	 * MGPU fan boost feature should be enabled
	 * only when there are two or more dGPUs in
	 * the system
	 */
	if (mgpu_info.num_dgpu < 2)
		goto out;

	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		gpu_ins = &(mgpu_info.gpu_ins[i]);
		adev = gpu_ins->adev;
		if (!(adev->flags & AMD_IS_APU) &&
		    !gpu_ins->mgpu_fan_enabled &&
		    adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
			if (ret)
				break;

			gpu_ins->mgpu_fan_enabled = 1;
		}
	}

out:
	mutex_unlock(&mgpu_info.mutex);

	return ret;
}
/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or something that needs to happen
 * late in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
		adev->ip_blocks[i].status.late_initialized = true;
	}

	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	amdgpu_device_fill_reset_magic(adev);

	r = amdgpu_device_enable_mgpu_fan_boost();
	if (r)
		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		mutex_lock(&mgpu_info.mutex);

		/*
		 * Reset device p-state to low as this was booted with high.
		 *
		 * This should be performed only after all devices from the same
		 * hive get initialized.
		 *
		 * However, it's unknown how many devices are in the hive in advance,
		 * as this is counted one by one during device initialization.
		 *
		 * So, we wait for all XGMI interlinked devices to be initialized.
		 * This may bring some delays as those devices may come from
		 * different hives. But that should be OK.
		 */
		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
			for (i = 0; i < mgpu_info.num_gpu; i++) {
				gpu_instance = &(mgpu_info.gpu_ins[i]);
				if (gpu_instance->adev->flags & AMD_IS_APU)
					continue;

				r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
				if (r) {
					DRM_ERROR("pstate setting failed (%d).\n", r);
					break;
				}
			}
		}

		mutex_unlock(&mgpu_info.mutex);
	}

	return 0;
}
/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run. hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_ras_pre_fini(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_remove_device(adev);

	amdgpu_amdkfd_device_fini(adev);

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
			amdgpu_ib_pool_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	amdgpu_ras_fini(adev);

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}
/**
 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
 *
 * @work: work_struct.
 */
static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, delayed_init_work.work);
	int r;

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);
}

static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);

	mutex_lock(&adev->gfx.gfx_off_mutex);
	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
			adev->gfx.gfx_off_state = true;
	}
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

/**
 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		/* displays are handled separately */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
			/* XXX handle errors */
			r = adev->ip_blocks[i].version->funcs->suspend(adev);
			/* XXX handle errors */
			if (r) {
				DRM_ERROR("suspend of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}

			adev->ip_blocks[i].status.hw = false;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* displays are handled in phase1 */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
			continue;
		/* PSP lost connection when err_event_athub occurs */
		if (amdgpu_ras_intr_triggered() &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			adev->ip_blocks[i].status.hw = false;
			continue;
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;
		/* handle putting the SMC in the appropriate state */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
			if (r) {
				DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
					  adev->mp1_state, r);
				return r;
			}
		}

		adev->ip_blocks[i].status.hw = false;
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend - run suspend for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run. suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	r = amdgpu_device_ip_suspend_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_suspend_phase2(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return r;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			block->status.hw = false;
			if (block->version->type != ip_order[i] ||
				!block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE,
		AMD_IP_BLOCK_TYPE_VCN
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
				!block->status.valid ||
				block->status.hw)
				continue;

			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
				r = block->version->funcs->resume(adev);
			else
				r = block->version->funcs->hw_init(adev);

			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

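/*
 * The two SR-IOV reinit helpers above bring the IP blocks back in a fixed
 * order after a VF FLR: first the blocks everything else depends on (GMC,
 * COMMON, PSP, IH), then, once firmware has been reloaded, the engines
 * (SMC, DCE, GFX, SDMA, UVD/VCE/VCN).  Note that the SMC is resumed rather
 * than re-initialized in the late pass.
 */
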
/**
 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * COMMON, GMC, and IH. resume puts the hardware into a functional state
 * after a suspend and updates the software state as necessary. This
 * function is also used for restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {

			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Second resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
 * functional state after a suspend and updates the software state as
 * necessary. This function is also used for restoring the GPU after a GPU
 * reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main resume function for hardware IPs. The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
 * run sequentially.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

/**
 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Query the VBIOS data tables to determine if the board supports SR-IOV.
 */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

/**
 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
 *
 * @asic_type: AMD asic type
 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
 * Returns true if DC has support, false if not.
 */
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		/*
		 * We have systems in the wild with these ASICs that require
		 * LVDS and VGA support which is not supported with DC.
		 *
		 * Fallback to the non-DC driver here by default so as not to
		 * cause regressions.
		 */
		return amdgpu_dc > 0;
	case CHIP_HAWAII:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
#endif
		return amdgpu_dc != 0;
#endif
	default:
		if (amdgpu_dc > 0)
			DRM_INFO("Display Core has been requested via kernel parameter "
				 "but isn't supported by ASIC, ignoring\n");
		return false;
	}
}

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}

static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
	struct amdgpu_device *adev =
		container_of(__work, struct amdgpu_device, xgmi_reset_work);
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);

	/* It's a bug to not have a hive within this function */
	if (WARN_ON(!hive))
		return;

	/*
	 * Use task barrier to synchronize all xgmi reset works across the
	 * hive. task_barrier_enter and task_barrier_exit will block
	 * until all the threads running the xgmi reset works reach
	 * those points. task_barrier_full will do both blocks.
	 */
	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {

		task_barrier_enter(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);

		if (adev->asic_reset_res)
			goto fail;

		task_barrier_exit(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);

		if (adev->asic_reset_res)
			goto fail;
	} else {

		task_barrier_full(&hive->tb);
		adev->asic_reset_res = amdgpu_asic_reset(adev);
	}

fail:
	if (adev->asic_reset_res)
		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
			 adev->asic_reset_res, adev->ddev->unique);
}

static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
{
	char *input = amdgpu_lockup_timeout;
	char *timeout_setting = NULL;
	int index = 0;
	long timeout;
	int ret = 0;

	/*
	 * By default timeout for non compute jobs is 10000.
	 * And there is no timeout enforced on compute jobs.
	 * In SR-IOV or passthrough mode, timeout for compute
	 * jobs is 10000 by default as well.
	 */
	adev->gfx_timeout = msecs_to_jiffies(10000);
	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		adev->compute_timeout = adev->gfx_timeout;
	else
		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;

	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
		while ((timeout_setting = strsep(&input, ",")) &&
		       strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
			ret = kstrtol(timeout_setting, 0, &timeout);
			if (ret)
				return ret;

			if (timeout == 0) {
				index++;
				continue;
			} else if (timeout < 0) {
				timeout = MAX_SCHEDULE_TIMEOUT;
			} else {
				timeout = msecs_to_jiffies(timeout);
			}

			switch (index++) {
			case 0:
				adev->gfx_timeout = timeout;
				break;
			case 1:
				adev->compute_timeout = timeout;
				break;
			case 2:
				adev->sdma_timeout = timeout;
				break;
			case 3:
				adev->video_timeout = timeout;
				break;
			default:
				break;
			}
		}
		/*
		 * There is only one value specified and
		 * it should apply to all non-compute jobs.
		 */
		if (index == 1) {
			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
				adev->compute_timeout = adev->gfx_timeout;
		}
	}

	return ret;
}

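/*
 * Illustration of the parsing above (values are examples only): with
 * "amdgpu.lockup_timeout=10000,60000,10000,10000" on the kernel command
 * line the comma separated fields land in the gfx, compute, sdma and video
 * timeouts in that order (per the switch above); a single value applies to
 * all non-compute queues, 0 keeps the built-in default, and a negative
 * value disables the timeout (MAX_SCHEDULE_TIMEOUT).
 */
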
/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool boco = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;

	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
		adev->asic_type = amdgpu_force_asic_type;
	else
		adev->asic_type = flags & AMD_ASIC_MASK;

	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	if (amdgpu_emu_mode == 1)
		adev->usec_timeout *= 2;
	adev->gmc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_scheds = 0;
	adev->gmc.gmc_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initializations are all done here so we
	 * can recall function without having locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->gfx.pipe_reserve_mutex);
	mutex_init(&adev->gfx.gfx_off_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->lock_reset);
	mutex_init(&adev->psp.mutex);
	mutex_init(&adev->notifier_lock);

	r = amdgpu_device_check_arguments(adev);
	if (r)
		return r;

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->delayed_init_work,
			  amdgpu_device_delayed_init_work_handler);
	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
			  amdgpu_device_delay_enable_gfx_off);

	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);

	adev->gfx.gfx_off_req_count = 1;
	adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* enable PCIE atomic ops */
	r = pci_enable_atomic_ops_to_root(adev->pdev,
					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (r) {
		adev->have_atomics_support = false;
		DRM_INFO("PCIE atomic ops is not supported\n");
	} else {
		adev->have_atomics_support = true;
	}

	amdgpu_device_get_pcie_info(adev);

	if (amdgpu_mcbp)
		DRM_INFO("MCBP is enabled\n");

	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
		adev->enable_mes = true;

	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
		r = amdgpu_discovery_init(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_discovery_init failed\n");
			return r;
		}
	}

	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)
		return r;

	r = amdgpu_device_get_job_timeout_settings(adev);
	if (r) {
		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
		return r;
	}

	/* doorbell bar mapping and doorbell index init*/
	amdgpu_device_doorbell_init(adev);

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * don't care */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);

	if (amdgpu_device_supports_boco(ddev))
		boco = true;
	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    !pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, boco);
	if (boco)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	if (amdgpu_emu_mode == 1) {
		/* post the asic on emulation mode */
		emu_soc_asic_init(adev);
		goto fence_driver_init;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* check if we need to reset the asic
	 * E.g., driver was not cleanly unloaded previously, etc.
	 */
	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
		r = amdgpu_asic_reset(adev);
		if (r) {
			dev_err(adev->dev, "asic reset on init failed\n");
			goto failed;
		}
	}

	/* Post card if necessary */
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		if (!amdgpu_device_has_dc_support(adev))
			amdgpu_atombios_i2c_init(adev);
	}

fence_driver_init:
	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_device_ip_init(adev);
	if (r) {
		/* failed in exclusive mode due to timeout */
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    amdgpu_virt_mmio_blocked(adev) &&
		    !amdgpu_virt_wait_reset(adev)) {
			dev_err(adev->dev, "VF exclusive mode timeout\n");
			/* Don't send request since VF is inactive. */
			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
			adev->virt.ops = NULL;
			r = -EAGAIN;
			goto failed;
		}
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		goto failed;
	}

	DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
		  adev->gfx.config.max_shader_engines,
		  adev->gfx.config.max_sh_per_se,
		  adev->gfx.config.max_cu_per_sh,
		  adev->gfx.cu_info.number);

	amdgpu_ctx_init_sched(adev);

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	amdgpu_fbdev_init(adev);

	r = amdgpu_pm_sysfs_init(adev);
	if (r) {
		adev->pm_sysfs_en = false;
		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
	} else
		adev->pm_sysfs_en = true;

	r = amdgpu_ucode_sysfs_init(adev);
	if (r) {
		adev->ucode_sysfs_en = false;
		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
	} else
		adev->ucode_sysfs_en = true;

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_init(adev);
	if (r)
		DRM_ERROR("Creating debugfs files failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/*
	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
	 * Otherwise the mgpu fan boost feature will be skipped because the
	 * gpu instance count would be too low.
	 */
	amdgpu_register_gpu_instance(adev);

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_device_ip_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	/* must succeed. */
	amdgpu_ras_resume(adev);

	queue_delayed_work(system_wq, &adev->delayed_init_work,
			   msecs_to_jiffies(AMDGPU_RESUME_MS));

	r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
	if (r) {
		dev_err(adev->dev, "Could not create pcie_replay_count");
		return r;
	}

	if (IS_ENABLED(CONFIG_PERF_EVENTS))
		r = amdgpu_pmu_init(adev);
	if (r)
		dev_err(adev->dev, "amdgpu_pmu_init failed\n");

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (boco)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	flush_delayed_work(&adev->delayed_init_work);
	adev->shutdown = true;

	/* disable all interrupts */
	amdgpu_irq_disable_all(adev);
	if (adev->mode_info.mode_config_initialized){
		if (!amdgpu_device_has_dc_support(adev))
			drm_helper_force_disable_all(adev->ddev);
		else
			drm_atomic_helper_shutdown(adev->ddev);
	}
	amdgpu_fence_driver_fini(adev);
	if (adev->pm_sysfs_en)
		amdgpu_pm_sysfs_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_device_ip_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);

	if (amdgpu_emu_mode != 1)
		amdgpu_atombios_fini(adev);

	kfree(adev->bios);
	adev->bios = NULL;
	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    !pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (amdgpu_device_supports_boco(adev->ddev))
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_device_doorbell_fini(adev);

	amdgpu_debugfs_regs_cleanup(adev);
	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
	if (adev->ucode_sysfs_en)
		amdgpu_ucode_sysfs_fini(adev);
	if (IS_ENABLED(CONFIG_PERF_EVENTS))
		amdgpu_pmu_fini(adev);
	amdgpu_debugfs_preempt_cleanup(adev);
	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
		amdgpu_discovery_fini(adev);
}

/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	adev->in_suspend = true;
	drm_kms_helper_poll_disable(dev);

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 1);

	cancel_delayed_work_sync(&adev->delayed_init_work);

	if (!amdgpu_device_has_dc_support(adev)) {
		/* turn off display hw */
		drm_modeset_lock_all(dev);
		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter)
			drm_helper_connector_dpms(connector,
						  DRM_MODE_DPMS_OFF);
		drm_connector_list_iter_end(&iter);
		drm_modeset_unlock_all(dev);
		/* unpin the front buffers and cursors */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
			struct drm_framebuffer *fb = crtc->primary->fb;
			struct amdgpu_bo *robj;

			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
				r = amdgpu_bo_reserve(aobj, true);
				if (r == 0) {
					amdgpu_bo_unpin(aobj);
					amdgpu_bo_unreserve(aobj);
				}
			}

			if (fb == NULL || fb->obj[0] == NULL) {
				continue;
			}
			robj = gem_to_amdgpu_bo(fb->obj[0]);
			/* don't unpin kernel fb objects */
			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
				r = amdgpu_bo_reserve(robj, true);
				if (r == 0) {
					amdgpu_bo_unpin(robj);
					amdgpu_bo_unreserve(robj);
				}
			}
		}
	}

	amdgpu_amdkfd_suspend(adev);

	amdgpu_ras_suspend(adev);

	r = amdgpu_device_ip_suspend_phase1(adev);

	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_device_ip_suspend_phase2(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* post card */
	if (amdgpu_device_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_device_ip_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
		return r;
	}
	amdgpu_fence_driver_resume(adev);

	r = amdgpu_device_ip_late_init(adev);
	if (r)
		return r;

	queue_delayed_work(system_wq, &adev->delayed_init_work,
			   msecs_to_jiffies(AMDGPU_RESUME_MS));

	if (!amdgpu_device_has_dc_support(adev)) {
		/* pin cursors */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
				r = amdgpu_bo_reserve(aobj, true);
				if (r == 0) {
					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
					if (r != 0)
						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
					amdgpu_bo_unreserve(aobj);
				}
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev);
	if (r)
		return r;

	/* Make sure IB tests flushed */
	flush_delayed_work(&adev->delayed_init_work);

	/* blat the mode back in */
	if (fbcon) {
		if (!amdgpu_device_has_dc_support(adev)) {
			/* pre DCE11 */
			drm_helper_resume_force_mode(dev);

			/* turn on display hw */
			drm_modeset_lock_all(dev);

			drm_connector_list_iter_begin(dev, &iter);
			drm_for_each_connector_iter(connector, &iter)
				drm_helper_connector_dpms(connector,
							  DRM_MODE_DPMS_ON);
			drm_connector_list_iter_end(&iter);

			drm_modeset_unlock_all(dev);
		}
		amdgpu_fbdev_set_suspend(adev, 0);
	}

	drm_kms_helper_poll_enable(dev);

	amdgpu_ras_resume(adev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_hpd_irq_event(dev);
	else
		drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif
	adev->in_suspend = false;

	return 0;
}

/**
 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and
 * the check_soft_reset callbacks are run. check_soft_reset determines
 * if the asic is still hung or not.
 * Returns true if any of the IPs are still in a hung state, false if not.
 */
static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	if (amdgpu_sriov_vf(adev))
		return true;

	if (amdgpu_asic_need_full_reset(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

/**
 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
 * handles any IP specific hardware or software state changes that are
 * necessary for a soft reset to succeed.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
 *
 * @adev: amdgpu_device pointer
 *
 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
 * reset is necessary to recover.
 * Returns true if a full asic reset is required, false if not.
 */
static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	if (amdgpu_asic_need_full_reset(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some block need full reset!\n");
				return true;
			}
		}
	}
	return false;
}

/**
 * amdgpu_device_ip_soft_reset - do a soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * soft_reset callbacks are run if the block is hung. soft_reset handles any
 * IP specific hardware or software state changes that are necessary to soft
 * reset the IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
 * handles any IP specific hardware or software state changes that are
 * necessary after the IP has been soft reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

/**
 * amdgpu_device_recover_vram - Recover some VRAM contents
 *
 * @adev: amdgpu_device pointer
 *
 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
 * restore things like GPUVM page tables after a GPU reset where
 * the contents of VRAM might be lost.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
{
	struct dma_fence *fence = NULL, *next = NULL;
	struct amdgpu_bo *shadow;
	long r = 1, tmo;

	if (amdgpu_sriov_runtime(adev))
		tmo = msecs_to_jiffies(8000);
	else
		tmo = msecs_to_jiffies(100);

	DRM_INFO("recover vram bo from shadow start\n");
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {

		/* No need to recover an evicted BO */
		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
			continue;

		r = amdgpu_bo_restore_shadow(shadow, &next);
		if (r)
			break;

		if (fence) {
			tmo = dma_fence_wait_timeout(fence, false, tmo);
			dma_fence_put(fence);
			fence = next;
			if (tmo == 0) {
				r = -ETIMEDOUT;
				break;
			} else if (tmo < 0) {
				r = tmo;
				break;
			}
		} else {
			fence = next;
		}
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence)
		tmo = dma_fence_wait_timeout(fence, false, tmo);
	dma_fence_put(fence);

	if (r < 0 || tmo <= 0) {
		DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
		return -EIO;
	}

	DRM_INFO("recover vram bo from shadow done\n");
	return 0;
}

/**
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu device pointer
 * @from_hypervisor: request from hypervisor
 *
 * do VF FLR and reinitialize Asic
 * return 0 means succeeded otherwise failed
 */
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
				     bool from_hypervisor)
{
	int r;

	if (from_hypervisor)
		r = amdgpu_virt_request_full_gpu(adev, true);
	else
		r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;

	/* Resume IP prior to SMC */
	r = amdgpu_device_ip_reinit_early_sriov(adev);
	if (r)
		goto error;

	amdgpu_virt_init_data_exchange(adev);
	/* we need recover gart prior to run SMC/CP/SDMA resume */
	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	/* now we are okay to resume SMC/CP/SDMA */
	r = amdgpu_device_ip_reinit_late_sriov(adev);
	if (r)
		goto error;

	amdgpu_irq_gpu_reset_resume_helper(adev);
	r = amdgpu_ib_ring_tests(adev);
	amdgpu_amdkfd_post_reset(adev);

error:
	amdgpu_virt_release_full_gpu(adev, true);
	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
		amdgpu_inc_vram_lost(adev);
		r = amdgpu_device_recover_vram(adev);
	}

	return r;
}

/**
 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
 *
 * @adev: amdgpu device pointer
 *
 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
 * a hung GPU.
 */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
{
	if (!amdgpu_device_ip_check_soft_reset(adev)) {
		DRM_INFO("Timeout, but no hardware hang detected.\n");
		return false;
	}

	if (amdgpu_gpu_recovery == 0)
		goto disabled;

	if (amdgpu_sriov_vf(adev))
		return true;

	if (amdgpu_gpu_recovery == -1) {
		switch (adev->asic_type) {
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_TOPAZ:
		case CHIP_TONGA:
		case CHIP_FIJI:
		case CHIP_POLARIS10:
		case CHIP_POLARIS11:
		case CHIP_POLARIS12:
		case CHIP_VEGAM:
		case CHIP_VEGA20:
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_RAVEN:
			break;
		default:
			goto disabled;
		}
	}

	return true;

disabled:
	DRM_INFO("GPU recovery disabled.\n");
	return false;
}

static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
					struct amdgpu_job *job,
					bool *need_full_reset_arg)
{
	int i, r = 0;
	bool need_full_reset = *need_full_reset_arg;

	/* block all schedulers and reset given job's ring */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}

	if (job)
		drm_sched_increase_karma(&job->base);

	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
	if (!amdgpu_sriov_vf(adev)) {

		if (!need_full_reset)
			need_full_reset = amdgpu_device_ip_need_full_reset(adev);

		if (!need_full_reset) {
			amdgpu_device_ip_pre_soft_reset(adev);
			r = amdgpu_device_ip_soft_reset(adev);
			amdgpu_device_ip_post_soft_reset(adev);
			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
				DRM_INFO("soft reset failed, will fallback to full reset!\n");
				need_full_reset = true;
			}
		}

		if (need_full_reset)
			r = amdgpu_device_ip_suspend(adev);

		*need_full_reset_arg = need_full_reset;
	}

	return r;
}

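/*
 * Escalation logic above: on bare metal a soft reset of only the hung IP
 * blocks is attempted first; if that fails, or any block still reports a
 * hang afterwards, the path falls back to suspending all IPs in
 * preparation for a full ASIC reset, which amdgpu_do_asic_reset() below
 * then performs.
 */
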
static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
				struct list_head *device_list_handle,
				bool *need_full_reset_arg)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
	int r = 0;

	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper links negotiation in FW (within 1 sec)
	 */
	if (need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else
				r = amdgpu_asic_reset(tmp_adev);

			if (r) {
				DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
					  r, tmp_adev->ddev->unique);
				break;
			}
		}

		/* For XGMI wait for all resets to complete before proceed */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle,
					    gmc.xgmi.head) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}
		}
	}

	if (!r && amdgpu_ras_intr_triggered())
		amdgpu_ras_intr_cleared();

	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		if (need_full_reset) {
			/* post card */
			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
				DRM_WARN("asic atom init failed!");

			if (!r) {
				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
				r = amdgpu_device_ip_resume_phase1(tmp_adev);
				if (r)
					goto out;

				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
				if (vram_lost) {
					DRM_INFO("VRAM is lost due to GPU reset!\n");
					amdgpu_inc_vram_lost(tmp_adev);
				}

				r = amdgpu_gtt_mgr_recover(
					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
				if (r)
					goto out;

				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;

				r = amdgpu_device_ip_resume_phase2(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

				/*
				 * Add this ASIC as tracked as reset was already
				 * complete successfully.
				 */
				amdgpu_register_gpu_instance(tmp_adev);

				r = amdgpu_device_ip_late_init(tmp_adev);
				if (r)
					goto out;

				/* must succeed. */
				amdgpu_ras_resume(tmp_adev);

				/* Update PSP FW topology after reset */
				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
			}
		}

out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				r = amdgpu_device_ip_suspend(tmp_adev);
				need_full_reset = true;
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	*need_full_reset_arg = need_full_reset;
	return r;
}

static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
{
	if (trylock) {
		if (!mutex_trylock(&adev->lock_reset))
			return false;
	} else
		mutex_lock(&adev->lock_reset);

	atomic_inc(&adev->gpu_reset_counter);
	adev->in_gpu_reset = true;
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}

	return true;
}

static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
	adev->in_gpu_reset = false;
	mutex_unlock(&adev->lock_reset);
}

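/*
 * amdgpu_device_lock_adev()/amdgpu_device_unlock_adev() bracket every reset
 * path: the lock side bumps gpu_reset_counter, marks in_gpu_reset and
 * records the mp1 (SMU) state expected by the chosen reset method, while
 * the unlock side clears that state again after any VF errors have been
 * flushed.
 */
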
/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu device pointer
 * @job: which job trigger hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do soft-reset or full-reset and reinitialize Asic
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle =  NULL;
	bool need_full_reset, job_signaled;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool in_ras_intr = amdgpu_ras_intr_triggered();
	bool use_baco =
		(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
		true : false;

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read log and see why the system rebooted.
	 */
	if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {

		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	need_full_reset = job_signaled = false;
	INIT_LIST_HEAD(&device_list);

	dev_info(adev->dev, "GPU %s begin!\n",
		 (in_ras_intr && !use_baco) ? "jobs stop":"reset");

	cancel_delayed_work_sync(&adev->delayed_init_work);

	hive = amdgpu_get_xgmi_hive(adev, false);

	/*
	 * Here we trylock to avoid chain of resets executing from
	 * either trigger by jobs on different adevs in XGMI hive or jobs on
	 * different schedulers for same device while this TO handler is running.
	 * We always reset all schedulers for device and all devices for XGMI
	 * hive so that should take care of them too.
	 */

	if (hive && !mutex_trylock(&hive->reset_lock)) {
		DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
			 job ? job->base.id : -1, hive->hive_id);
		return 0;
	}

	/* Start with adev pre asic reset first for soft reset check.*/
	if (!amdgpu_device_lock_adev(adev, !hive)) {
		DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
			 job ? job->base.id : -1);
		return 0;
	}

	/* Block kfd: SRIOV would do it separately */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_amdkfd_pre_reset(adev);

	/* Build list of devices to reset */
	if  (adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive) {
			/*unlock kfd: SRIOV would do it separately */
			if (!amdgpu_sriov_vf(adev))
				amdgpu_amdkfd_post_reset(adev);
			amdgpu_device_unlock_adev(adev);
			return -ENODEV;
		}

		/*
		 * In case we are in XGMI hive mode device reset is done for all the
		 * nodes in the hive to retrain all XGMI links and hence the reset
		 * sequence is executed in loop on all nodes.
		 */
		device_list_handle = &hive->device_list;
	} else {
		list_add_tail(&adev->gmc.xgmi.head, &device_list);
		device_list_handle = &device_list;
	}

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		if (tmp_adev != adev) {
			amdgpu_device_lock_adev(tmp_adev, false);
			if (!amdgpu_sriov_vf(tmp_adev))
				amdgpu_amdkfd_pre_reset(tmp_adev);
		}

		/*
		 * Mark these ASICs to be reset as untracked first
		 * And add them back after reset completed
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		/* disable ras on ALL IPs */
		if (!(in_ras_intr && !use_baco) &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (in_ras_intr && !use_baco)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
	}


	if (in_ras_intr && !use_baco)
		goto skip_sched_resume;

	/*
	 * Must check guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && job->base.s_fence->parent &&
	    dma_fence_is_signaled(job->base.s_fence->parent))
		job_signaled = true;

	if (job_signaled) {
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}


	/* Guilty job will be freed after this*/
	r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
	if (r) {
		/*TODO Should we stop ?*/
		DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
			  r, adev->ddev->unique);
		adev->asic_reset_res = r;
	}

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {

		if (tmp_adev == adev)
			continue;

		r = amdgpu_device_pre_asic_reset(tmp_adev,
						 NULL,
						 &need_full_reset);
		/*TODO Should we stop ?*/
		if (r) {
			DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
				  r, tmp_adev->ddev->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	/* Actual ASIC resets if needed.*/
	/* TODO Implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
		if (r && r == -EAGAIN)
			goto retry;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs .*/
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* No point to resubmit jobs if we didn't HW reset*/
			if (!tmp_adev->asic_reset_res && !job_signaled)
				drm_sched_resubmit_jobs(&ring->sched);

			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
			drm_helper_resume_force_mode(tmp_adev->ddev);
		}

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		/*unlock kfd: SRIOV would do it separately */
		if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);
		amdgpu_device_unlock_adev(tmp_adev);
	}

	if (hive)
		mutex_unlock(&hive->reset_lock);

	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}

/**
 * amdgpu_device_get_pcie_info - fetch PCIE info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}

int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev->ddev))
		return -ENOTSUPP;

	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev->ddev))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
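
/*
 * BACO (Bus Active, Chip Off) entry/exit above is only attempted when the
 * platform reports BACO support.  On RAS-capable boards the NBIO doorbell
 * interrupt is disabled before entering BACO and only re-enabled after a
 * successful exit, bracketing the window in which the chip is powered off.
 */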